HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1542176 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Arpit Agarwal 2013-11-15 05:21:58 +00:00
parent 132a8ff7c7
commit cd768489f3
5 changed files with 12 additions and 10 deletions

View File

@@ -106,4 +106,6 @@ IMPROVEMENTS:
HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch. HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
(Junping Du via Arpit Agarwal) (Junping Du via Arpit Agarwal)
HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)

View File

@@ -97,8 +97,8 @@ public DataStorage(StorageInfo storageInfo) {
public synchronized String getDatanodeUuid() { public synchronized String getDatanodeUuid() {
return datanodeUuid; return datanodeUuid;
} }
-  synchronized void setDatanodeUuid(String newDatanodeUuid) {
+  public synchronized void setDatanodeUuid(String newDatanodeUuid) {
this.datanodeUuid = newDatanodeUuid; this.datanodeUuid = newDatanodeUuid;
} }
@@ -292,8 +292,7 @@ protected void setPropertiesFromFields(Properties props,
props.setProperty("storageID", sd.getStorageUuid()); props.setProperty("storageID", sd.getStorageUuid());
String datanodeUuid = getDatanodeUuid(); String datanodeUuid = getDatanodeUuid();
-    if (LayoutVersion.supports(Feature.ADD_DATANODE_AND_STORAGE_UUIDS,
-        layoutVersion) && datanodeUuid != null) {
+    if (datanodeUuid != null) {
props.setProperty("datanodeUuid", datanodeUuid); props.setProperty("datanodeUuid", datanodeUuid);
} }

View File

@@ -237,7 +237,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
* this iterations version 3-tuple * this iterations version 3-tuple
* </pre> * </pre>
*/ */
-  @Test
+  @Test (timeout=300000)
public void testVersions() throws Exception { public void testVersions() throws Exception {
UpgradeUtilities.initialize(); UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,

View File

@@ -454,6 +454,7 @@ public static void createDataNodeVersionFile(File[] parent,
public static void createDataNodeVersionFile(File[] parent, public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException { StorageInfo version, String bpid, String bpidToWrite) throws IOException {
DataStorage storage = new DataStorage(version); DataStorage storage = new DataStorage(version);
+    storage.setDatanodeUuid("FixedDatanodeUuid");
File[] versionFiles = new File[parent.length]; File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) { for (int i = 0; i < parent.length; i++) {

View File

@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks {
static Log LOG = NameNode.stateChangeLog; static Log LOG = NameNode.stateChangeLog;
/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */ /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
-  @Test
+  @Test (timeout=300000)
public void testListCorruptFilesCorruptedBlock() throws Exception { public void testListCorruptFilesCorruptedBlock() throws Exception {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
Random random = new Random(); Random random = new Random();
@@ -131,7 +131,7 @@ public void testListCorruptFilesCorruptedBlock() throws Exception {
/** /**
* Check that listCorruptFileBlocks works while the namenode is still in safemode. * Check that listCorruptFileBlocks works while the namenode is still in safemode.
*/ */
-  @Test
+  @Test (timeout=300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception { public void testListCorruptFileBlocksInSafeMode() throws Exception {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
Random random = new Random(); Random random = new Random();
@@ -262,7 +262,7 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
} }
// deliberately remove blocks from a file and validate the list-corrupt-file-blocks API // deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
-  @Test
+  @Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception { public void testlistCorruptFileBlocks() throws Exception {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -372,7 +372,7 @@ private int countPaths(RemoteIterator<Path> iter) throws IOException {
/** /**
* test listCorruptFileBlocks in DistributedFileSystem * test listCorruptFileBlocks in DistributedFileSystem
*/ */
-  @Test
+  @Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception { public void testlistCorruptFileBlocksDFS() throws Exception {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -445,7 +445,7 @@ public void testlistCorruptFileBlocksDFS() throws Exception {
* Also, test that DFS.listCorruptFileBlocks can make multiple successive * Also, test that DFS.listCorruptFileBlocks can make multiple successive
* calls. * calls.
*/ */
-  @Test
+  @Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception { public void testMaxCorruptFiles() throws Exception {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
try { try {