HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1542176 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Arpit Agarwal 2013-11-15 05:21:58 +00:00
parent 132a8ff7c7
commit cd768489f3
5 changed files with 12 additions and 10 deletions

View File

@@ -107,3 +107,5 @@ IMPROVEMENTS:
HDFS-5510. Fix a findbug warning in DataStorage.java on HDFS-2832 branch.
(Junping Du via Arpit Agarwal)
HDFS-5515. Fix TestDFSStartupVersions for HDFS-2832. (Arpit Agarwal)

View File

@@ -98,7 +98,7 @@ public synchronized String getDatanodeUuid() {
return datanodeUuid;
}
synchronized void setDatanodeUuid(String newDatanodeUuid) {
public synchronized void setDatanodeUuid(String newDatanodeUuid) {
this.datanodeUuid = newDatanodeUuid;
}
@@ -292,8 +292,7 @@ protected void setPropertiesFromFields(Properties props,
props.setProperty("storageID", sd.getStorageUuid());
String datanodeUuid = getDatanodeUuid();
if (LayoutVersion.supports(Feature.ADD_DATANODE_AND_STORAGE_UUIDS,
layoutVersion) && datanodeUuid != null) {
if (datanodeUuid != null) {
props.setProperty("datanodeUuid", datanodeUuid);
}

View File

@@ -237,7 +237,7 @@ boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
* this iterations version 3-tuple
* </pre>
*/
@Test
@Test (timeout=300000)
public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,

View File

@@ -454,6 +454,7 @@ public static void createDataNodeVersionFile(File[] parent,
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException {
DataStorage storage = new DataStorage(version);
storage.setDatanodeUuid("FixedDatanodeUuid");
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {

View File

@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks {
static Log LOG = NameNode.stateChangeLog;
/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
@Test
@Test (timeout=300000)
public void testListCorruptFilesCorruptedBlock() throws Exception {
MiniDFSCluster cluster = null;
Random random = new Random();
@@ -131,7 +131,7 @@ public void testListCorruptFilesCorruptedBlock() throws Exception {
/**
* Check that listCorruptFileBlocks works while the namenode is still in safemode.
*/
@Test
@Test (timeout=300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception {
MiniDFSCluster cluster = null;
Random random = new Random();
@@ -262,7 +262,7 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
}
// deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
@Test
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -372,7 +372,7 @@ private int countPaths(RemoteIterator<Path> iter) throws IOException {
/**
* test listCorruptFileBlocks in DistributedFileSystem
*/
@Test
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
@@ -445,7 +445,7 @@ public void testlistCorruptFileBlocksDFS() throws Exception {
* Also, test that DFS.listCorruptFileBlocks can make multiple successive
* calls.
*/
@Test
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
MiniDFSCluster cluster = null;
try {