From e2091275dc26745c4e919cd767283d32608a1817 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Mon, 29 Apr 2013 22:40:03 +0000
Subject: [PATCH] HDFS-4610. Reverting the patch Jenkins build is not run.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477396 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ---
 .../hadoop/hdfs/server/common/Storage.java | 2 +-
 ...ImagePreTransactionalStorageInspector.java | 3 +--
 .../hdfs/server/namenode/NNStorage.java | 9 +++----
 .../hdfs/server/namenode/TransferFsImage.java | 3 +--
 .../apache/hadoop/hdfs/MiniDFSCluster.java | 6 ++---
 .../datanode/TestDataNodeVolumeFailure.java | 5 ++--
 .../TestDataNodeVolumeFailureReporting.java | 25 +++++++++----------
 .../TestDataNodeVolumeFailureToleration.java | 8 +++---
 .../hdfs/server/datanode/TestDiskError.java | 5 ++--
 .../hdfs/server/namenode/TestCheckpoint.java | 16 ++++++------
 .../hdfs/server/namenode/TestEditLog.java | 4 +--
 .../TestNNStorageRetentionFunctional.java | 7 +++---
 13 files changed, 43 insertions(+), 53 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f8ff1cae6..1ab5be7805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,9 +332,6 @@ Trunk (Unreleased)
     HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
     (Arpit Agarwal via suresh)
 
-    HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable &
-    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
-
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 2a96faaf6e..a72e9be49c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -448,7 +448,7 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
         LOG.warn(rootPath + "is not a directory");
         return StorageState.NON_EXISTENT;
       }
-      if (!FileUtil.canWrite(root)) {
+      if (!root.canWrite()) {
         LOG.warn("Cannot access storage directory " + rootPath);
         return StorageState.NON_EXISTENT;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index d0554f6204..1a637cc32b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -33,7 +33,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -129,7 +128,7 @@ void inspectDirectory(StorageDirectory sd) throws IOException {
   static long readCheckpointTime(StorageDirectory sd) throws IOException {
     File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if (timeFile.exists() && FileUtil.canRead(timeFile)) {
+    if (timeFile.exists() && timeFile.canRead()) {
       DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index ea5f254e57..a5a4167e29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -34,7 +34,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -231,8 +230,8 @@ void attemptRestoreRemovedStorage() {
       File root = sd.getRoot();
       LOG.info("currently disabled dir " + root.getAbsolutePath() +
                "; type="+sd.getStorageDirType()
-               + ";canwrite="+FileUtil.canWrite(root));
-      if(root.exists() && FileUtil.canWrite(root)) {
+               + ";canwrite="+root.canWrite());
+      if(root.exists() && root.canWrite()) {
         LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
         this.addStorageDir(sd); // restore
         this.removedStorageDirs.remove(sd);
@@ -506,7 +505,7 @@ public File getFsImageName(long txid) {
         dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       sd = it.next();
       File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
-      if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
+      if(sd.getRoot().canRead() && fsImage.exists())
         return fsImage;
     }
     return null;
@@ -723,7 +722,7 @@ File findImageFile(long txid) {
   private File findFile(NameNodeDirType dirType, String name) {
     for (StorageDirectory sd : dirIterable(dirType)) {
       File candidate = new File(sd.getCurrentDir(), name);
-      if (FileUtil.canRead(sd.getCurrentDir()) &&
+      if (sd.getCurrentDir().canRead() &&
           candidate.exists()) {
         return candidate;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 0dfe7abbb4..60e703b4e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -33,7 +33,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -103,7 +102,7 @@ static void downloadEditsToStorage(String fsName, RemoteEditLog log,
     assert !dstFiles.isEmpty() : "No checkpoint targets.";
 
     for (File f : dstFiles) {
-      if (f.exists() && FileUtil.canRead(f)) {
+      if (f.exists() && f.canRead()) {
         LOG.info("Skipping download of remote edit log " +
             log + " since it already is stored locally at " + f);
         return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 2daa9cc9a0..60d89627c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -681,9 +681,9 @@ private String createPermissionsDiagnosisString(File path) {
       sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
       sb.append("\tpermissions: ");
       sb.append(path.isDirectory() ? "d": "-");
-      sb.append(FileUtil.canRead(path) ? "r" : "-");
-      sb.append(FileUtil.canWrite(path) ? "w" : "-");
-      sb.append(FileUtil.canExecute(path) ? "x" : "-");
+      sb.append(path.canRead() ? "r" : "-");
+      sb.append(path.canWrite() ? "w" : "-");
+      sb.append(path.canExecute() ? "x" : "-");
       sb.append("\n");
       path = path.getParentFile();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 5c2fd94080..3ba91c4dc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -31,7 +31,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -92,10 +91,10 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     if(data_fail != null) {
-      FileUtil.setWritable(data_fail, true);
+      data_fail.setWritable(true);
     }
     if(failedDir != null) {
-      FileUtil.setWritable(failedDir, true);
+      failedDir.setWritable(true);
     }
     if(cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index d07bb45075..e53933539e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -31,7 +31,6 @@
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -89,8 +88,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -132,8 +131,8 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -169,7 +168,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -201,7 +200,7 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * and that it's no longer up. Only wait for two replicas since
      * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -223,10 +222,10 @@ public void testSuccessiveVolumeFailures() throws Exception {
      * restart, so file creation should be able to succeed after
     * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -262,8 +261,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
     // third healthy so one node in the pipeline will not fail).
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 73dc77c3b2..ea256853f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -77,8 +77,8 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -152,7 +152,7 @@ public void testConfigureMinValidVolumes() throws Exception {
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public void testConfigureMinValidVolumes() throws Exception {
 
     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index ac268a2a4e..3c10a7ae12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -27,7 +27,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -107,8 +106,8 @@ public void testShutdown() throws Exception {
       }
     } finally {
       // restore its old permission
-      FileUtil.setWritable(dir1, true);
-      FileUtil.setWritable(dir2, true);
+      dir1.setWritable(true);
+      dir2.setWritable(true);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 2c56b3faaf..1dfc4d5e75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -157,7 +157,7 @@ public void testNameDirError() throws IOException {
 
       try {
         // Simulate the mount going read-only
-        FileUtil.setWritable(dir, false);
+        dir.setWritable(false);
         cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
             .format(false).build();
         fail("NN should have failed to start with " + dir + " set unreadable");
@@ -167,7 +167,7 @@ public void testNameDirError() throws IOException {
       } finally {
         cleanup(cluster);
         cluster = null;
-        FileUtil.setWritable(dir, true);
+        dir.setWritable(true);
       }
     }
   }
@@ -1825,7 +1825,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
       StorageDirectory sd1 = storage.getStorageDir(1);
 
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Upload checkpoint when NN has a bad storage dir. This should
       // succeed and create the checkpoint in the good dir.
@@ -1835,7 +1835,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
           new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
 
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1846,7 +1846,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;
@@ -1896,7 +1896,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       StorageDirectory sd0 = storage.getStorageDir(0);
       assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Try to upload checkpoint -- this should fail since there are no
       // valid storage dirs
@@ -1909,7 +1909,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       }
 
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1920,7 +1920,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index dc837457b7..f98f8da4cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -881,14 +881,14 @@ public void testFailedOpen() throws Exception {
     logDir.mkdirs();
     FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
     try {
-      FileUtil.setWritable(logDir, false);
+      logDir.setWritable(false);
       log.openForWrite();
       fail("Did no throw exception on only having a bad dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
           "too few journals successfully started", ioe);
     } finally {
-      FileUtil.setWritable(logDir, true);
+      logDir.setWritable(true);
       log.close();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index d27b91d224..dcc6b2da4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -28,7 +28,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -107,10 +106,10 @@ public void testPurgingWithNameEditsDirAfterFailure()
         getInProgressEditsFileName(5));
 
     LOG.info("Failing first storage dir by chmodding it");
-    FileUtil.setExecutable(sd0, false);
+    sd0.setExecutable(false);
     doSaveNamespace(nn);
     LOG.info("Restoring accessibility of first storage dir");
-    FileUtil.setExecutable(sd0, true);
+    sd0.setExecutable(true);
 
     LOG.info("nothing should have been purged in first storage dir");
     assertGlobEquals(cd0, "fsimage_\\d*",
@@ -139,7 +138,7 @@ public void testPurgingWithNameEditsDirAfterFailure()
       assertGlobEquals(cd0, "edits_.*",
           getInProgressEditsFileName(9));
     } finally {
-      FileUtil.setExecutable(sd0, true);
+      sd0.setExecutable(true);
 
       LOG.info("Shutting down...");
      if (cluster != null) {
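Every hunk in this revert is the same mechanical substitution: the static org.apache.hadoop.fs.FileUtil permission wrappers (introduced by HDFS-4610, per the removed CHANGES.txt entry) are swapped back for the plain java.io.File instance methods at each call site. The following is a minimal sketch of the two call styles the patch toggles between, not code from the Hadoop tree; the class name and path are illustrative, and it assumes hadoop-common is on the classpath. Only calls whose boolean return is visible in the hunks above (FileUtil.setExecutable, FileUtil.canWrite) are used.

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

// Illustrative comparison of the two permission-call styles; not part of this patch.
public class PermissionCallStyles {
  public static void main(String[] args) {
    // Hypothetical directory; any local path works for the comparison.
    File dir = new File(args.length > 0 ? args[0] : "/tmp/example-storage-dir");
    dir.mkdirs();

    // Style this revert restores: java.io.File instance methods.
    boolean viaFile = dir.setExecutable(true) && dir.canWrite();

    // Style this revert removes: static FileUtil wrappers from HDFS-4610.
    boolean viaFileUtil = FileUtil.setExecutable(dir, true) && FileUtil.canWrite(dir);

    System.out.println("File methods: " + viaFile + ", FileUtil wrappers: " + viaFileUtil);
  }
}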