From 3704e091a8d6c4181dfb29ac9d950e997b1170c0 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Tue, 30 Apr 2013 00:35:53 +0000
Subject: [PATCH] HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and FileUtil#canRead/Write/Execute. Contributed by Ivan Mitic.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477427 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/common/Storage.java    |  2 +-
 ...ImagePreTransactionalStorageInspector.java |  3 ++-
 .../hdfs/server/namenode/NNStorage.java       |  9 ++++---
 .../hdfs/server/namenode/TransferFsImage.java |  3 ++-
 .../apache/hadoop/hdfs/MiniDFSCluster.java    |  6 ++---
 .../datanode/TestDataNodeVolumeFailure.java   |  5 ++--
 .../TestDataNodeVolumeFailureReporting.java   | 25 ++++++++++---------
 .../TestDataNodeVolumeFailureToleration.java  |  8 +++---
 .../hdfs/server/datanode/TestDiskError.java   |  5 ++--
 .../hdfs/server/namenode/TestCheckpoint.java  | 16 ++++++------
 .../hdfs/server/namenode/TestEditLog.java     |  4 +--
 .../TestNNStorageRetentionFunctional.java     |  7 +++---
 13 files changed, 53 insertions(+), 43 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1ab5be7805..709478af14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -332,6 +332,9 @@ Trunk (Unreleased) HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows. (Arpit Agarwal via suresh) + HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and + FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh) + BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index a72e9be49c..2a96faaf6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -448,7 +448,7 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage) LOG.warn(rootPath + "is not a directory"); return StorageState.NON_EXISTENT; } - if (!root.canWrite()) { + if (!FileUtil.canWrite(root)) { LOG.warn("Cannot access storage directory " + rootPath); return StorageState.NON_EXISTENT; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java index 1a637cc32b..d0554f6204 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; @@ -128,7 +129,7 @@ void inspectDirectory(StorageDirectory sd) throws IOException { static long readCheckpointTime(StorageDirectory sd) throws IOException { File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME); long timeStamp = 0L; - if (timeFile.exists() && timeFile.canRead()) { + if (timeFile.exists() && FileUtil.canRead(timeFile)) { DataInputStream in = new DataInputStream(new FileInputStream(timeFile)); try { timeStamp = in.readLong(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index a5a4167e29..ea5f254e57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; @@ -230,8 +231,8 @@ void attemptRestoreRemovedStorage() { File root = sd.getRoot(); LOG.info("currently disabled dir " + root.getAbsolutePath() + "; type="+sd.getStorageDirType() - + ";canwrite="+root.canWrite()); - if(root.exists() && root.canWrite()) { + + ";canwrite="+FileUtil.canWrite(root)); + if(root.exists() && FileUtil.canWrite(root)) { LOG.info("restoring dir " + sd.getRoot().getAbsolutePath()); this.addStorageDir(sd); // restore this.removedStorageDirs.remove(sd); @@ -505,7 +506,7 @@ public File getFsImageName(long txid) { 
dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { sd = it.next(); File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid); - if(sd.getRoot().canRead() && fsImage.exists()) + if(FileUtil.canRead(sd.getRoot()) && fsImage.exists()) return fsImage; } return null; @@ -722,7 +723,7 @@ File findImageFile(long txid) { private File findFile(NameNodeDirType dirType, String name) { for (StorageDirectory sd : dirIterable(dirType)) { File candidate = new File(sd.getCurrentDir(), name); - if (sd.getCurrentDir().canRead() && + if (FileUtil.canRead(sd.getCurrentDir()) && candidate.exists()) { return candidate; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 60e703b4e0..0dfe7abbb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.Time; @@ -102,7 +103,7 @@ static void downloadEditsToStorage(String fsName, RemoteEditLog log, assert !dstFiles.isEmpty() : "No checkpoint targets."; for (File f : dstFiles) { - if (f.exists() && f.canRead()) { + if (f.exists() && FileUtil.canRead(f)) { LOG.info("Skipping download of remote edit log " + log + " since it already is stored locally at " + f); return; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 60d89627c0..2daa9cc9a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -681,9 +681,9 @@ private String createPermissionsDiagnosisString(File path) { sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n"); sb.append("\tpermissions: "); sb.append(path.isDirectory() ? "d": "-"); - sb.append(path.canRead() ? "r" : "-"); - sb.append(path.canWrite() ? "w" : "-"); - sb.append(path.canExecute() ? "x" : "-"); + sb.append(FileUtil.canRead(path) ? "r" : "-"); + sb.append(FileUtil.canWrite(path) ? "w" : "-"); + sb.append(FileUtil.canExecute(path) ? 
"x" : "-"); sb.append("\n"); path = path.getParentFile(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 3ba91c4dc1..5c2fd94080 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReaderFactory; @@ -91,10 +92,10 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { if(data_fail != null) { - data_fail.setWritable(true); + FileUtil.setWritable(data_fail, true); } if(failedDir != null) { - failedDir.setWritable(true); + FileUtil.setWritable(failedDir, true); } if(cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index e53933539e..d07bb45075 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -88,8 +89,8 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { for (int i = 0; i < 3; i++) { - new File(dataDir, "data"+(2*i+1)).setExecutable(true); - new File(dataDir, "data"+(2*i+2)).setExecutable(true); + FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true); + FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true); } cluster.shutdown(); } @@ -131,8 +132,8 @@ public void testSuccessiveVolumeFailures() throws Exception { * fail. The client does not retry failed nodes even though * perhaps they could succeed because just a single volume failed. */ - assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false)); - assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false)); /* * Create file1 and wait for 3 replicas (ie all DNs can still @@ -168,7 +169,7 @@ public void testSuccessiveVolumeFailures() throws Exception { * Now fail a volume on the third datanode. We should be able to get * three replicas since we've already identified the other failures. 
*/ - assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false)); Path file2 = new Path("/test2"); DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L); DFSTestUtil.waitReplication(fs, file2, (short)3); @@ -200,7 +201,7 @@ public void testSuccessiveVolumeFailures() throws Exception { * and that it's no longer up. Only wait for two replicas since * we'll never get a third. */ - assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false)); Path file3 = new Path("/test3"); DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L); DFSTestUtil.waitReplication(fs, file3, (short)2); @@ -222,10 +223,10 @@ public void testSuccessiveVolumeFailures() throws Exception { * restart, so file creation should be able to succeed after * restoring the data directories and restarting the datanodes. */ - assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true)); - assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true)); - assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true)); - assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true)); cluster.restartDataNodes(); cluster.waitActive(); Path file4 = new Path("/test4"); @@ -261,8 +262,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception { // third healthy so one node in the pipeline will not fail). 
File dn1Vol1 = new File(dataDir, "data"+(2*0+1)); File dn2Vol1 = new File(dataDir, "data"+(2*1+1)); - assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false)); - assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false)); Path file1 = new Path("/test1"); DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java index ea256853f9..73dc77c3b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java @@ -77,8 +77,8 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { for (int i = 0; i < 3; i++) { - new File(dataDir, "data"+(2*i+1)).setExecutable(true); - new File(dataDir, "data"+(2*i+2)).setExecutable(true); + FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true); + FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true); } cluster.shutdown(); } @@ -152,7 +152,7 @@ public void testConfigureMinValidVolumes() throws Exception { // Fail a volume on the 2nd DN File dn2Vol1 = new File(dataDir, "data"+(2*1+1)); - assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false)); // Should only get two replicas (the first DN and the 3rd) Path file1 = new Path("/test1"); @@ -165,7 +165,7 @@ public void testConfigureMinValidVolumes() throws Exception { // If we restore the volume we should still only be able to get // two replicas since the DN is still considered dead. 
- assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true)); + assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true)); Path file2 = new Path("/test2"); DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L); DFSTestUtil.waitReplication(fs, file2, (short)2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index 3c10a7ae12..ac268a2a4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -106,8 +107,8 @@ public void testShutdown() throws Exception { } } finally { // restore its old permission - dir1.setWritable(true); - dir2.setWritable(true); + FileUtil.setWritable(dir1, true); + FileUtil.setWritable(dir2, true); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 1dfc4d5e75..2c56b3faaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -157,7 +157,7 @@ public void testNameDirError() throws IOException { try { // Simulate the mount going read-only - dir.setWritable(false); + FileUtil.setWritable(dir, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(false).build(); fail("NN should have failed to start with " + dir + " set unreadable"); @@ -167,7 +167,7 @@ public void testNameDirError() throws IOException { } finally { cleanup(cluster); cluster = null; - dir.setWritable(true); + FileUtil.setWritable(dir, true); } } } @@ -1825,7 +1825,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception { StorageDirectory sd1 = storage.getStorageDir(1); currentDir = sd0.getCurrentDir(); - currentDir.setExecutable(false); + FileUtil.setExecutable(currentDir, false); // Upload checkpoint when NN has a bad storage dir. This should // succeed and create the checkpoint in the good dir. 
@@ -1835,7 +1835,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception { new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2))); // Restore the good dir - currentDir.setExecutable(true); + FileUtil.setExecutable(currentDir, true); nn.restoreFailedStorage("true"); nn.rollEditLog(); @@ -1846,7 +1846,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception { assertParallelFilesInvariant(cluster, ImmutableList.of(secondary)); } finally { if (currentDir != null) { - currentDir.setExecutable(true); + FileUtil.setExecutable(currentDir, true); } cleanup(secondary); secondary = null; @@ -1896,7 +1896,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { StorageDirectory sd0 = storage.getStorageDir(0); assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType()); currentDir = sd0.getCurrentDir(); - currentDir.setExecutable(false); + FileUtil.setExecutable(currentDir, false); // Try to upload checkpoint -- this should fail since there are no // valid storage dirs @@ -1909,7 +1909,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { } // Restore the good dir - currentDir.setExecutable(true); + FileUtil.setExecutable(currentDir, true); nn.restoreFailedStorage("true"); nn.rollEditLog(); @@ -1920,7 +1920,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { assertParallelFilesInvariant(cluster, ImmutableList.of(secondary)); } finally { if (currentDir != null) { - currentDir.setExecutable(true); + FileUtil.setExecutable(currentDir, true); } cleanup(secondary); secondary = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index f98f8da4cf..dc837457b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -881,14 +881,14 @@ public void testFailedOpen() throws Exception { logDir.mkdirs(); FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir); try { - logDir.setWritable(false); + FileUtil.setWritable(logDir, false); log.openForWrite(); fail("Did no throw exception on only having a bad dir"); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains( "too few journals successfully started", ioe); } finally { - logDir.setWritable(true); + FileUtil.setWritable(logDir, true); log.close(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java index dcc6b2da4a..d27b91d224 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -106,10 +107,10 @@ public void 
testPurgingWithNameEditsDirAfterFailure() getInProgressEditsFileName(5)); LOG.info("Failing first storage dir by chmodding it"); - sd0.setExecutable(false); + FileUtil.setExecutable(sd0, false); doSaveNamespace(nn); LOG.info("Restoring accessibility of first storage dir"); - sd0.setExecutable(true); + FileUtil.setExecutable(sd0, true); LOG.info("nothing should have been purged in first storage dir"); assertGlobEquals(cd0, "fsimage_\\d*", @@ -138,7 +139,7 @@ public void testPurgingWithNameEditsDirAfterFailure() assertGlobEquals(cd0, "edits_.*", getInProgressEditsFileName(9)); } finally { - sd0.setExecutable(true); + FileUtil.setExecutable(sd0, true); LOG.info("Shutting down..."); if (cluster != null) {
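
The pattern applied throughout this patch is a one-for-one swap of the java.io.File permission calls (canRead/canWrite/canExecute, setWritable/setExecutable) for the static helpers in org.apache.hadoop.fs.FileUtil, presumably so the checks and chmods behave the same on Windows as on Unix; the JIRA title also names FileUtil#setReadable, although that call does not appear in the hunks above. The sketch below shows the before-and-after shape of the change. It is a minimal standalone example, not part of the patch: the storage path and class name are hypothetical placeholders, and only FileUtil calls actually exercised in the diff are used.

```java
import java.io.File;

import org.apache.hadoop.fs.FileUtil;

/**
 * Minimal sketch of the substitution made in this patch: java.io.File
 * permission calls are replaced with the static FileUtil helpers.
 * The directory path below is a hypothetical placeholder.
 */
public class FileUtilPermissionExample {
  public static void main(String[] args) throws Exception {
    File storageRoot = new File("/tmp/example-storage-dir"); // hypothetical path

    // Before the patch: storageRoot.canWrite()
    // After the patch:  FileUtil.canWrite(storageRoot)
    if (!FileUtil.canWrite(storageRoot)) {
      System.err.println("Cannot access storage directory " + storageRoot);
    }

    // The tests simulate a failed volume by dropping the execute bit and
    // later restoring it; the boolean result is checked so that a failed
    // chmod is reported rather than silently ignored.
    boolean revoked = FileUtil.setExecutable(storageRoot, false);
    System.out.println("execute bit removed: " + revoked);

    boolean restored = FileUtil.setExecutable(storageRoot, true);
    FileUtil.setWritable(storageRoot, true); // restore write access, as in the test teardowns
    System.out.println("execute bit restored: " + restored);
  }
}
```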