diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eb2117cfdc..6b05bcaa41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -717,6 +717,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4783. TestDelegationTokensWithHA#testHAUtilClonesDelegationTokens
     fails on Windows. (cnauroth)
 
+    HDFS-4818. Several HDFS tests that attempt to make directories unusable do
+    not work correctly on Windows. (cnauroth)
+
   BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
   HDFS-4076. Support snapshot of single files. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 01d505728e..21c56c2f66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -842,8 +842,8 @@ public void reportErrorOnFile(File f) {
     String absPath = f.getAbsolutePath();
     for (StorageDirectory sd : storageDirs) {
       String dirPath = sd.getRoot().getAbsolutePath();
-      if (!dirPath.endsWith("/")) {
-        dirPath += "/";
+      if (!dirPath.endsWith(File.separator)) {
+        dirPath += File.separator;
       }
       if (absPath.startsWith(dirPath)) {
         reportErrorsOnDirectory(sd);
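The NNStorage change matters because java.io.File.separator is "\" on Windows and "/" elsewhere, so the old hardcoded "/" built a prefix that could never match a Windows absolute path. A minimal standalone sketch of the same prefix test (the class name and paths are hypothetical, for illustration only, not part of the patch):

    import java.io.File;

    public class StoragePrefixSketch {
      /** True if file lies under dir; appends the platform separator so a
       *  sibling such as data/nn2 is not mistaken for a child of data/nn. */
      static boolean isUnder(File dir, File file) {
        String dirPath = dir.getAbsolutePath();
        if (!dirPath.endsWith(File.separator)) {
          dirPath += File.separator;
        }
        return file.getAbsolutePath().startsWith(dirPath);
      }

      public static void main(String[] args) {
        File dir = new File("data/nn");  // hypothetical storage root
        System.out.println(isUnder(dir, new File("data/nn/current/VERSION")));  // true
        System.out.println(isUnder(dir, new File("data/nn2/current/VERSION"))); // false
      }
    }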
"" : + " " + ManagementFactory.getRuntimeMXBean().getName(); + String expectedLogMessage = "It appears that another namenode" + + lockingJvmName + " has already locked the storage directory"; + assertTrue("Log output does not contain expected log message: " + + expectedLogMessage, logs.getOutput().contains(expectedLogMessage)); } } finally { cleanup(cluster); @@ -2035,7 +2039,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { StorageDirectory sd0 = storage.getStorageDir(0); assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType()); currentDir = sd0.getCurrentDir(); - FileUtil.setExecutable(currentDir, false); + assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000")); // Try to upload checkpoint -- this should fail since there are no // valid storage dirs @@ -2048,7 +2052,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { } // Restore the good dir - FileUtil.setExecutable(currentDir, true); + assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755")); nn.restoreFailedStorage("true"); nn.rollEditLog(); @@ -2059,7 +2063,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { assertParallelFilesInvariant(cluster, ImmutableList.of(secondary)); } finally { if (currentDir != null) { - FileUtil.setExecutable(currentDir, true); + FileUtil.chmod(currentDir.getAbsolutePath(), "755"); } cleanup(secondary); secondary = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index cefffc8705..e3fd99a4b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -241,8 +241,8 @@ public void testFinalizeErrorReportedToNNStorage() throws IOException, Interrupt try { jm.finalizeLogSegment(0, 1); } finally { - assertTrue(storage.getRemovedStorageDirs().contains(sd)); FileUtil.chmod(sdRootPath, "+w", true); + assertTrue(storage.getRemovedStorageDirs().contains(sd)); } } @@ -439,8 +439,12 @@ public void testReadFromMiddleOfEditLog() throws CorruptionException, FileJournalManager jm = new FileJournalManager(conf, sd, storage); EditLogInputStream elis = getJournalInputStream(jm, 5, true); - FSEditLogOp op = elis.readOp(); - assertEquals("read unexpected op", op.getTransactionId(), 5); + try { + FSEditLogOp op = elis.readOp(); + assertEquals("read unexpected op", op.getTransactionId(), 5); + } finally { + IOUtils.cleanup(LOG, elis); + } } /** @@ -463,9 +467,13 @@ public void testExcludeInProgressStreams() throws CorruptionException, assertEquals(100, getNumberOfTransactions(jm, 1, false, false)); EditLogInputStream elis = getJournalInputStream(jm, 90, false); - FSEditLogOp lastReadOp = null; - while ((lastReadOp = elis.readOp()) != null) { - assertTrue(lastReadOp.getTransactionId() <= 100); + try { + FSEditLogOp lastReadOp = null; + while ((lastReadOp = elis.readOp()) != null) { + assertTrue(lastReadOp.getTransactionId() <= 100); + } + } finally { + IOUtils.cleanup(LOG, elis); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 0e1183e48e..7f20eff22f 100644 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 0e1183e48e..7f20eff22f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -106,6 +106,9 @@ public class TestFsck {
   static final Pattern numCorruptBlocksPattern = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
 
+  private static final String LINE_SEPARATOR =
+    System.getProperty("line.separator");
+
   static String runFsck(Configuration conf, int expectedErrCode,
                         boolean checkErrorCode,String... path)
                         throws Exception {
@@ -321,7 +324,7 @@ public void testFsckMove() throws Exception {
       while (true) {
         outStr = runFsck(conf, 1, false, "/");
         String numCorrupt = null;
-        for (String line : outStr.split("\n")) {
+        for (String line : outStr.split(LINE_SEPARATOR)) {
           Matcher m = numCorruptBlocksPattern.matcher(line);
           if (m.matches()) {
             numCorrupt = m.group(1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index d27b91d224..11665efa7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -21,6 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -59,7 +60,7 @@ public class TestNNStorageRetentionFunctional {
    */
   @Test
   public void testPurgingWithNameEditsDirAfterFailure()
-      throws IOException {
+      throws Exception {
     MiniDFSCluster cluster = null;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
@@ -107,10 +108,10 @@ public void testPurgingWithNameEditsDirAfterFailure()
         getInProgressEditsFileName(5));
 
     LOG.info("Failing first storage dir by chmodding it");
-    FileUtil.setExecutable(sd0, false);
+    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
     doSaveNamespace(nn);
     LOG.info("Restoring accessibility of first storage dir");
-    FileUtil.setExecutable(sd0, true);
+    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
 
     LOG.info("nothing should have been purged in first storage dir");
     assertGlobEquals(cd0, "fsimage_\\d*",
@@ -139,7 +140,7 @@ public void testPurgingWithNameEditsDirAfterFailure()
       assertGlobEquals(cd0, "edits_.*",
           getInProgressEditsFileName(9));
     } finally {
-      FileUtil.setExecutable(sd0, true);
+      FileUtil.chmod(cd0.getAbsolutePath(), "755");
 
       LOG.info("Shutting down...");
       if (cluster != null) {
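The LINE_SEPARATOR change in TestFsck matters because fsck output is written with println, which emits "\r\n" on Windows; splitting on a hardcoded "\n" leaves a trailing '\r' on every line, and Java's '.' does not match line terminators by default, so matches() against the corrupt-block pattern silently fails. A small sketch of the failure mode (the output string is illustrative, not actual fsck output):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LineSplitSketch {
      public static void main(String[] args) {
        Pattern p = Pattern.compile(".*Corrupt blocks:\t\t([0123456789]*).*");
        String sep = System.getProperty("line.separator");  // "\r\n" on Windows
        String output = "Status: CORRUPT" + sep + " Corrupt blocks:\t\t3" + sep;

        // Splitting on "\n" would leave a trailing '\r' on Windows, which
        // '.' cannot consume, so matches() would return false there.
        for (String line : output.split(sep)) {  // platform-safe split
          Matcher m = p.matcher(line);
          if (m.matches()) {
            System.out.println("corrupt blocks = " + m.group(1));
          }
        }
      }
    }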
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index cf624b75aa..45965cb435 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -130,7 +130,8 @@ public void testNameNodeMXBeanInfo() throws Exception {
 
       // This will cause the first dir to fail.
       File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
-      assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
+      assertEquals(0, FileUtil.chmod(
+          new File(failedNameDir, "current").getAbsolutePath(), "000"));
       cluster.getNameNodeRpc().rollEditLog();
 
       nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
@@ -150,7 +151,8 @@ public void testNameNodeMXBeanInfo() throws Exception {
     } finally {
       if (cluster != null) {
         for (URI dir : cluster.getNameDirs(0)) {
-          FileUtil.chmod(new File(dir).toString(), "700");
+          FileUtil.chmod(
+              new File(new File(dir), "current").getAbsolutePath(), "755");
         }
         cluster.shutdown();
       }
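Across these tests the patch converges on one idiom for simulating a failed storage directory: chmod the "current" subdirectory (the directory the NameNode actually writes) to "000" via FileUtil.chmod, assert the returned exit code is 0, and restore "755" in a finally block so later cleanup can delete the tree. A condensed sketch of that idiom, assuming JUnit and Hadoop's FileUtil on the classpath (the directory path is hypothetical):

    import static org.junit.Assert.assertEquals;

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class FailedStorageDirSketch {
      public static void main(String[] args) throws Exception {
        File storageRoot = new File("build/test/name1");     // hypothetical name dir
        File currentDir = new File(storageRoot, "current");  // what the NN actually touches
        currentDir.mkdirs();
        try {
          // Make the directory unusable; FileUtil.chmod returns the exit
          // code of the underlying chmod/winutils call, so 0 means success.
          assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
          // ... exercise the code that must notice the failed directory ...
        } finally {
          // Always restore permissions, or later tests cannot delete the dir.
          FileUtil.chmod(currentDir.getAbsolutePath(), "755");
        }
      }
    }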