From 78ec38b2ede8bdf3874b2ae051af9580007a9ba1 Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Tue, 24 Nov 2015 16:01:55 -0800
Subject: [PATCH] HDFS-8807. dfs.datanode.data.dir does not handle spaces
 between storageType and URI correctly. Contributed by Anu Engineer

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 .../hdfs/server/datanode/StorageLocation.java  |  2 +-
 .../hdfs/server/datanode/TestDataDirs.java     | 29 ++++++++++++++-----
 3 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 92897b9ede..db49e54ab0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1681,6 +1681,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9314. Improve BlockPlacementPolicyDefault's picking of excess
     replicas. (Xiao Chen via mingma)
 
+    HDFS-8807. dfs.datanode.data.dir does not handle spaces between
+    storageType and URI correctly. (Anu Engineer via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index 78734595ae..46e8e8a844 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -87,7 +87,7 @@ public static StorageLocation parse(String rawLocation)
 
     if (matcher.matches()) {
       String classString = matcher.group(1);
-      location = matcher.group(2);
+      location = matcher.group(2).trim();
 
       if (!classString.isEmpty()) {
         storageType = StorageType.valueOf(StringUtils.toUpperCase(classString));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index 396945e0e3..d41c13e612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -36,7 +36,7 @@
 
 public class TestDataDirs {
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testDataDirParsing() throws Throwable {
     Configuration conf = new Configuration();
     List<StorageLocation> locations;
@@ -46,12 +46,16 @@ public void testDataDirParsing() throws Throwable {
     File dir3 = new File("/dir3");
     File dir4 = new File("/dir4");
+    File dir5 = new File("/dir5");
+    File dir6 = new File("/dir6");
 
     // Verify that a valid string is correctly parsed, and that storage
-    // type is not case-sensitive
-    String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
+    // type is not case-sensitive and we are able to handle white-space between
+    // storage type and URI.
+    String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3," +
+        "[ram_disk]/dir4,[disk]/dir5, [disk] /dir6, [disk] ";
     conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
     locations = DataNode.getStorageLocations(conf);
-    assertThat(locations.size(), is(5));
+    assertThat(locations.size(), is(8));
     assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
     assertThat(locations.get(0).getUri(), is(dir0.toURI()));
     assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
@@ -62,6 +66,14 @@ public void testDataDirParsing() throws Throwable {
     assertThat(locations.get(3).getUri(), is(dir3.toURI()));
     assertThat(locations.get(4).getStorageType(), is(StorageType.RAM_DISK));
     assertThat(locations.get(4).getUri(), is(dir4.toURI()));
+    assertThat(locations.get(5).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(5).getUri(), is(dir5.toURI()));
+    assertThat(locations.get(6).getStorageType(), is(StorageType.DISK));
+    assertThat(locations.get(6).getUri(), is(dir6.toURI()));
+
+    // not asserting the 8th URI since it is incomplete and it is in the
+    // test set to make sure that we don't fail if we get URIs like that.
+    assertThat(locations.get(7).getStorageType(), is(StorageType.DISK));
 
     // Verify that an unrecognized storage type result in an exception.
     String locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
@@ -69,7 +81,7 @@ public void testDataDirParsing() throws Throwable {
     try {
       locations = DataNode.getStorageLocations(conf);
       fail();
-    } catch(IllegalArgumentException iae) {
+    } catch (IllegalArgumentException iae) {
       DataNode.LOG.info("The exception is expected.", iae);
     }
 
@@ -85,12 +97,13 @@ public void testDataDirParsing() throws Throwable {
     assertThat(locations.get(1).getUri(), is(dir1.toURI()));
   }
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testDataDirValidation() throws Throwable {
-    
+
     DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
     doThrow(new IOException()).doThrow(new IOException()).doNothing()
-      .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
+        .when(diskChecker)
+        .checkDir(any(LocalFileSystem.class), any(Path.class));
     LocalFileSystem fs = mock(LocalFileSystem.class);
     AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();