diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 5d0d9d6975..7152f02d3a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -478,7 +478,8 @@ FSDataOutputStream create(PathData item, boolean lazyPersist)
         createFlags.add(LAZY_PERSIST);
       }
       return create(item.path,
-                    null,
+                    FsPermission.getFileDefault().applyUMask(
+                        FsPermission.getUMask(getConf())),
                     createFlags,
                     getConf().getInt("io.file.buffer.size", 4096),
                     lazyPersist ? 1 : getDefaultReplication(item.path),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index 0877636464..f6c8672b1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -47,3 +47,7 @@
     HDFS-7066. LazyWriter#evictBlocks misses a null check for replicaState.
     (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7064. Fix unit test failures in HDFS-6581 branch. (Xiaoyu Yao via
+    Arpit Agarwal)
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 276aa5b946..149ca27a35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -381,8 +381,9 @@ void deleteBPDirectories(String bpid, boolean force) throws IOException {
           !FileUtil.fullyDelete(finalizedDir)) {
         throw new IOException("Failed to delete " + finalizedDir);
       }
-      if (!DatanodeUtil.dirNoFilesRecursive(lazypersistDir) ||
-          !FileUtil.fullyDelete(lazypersistDir)) {
+      if (lazypersistDir.exists() &&
+          ((!DatanodeUtil.dirNoFilesRecursive(lazypersistDir) ||
+            !FileUtil.fullyDelete(lazypersistDir)))) {
         throw new IOException("Failed to delete " + lazypersistDir);
       }
       FileUtil.fullyDelete(tmpDir);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index c0b4f9a869..94af015c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -51,7 +51,7 @@ public void testDataDirParsing() throws Throwable {
     String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
     conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
     locations = DataNode.getStorageLocations(conf);
-    assertThat(locations.size(), is(4));
+    assertThat(locations.size(), is(5));
     assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
     assertThat(locations.get(0).getUri(), is(dir0.toURI()));
     assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));