diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 01de9b1bec..cc6758f0fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -626,6 +626,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8290. WebHDFS calls before namesystem initialization can cause
     NullPointerException. (cnauroth)
 
+    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
+    (surendra singh lilhore via Xiaoyu Yao)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 28e150c169..83e52bc05c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -739,7 +739,7 @@ public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
       .setClientMachine(
           newNode.getFileUnderConstructionFeature().getClientMachine())
       .setOverwrite(overwrite)
-      .setStoragePolicyId(newNode.getStoragePolicyID());
+      .setStoragePolicyId(newNode.getLocalStoragePolicyID());
 
     AclFeature f = newNode.getAclFeature();
     if (f != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 9621dc8869..5e3b55f5e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@
 import com.google.common.collect.Lists;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -1172,4 +1173,48 @@ public void testGetAllStoragePolicies() throws Exception {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
+    //HDFS8219
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(REPLICATION)
+        .storageTypes(
+            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
+        .build();
+    cluster.waitActive();
+    final DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      final String file = "/testScheduleWithinSameNode/file";
+      Path dir = new Path("/testScheduleWithinSameNode");
+      fs.mkdirs(dir);
+      // 2. Set Dir policy
+      fs.setStoragePolicy(dir, "COLD");
+      // 3. Create file
+      final FSDataOutputStream out = fs.create(new Path(file));
+      out.writeChars("testScheduleWithinSameNode");
+      out.close();
+      // 4. Set Dir policy
+      fs.setStoragePolicy(dir, "HOT");
+      HdfsFileStatus status = fs.getClient().getFileInfo(file);
+      // 5. get file policy, it should be parent policy.
+      Assert
+          .assertTrue(
+              "File storage policy should be HOT",
+              status.getStoragePolicy()
+                  == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+      // 6. restart NameNode for reloading edits logs.
+      cluster.restartNameNode(true);
+      // 7. get file policy, it should be parent policy.
+      status = fs.getClient().getFileInfo(file);
+      Assert
+          .assertTrue(
+              "File storage policy should be HOT",
+              status.getStoragePolicy()
+                  == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }