diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f44c..bd4e5ed065 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
 
     assert fsd.getFSNamesystem().hasWriteLock();
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -62,8 +61,11 @@ static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
       XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-      xAttrs.add(satisfyXAttr);
-      fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      if (satisfyXAttr != null) {
+        List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+        xAttrs.add(satisfyXAttr);
+        fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      }
     } finally {
       fsd.writeUnlock();
     }
@@ -79,16 +81,19 @@ static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
 
     // TODO: think about optimization here, label the dir instead
     // of the sub-files of the dir.
-    if (inode.isFile()) {
+    if (inode.isFile() && inode.asFile().numBlocks() != 0) {
       candidateNodes.add(inode);
     } else if (inode.isDirectory()) {
       for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-        if (node.isFile()) {
+        if (node.isFile() && node.asFile().numBlocks() != 0) {
           candidateNodes.add(node);
         }
       }
     }
+    if (candidateNodes.isEmpty()) {
+      return null;
+    }
 
     // If node has satisfy xattr, then stop adding it
     // to satisfy movement queue.
     if (inodeHasSatisfyXAttr(candidateNodes)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b88b1..8e08a1ed88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
     }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+      FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+      long lastWrittenTxId = editlog.getLastWrittenTxId();
+      fs.satisfyStoragePolicy(filePath);
+      Assert.assertEquals("Xattr should not be added for the file",
+          lastWrittenTxId, editlog.getLastWrittenTxId());
+      INode inode = cluster.getNameNode().getNamesystem().getFSDirectory()
+          .getINode(filePath.toString());
+      Assert.assertTrue("XAttrFeature should be null for file",
+          inode.getXAttrFeature() == null);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
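
For context, a minimal usage sketch of the behavior this patch establishes: satisfying the storage policy of a zero-length file becomes a no-op at the NameNode (no candidate blocks, so no xattr is set and no edit-log entry is written). This sketch is illustrative only and not part of the patch; it assumes a running HDFS cluster with the SPS feature enabled, and the path /tmp/emptyFile is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SatisfyEmptyFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path empty = new Path("/tmp/emptyFile"); // hypothetical test path
      dfs.create(empty).close();               // creates a zero-length file
      // Before this patch, the call below logged a SetXAttr op and tagged the
      // inode even though there were no blocks to move; with the
      // numBlocks() != 0 guard, the NameNode now skips the file entirely.
      dfs.satisfyStoragePolicy(empty);
    }
  }
}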