From 9f4bf3bdf9e74800643477cfb18361e01cf6859c Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Mon, 11 Jan 2016 11:31:59 -0800 Subject: [PATCH] HDFS-9621. getListing wrongly associates Erasure Coding policy to pre-existing replicated files under an EC directory. Contributed by Jing Zhao. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/namenode/FSDirErasureCodingOp.java | 1 - .../namenode/FSDirStatAndListingOp.java | 81 ++++++++++--------- .../hdfs/server/namenode/FSDirectory.java | 6 +- .../hdfs/server/namenode/FSEditLogLoader.java | 8 +- .../hdfs/server/namenode/INodesInPath.java | 4 +- .../hdfs/TestErasureCodingPolicies.java | 58 +++++++++++++ .../hadoop/hdfs/server/namenode/TestFsck.java | 19 +++-- 8 files changed, 125 insertions(+), 55 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 095766c716..f9e4cfd478 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -406,6 +406,9 @@ Trunk (Unreleased) HDFS-9615. Fix variable name typo in DFSConfigKeys. (Ray Chiang via Arpit Agarwal) + HDFS-9621. getListing wrongly associates Erasure Coding policy to pre-existing + replicated files under an EC directory. (jing9) + BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS HDFS-7347. Configurable erasure coding policy for individual files and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 4514097f8b..0051c5f301 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -218,7 +218,6 @@ static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn, static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn) throws IOException { assert fsn.hasReadLock(); - return fsn.getErasureCodingPolicyManager().getPolicies(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index d8baa6b3f8..0aa073bea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -257,12 +257,14 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip, .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; if (!targetNode.isDirectory()) { + // return the file's status. 
note that the iip already includes the
+        // target INode
         INodeAttributes nodeAttrs = getINodeAttributes(
             fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode, snapshot);
         return new DirectoryListing(
             new HdfsFileStatus[]{ createFileStatus(
-                fsd, HdfsFileStatus.EMPTY_NAME, targetNode, nodeAttrs,
+                fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
                 needLocation, parentStoragePolicy, snapshot, isRawPath, iip)
             }, 0);
       }
@@ -276,7 +278,7 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
     int locationBudget = fsd.getLsLimit();
     int listingCnt = 0;
     HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
-    for (int i=0; i<numOfListing && locationBudget>0; i++) {
+    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
       INode cur = contents.get(startChild+i);
       byte curPolicy = isSuperUser && !cur.isSymlink()?
           cur.getLocalStoragePolicyID():
@@ -284,9 +286,11 @@ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       INodeAttributes nodeAttrs = getINodeAttributes(
           fsd, src, cur.getLocalNameBytes(), cur, snapshot);
-      listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(),
-          cur, nodeAttrs, needLocation, getStoragePolicyID(curPolicy,
-          parentStoragePolicy), snapshot, isRawPath, iip);
+      final INodesInPath iipWithChild = INodesInPath.append(iip, cur,
+          cur.getLocalNameBytes());
+      listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), nodeAttrs,
+          needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
+          snapshot, isRawPath, iipWithChild);
       listingCnt++;
       if (needLocation) {
         // Once we hit lsLimit locations, stop.
@@ -341,8 +345,7 @@ private static DirectoryListing getSnapshotsListing(
           fsd, src, sRoot.getLocalNameBytes(),
           node, Snapshot.CURRENT_STATE_ID);
       listing[i] = createFileStatus(
-          fsd, sRoot.getLocalNameBytes(),
-          sRoot, nodeAttrs,
+          fsd, sRoot.getLocalNameBytes(), nodeAttrs,
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
           Snapshot.CURRENT_STATE_ID, false,
           INodesInPath.fromINode(sRoot));
@@ -362,31 +365,31 @@ private static DirectoryListing getReservedListing(FSDirectory fsd) {

   /** Get the file info for a specific file.
    * @param fsd FSDirectory
-   * @param src The string representation of the path to the file
+   * @param iip The path to the file; the target file itself is included
    * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @param includeStoragePolicy whether to include storage policy
    * @return object containing information regarding the file
    *         or null if file not found
    */
   static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
+      FSDirectory fsd, String path, INodesInPath iip, boolean isRawPath,
       boolean includeStoragePolicy) throws IOException {
     fsd.readLock();
     try {
-      final INode i = src.getLastINode();
-      if (i == null) {
+      final INode node = iip.getLastINode();
+      if (node == null) {
         return null;
       }
-      byte policyId = includeStoragePolicy && !i.isSymlink() ?
-          i.getStoragePolicyID() :
+      byte policyId = includeStoragePolicy && !node.isSymlink() ?
+          node.getStoragePolicyID() :
           HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       INodeAttributes nodeAttrs = getINodeAttributes(fsd, path,
           HdfsFileStatus.EMPTY_NAME,
-          i, src.getPathSnapshotId());
-      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, i, nodeAttrs,
-          policyId, src.getPathSnapshotId(), isRawPath, src);
+          node, iip.getPathSnapshotId());
+      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
+          policyId, iip.getPathSnapshotId(), isRawPath, iip);
     } finally {
       fsd.readUnlock();
     }
@@ -423,57 +426,60 @@ static HdfsFileStatus getFileInfo(
    *
    * @param fsd FSDirectory
    * @param path the local name
-   * @param node inode
    * @param needLocation if block locations need to be included or not
    * @param isRawPath true if this is being called on behalf of a path in
    *                  /.reserved/raw
+   * @param iip the INodesInPath containing the target INode and its ancestors
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
   private static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs,
+      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
       boolean needLocation, byte storagePolicy, int snapshot, boolean isRawPath,
       INodesInPath iip) throws IOException {
     if (needLocation) {
-      return createLocatedFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
+      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy,
           snapshot, isRawPath, iip);
     } else {
-      return createFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
+      return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
           snapshot, isRawPath, iip);
     }
   }

   /**
-   * Create FileStatus by file INode
+   * Create FileStatus for a given INodeFile.
+   * @param iip The INodesInPath containing the INodeFile and its ancestors
    */
   static HdfsFileStatus createFileStatusForEditLog(
-      FSDirectory fsd, String fullPath, byte[] path, INode node,
+      FSDirectory fsd, String fullPath, byte[] path,
       byte storagePolicy, int snapshot, boolean isRawPath,
       INodesInPath iip) throws IOException {
     INodeAttributes nodeAttrs = getINodeAttributes(
-        fsd, fullPath, path, node, snapshot);
-    return createFileStatus(fsd, path, node, nodeAttrs,
-        storagePolicy, snapshot, isRawPath, iip);
+        fsd, fullPath, path, iip.getLastINode(), snapshot);
+    return createFileStatus(fsd, path, nodeAttrs, storagePolicy,
+        snapshot, isRawPath, iip);
   }

   /**
-   * Create FileStatus by file INode
+   * Create file status for a given INode
+   * @param iip the INodesInPath containing the target INode and its ancestors
    */
   static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node,
+      FSDirectory fsd, byte[] path,
       INodeAttributes nodeAttrs, byte storagePolicy, int snapshot,
       boolean isRawPath, INodesInPath iip) throws IOException {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
     final boolean isEncrypted;
+    final INode node = iip.getLastINode();
     final FileEncryptionInfo feInfo = isRawPath ?
null : FSDirEncryptionZoneOp .getFileEncryptionInfo(fsd, node, snapshot, iip); - final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( - fsd.getFSNamesystem(), iip); + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp + .getErasureCodingPolicy(fsd.getFSNamesystem(), iip); if (node.isFile()) { final INodeFile fileNode = node.asFile(); @@ -481,11 +487,9 @@ static HdfsFileStatus createFileStatus( replication = fileNode.getFileReplication(snapshot); blocksize = fileNode.getPreferredBlockSize(); isEncrypted = (feInfo != null) - || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, - INodesInPath.fromINode(node))); + || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip)); } else { - isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, - INodesInPath.fromINode(node)); + isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip); } int childrenNum = node.isDirectory() ? @@ -517,9 +521,10 @@ private static INodeAttributes getINodeAttributes( /** * Create FileStatus with location info by file INode + * @param iip the INodesInPath containing the target INode and its ancestors */ private static HdfsLocatedFileStatus createLocatedFileStatus( - FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs, + FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs, byte storagePolicy, int snapshot, boolean isRawPath, INodesInPath iip) throws IOException { assert fsd.hasReadLock(); @@ -528,6 +533,8 @@ private static HdfsLocatedFileStatus createLocatedFileStatus( long blocksize = 0; LocatedBlocks loc = null; final boolean isEncrypted; + final INode node = iip.getLastINode(); + final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp .getFileEncryptionInfo(fsd, node, snapshot, iip); final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( @@ -550,11 +557,9 @@ private static HdfsLocatedFileStatus createLocatedFileStatus( loc = new LocatedBlocks(); } isEncrypted = (feInfo != null) - || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, - INodesInPath.fromINode(node))); + || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip)); } else { - isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, - INodesInPath.fromINode(node)); + isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip); } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 661d788efc..6ca7a03c05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1665,11 +1665,11 @@ void resetLastInodeIdWithoutChecking(long newValue) { INodeAttributes getAttributes(String fullPath, byte[] path, INode node, int snapshot) { - INodeAttributes nodeAttrs = node; + INodeAttributes nodeAttrs; if (attributeProvider != null) { nodeAttrs = node.getSnapshotINode(snapshot); - fullPath = fullPath + (fullPath.endsWith(Path.SEPARATOR) ? "" - : Path.SEPARATOR) + fullPath = fullPath + + (fullPath.endsWith(Path.SEPARATOR) ? 
"" : Path.SEPARATOR) + DFSUtil.bytes2String(path); nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 23683d336e..a74dd7fcbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -375,13 +375,14 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine, addCloseOp.storagePolicyId); + assert newFile != null; iip = INodesInPath.replace(iip, iip.length() - 1, newFile); fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId()); // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( - fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile, + fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, @@ -400,8 +401,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( - fsNamesys.dir, path, - HdfsFileStatus.EMPTY_NAME, newFile, + fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, @@ -478,7 +478,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( - fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file, + fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index 72ca6ff683..1d540b7fce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -254,7 +254,7 @@ public static INodesInPath replace(INodesInPath iip, int pos, INode inode) { */ public static INodesInPath append(INodesInPath iip, INode child, byte[] childName) { - Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0); + Preconditions.checkArgument(iip.length() > 0); Preconditions.checkArgument(iip.getLastINode() != null && iip .getLastINode().isDirectory()); INode[] inodes = new INode[iip.length() + 1]; @@ -263,7 +263,7 @@ public static INodesInPath append(INodesInPath iip, INode child, byte[][] path = new byte[iip.path.length + 1][]; System.arraycopy(iip.path, 0, path, 0, path.length - 1); path[path.length - 1] = childName; - return new INodesInPath(inodes, path, false, iip.snapshotId); + return 
new INodesInPath(inodes, path, iip.isSnapshot, iip.snapshotId);
   }

   private final byte[][] path;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index dcb528cd54..1328e3e639 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -65,6 +66,63 @@ public void shutdownCluster() throws IOException {
     }
   }

+  /**
+   * For pre-existing files (with replicated blocks) in an EC dir, getListing
+   * should report them as non-EC.
+   */
+  @Test(timeout=60000)
+  public void testReplicatedFileUnderECDir() throws IOException {
+    final Path dir = new Path("/ec");
+    final Path replicatedFile = new Path(dir, "replicatedFile");
+    // create a file with replicated blocks
+    DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
+
+    // set EC policy on dir
+    fs.setErasureCodingPolicy(dir, null);
+    // create a file which should be using EC
+    final Path ecSubDir = new Path(dir, "ecSubDir");
+    final Path ecFile = new Path(ecSubDir, "ecFile");
+    DFSTestUtil.createFile(fs, ecFile, 0, (short) 1, 0L);
+
+    assertNull(fs.getClient().getFileInfo(replicatedFile.toString())
+        .getErasureCodingPolicy());
+    assertNotNull(fs.getClient().getFileInfo(ecFile.toString())
+        .getErasureCodingPolicy());
+
+    // list "/ec"
+    DirectoryListing listing = fs.getClient().listPaths(dir.toString(),
+        new byte[0], false);
+    HdfsFileStatus[] files = listing.getPartialListing();
+    assertEquals(2, files.length);
+    // the listing is always sorted according to the local name
+    assertEquals(ecSubDir.getName(), files[0].getLocalName());
+    assertNotNull(files[0].getErasureCodingPolicy()); // ecSubDir
+    assertEquals(replicatedFile.getName(), files[1].getLocalName());
+    assertNull(files[1].getErasureCodingPolicy()); // replicatedFile
+
+    // list "/ec/ecSubDir"
+    files = fs.getClient().listPaths(ecSubDir.toString(),
+        new byte[0], false).getPartialListing();
+    assertEquals(1, files.length);
+    assertEquals(ecFile.getName(), files[0].getLocalName());
+    assertNotNull(files[0].getErasureCodingPolicy()); // ecFile
+
+    // list "/"
+    files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
+    assertEquals(1, files.length);
+    assertEquals(dir.getName(), files[0].getLocalName()); // ec
+    assertNotNull(files[0].getErasureCodingPolicy());
+
+    // rename "/ec/ecSubDir/ecFile" to "/ecFile"
+    assertTrue(fs.rename(ecFile, new Path("/ecFile")));
+    files = fs.getClient().listPaths("/", new byte[0], false).getPartialListing();
+    assertEquals(2, files.length);
+    assertEquals(dir.getName(), files[0].getLocalName()); // ec
+    assertNotNull(files[0].getErasureCodingPolicy());
+    assertEquals(ecFile.getName(), files[1].getLocalName());
+    assertNotNull(files[1].getErasureCodingPolicy());
+  }
+
   @Test(timeout = 60000)
   public void testBasicSetECPolicy()
       throws IOException, InterruptedException {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index cc74190530..b54f585cd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -652,14 +652,12 @@ public void testFsckOpenFiles() throws Exception { public void testFsckOpenECFiles() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsckECFile"). setNumFiles(4).build(); - MiniDFSCluster cluster = null; + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10) + .build(); FileSystem fs = null; - String outStr; try { - Configuration conf = new HdfsConfiguration(); - conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build(); - cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); String topDir = "/myDir"; byte[] randomBytes = new byte[3000000]; int seed = 42; @@ -667,7 +665,11 @@ public void testFsckOpenECFiles() throws Exception { cluster.waitActive(); fs = cluster.getFileSystem(); util.createFiles(fs, topDir); + // set topDir to EC when it has replicated files + cluster.getFileSystem().getClient().setErasureCodingPolicy(topDir, null); + // create a new file under topDir + DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L); // Open a EC file for writing and do not close for now Path openFile = new Path(topDir + "/openECFile"); FSDataOutputStream out = fs.create(openFile); @@ -677,8 +679,11 @@ public void testFsckOpenECFiles() throws Exception { writeCount++; } + // make sure the fsck can correctly handle mixed ec/replicated files + runFsck(conf, 0, true, topDir, "-files", "-blocks", "-openforwrite"); + // We expect the filesystem to be HEALTHY and show one open file - outStr = runFsck(conf, 0, true, openFile.toString(), "-files", + String outStr = runFsck(conf, 0, true, openFile.toString(), "-files", "-blocks", "-openforwrite"); assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); assertTrue(outStr.contains("OPENFORWRITE"));
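
Reviewer note (not part of the patch): the core of this change is that getListing now appends each child INode to the directory's INodesInPath (iipWithChild) before resolving the erasure coding policy, so the lookup is answered by the file itself rather than by its parent directory's XAttr. The toy sketch below models why that matters. Node, hasEcPolicy, and EcPolicyLookupSketch are hypothetical names invented for illustration, not HDFS APIs; the real lookup lives in FSDirErasureCodingOp.getErasureCodingPolicy and walks the INodesInPath the same general way.

import java.util.ArrayList;
import java.util.List;

/** Toy model of the policy lookup fixed by HDFS-9621; not the HDFS code. */
public class EcPolicyLookupSketch {
  static final class Node {
    final String name;
    final boolean isFile;
    final boolean striped;    // file was created with EC (striped) blocks
    final boolean hasEcXAttr; // directory carries an EC policy XAttr
    Node(String name, boolean isFile, boolean striped, boolean hasEcXAttr) {
      this.name = name;
      this.isFile = isFile;
      this.striped = striped;
      this.hasEcXAttr = hasEcXAttr;
    }
  }

  // Walk the path from the last INode upward, the way the NameNode resolves
  // an EC policy for a path.
  static boolean hasEcPolicy(List<Node> inodesInPath) {
    for (int i = inodesInPath.size() - 1; i >= 0; i--) {
      Node n = inodesInPath.get(i);
      if (n.isFile) {
        // A file answers for itself: a replicated file is never EC, no
        // matter what its ancestor directories declare.
        return n.striped;
      }
      if (n.hasEcXAttr) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Node root = new Node("/", false, false, false);
    Node ecDir = new Node("ec", false, false, true);
    Node replicatedFile = new Node("replicatedFile", true, false, false);

    // Before the fix: getListing resolved the policy against the parent
    // directory's path for every child, so the EC XAttr on /ec leaked onto
    // the pre-existing replicated file.
    List<Node> parentPath = List.of(root, ecDir);
    System.out.println("without child appended: " + hasEcPolicy(parentPath)); // true

    // After the fix: the child is appended (cf. INodesInPath.append) and the
    // lookup terminates at the file itself.
    List<Node> withChild = new ArrayList<>(parentPath);
    withChild.add(replicatedFile);
    System.out.println("with child appended:    " + hasEcPolicy(withChild)); // false
  }
}

This also explains the INodesInPath.append hunk above: the appended child must preserve the original iip's isSnapshot flag and snapshot id, since listings can be served from within a snapshot path.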