From 91c81fdc24709b3caf1f6281c8879ffee08db956 Mon Sep 17 00:00:00 2001
From: Zhe Zhang
Date: Tue, 19 May 2015 13:58:50 -0700
Subject: [PATCH] HDFS-8375. Add cellSize as an XAttr to ECZone. Contributed
 by Vinayakumar B.
---
 .../hadoop/hdfs/protocol/HdfsFileStatus.java  | 14 ++++--
 .../SnapshottableDirectoryStatus.java         |  2 +-
 .../hadoop/hdfs/web/JsonUtilClient.java       |  2 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt      |  8 +--
 .../org/apache/hadoop/hdfs/DFSClient.java     |  7 +--
 .../hadoop/hdfs/DFSStripedInputStream.java    | 10 ++--
 .../hadoop/hdfs/DFSStripedOutputStream.java   |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java    |  9 ++--
 .../hadoop/hdfs/protocol/ClientProtocol.java  |  2 +-
 .../hdfs/protocol/ErasureCodingZoneInfo.java  | 14 +++++-
 .../hdfs/protocol/HdfsLocatedFileStatus.java  |  5 +-
 ...amenodeProtocolServerSideTranslatorPB.java |  3 +-
 .../ClientNamenodeProtocolTranslatorPB.java   |  5 +-
 .../hadoop/hdfs/protocolPB/PBHelper.java      | 15 ++++--
 .../server/blockmanagement/BlockManager.java  | 12 +++--
 .../blockmanagement/DatanodeDescriptor.java   |  4 +-
 .../erasurecode/ErasureCodingWorker.java      |  2 +-
 .../namenode/ErasureCodingZoneManager.java    | 40 ++++++++++++---
 .../namenode/FSDirStatAndListingOp.java       | 21 +++++---
 .../hdfs/server/namenode/FSDirectory.java     |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java    | 49 +++++++------------
 .../server/namenode/NameNodeRpcServer.java    |  5 +-
 .../hdfs/server/namenode/Namesystem.java      |  8 +--
 .../protocol/BlockECRecoveryCommand.java      | 26 +++++-----
 .../hdfs/tools/erasurecode/ECCommand.java     | 14 +++++-
 .../hadoop/hdfs/util/StripedBlockUtil.java    | 49 +++++++++----------
 .../src/main/proto/erasurecoding.proto        |  3 ++
 .../hadoop-hdfs/src/main/proto/hdfs.proto     |  3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   |  2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java     |  4 +-
 .../hdfs/TestDFSStripedInputStream.java       | 10 ++--
 .../hdfs/TestDFSStripedOutputStream.java      |  2 +-
 ...TestDFSStripedOutputStreamWithFailure.java |  2 +-
 .../hadoop/hdfs/TestEncryptionZones.java      |  2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java   | 18 +++---
 .../hdfs/TestFileStatusWithECschema.java      |  2 +-
 .../org/apache/hadoop/hdfs/TestLease.java     |  4 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java   |  2 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java |  2 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java  |  6 ++-
 .../server/namenode/TestAddStripedBlocks.java |  2 +-
 .../server/namenode/TestFSEditLogLoader.java  |  4 +-
 .../hdfs/server/namenode/TestFSImage.java     |  4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java |  4 +-
 .../namenode/TestQuotaWithStripedBlocks.java  |  2 +-
 ...stOfflineImageViewerWithStripedBlocks.java |  2 +-
 .../hdfs/util/TestStripedBlockUtil.java       |  4 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java  |  2 +-
 48 files changed, 244 insertions(+), 174 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index f07973a8e2..8c902b4aae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -49,7 +49,8 @@ public class HdfsFileStatus {
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema schema;
+  private final ECSchema ecSchema;
+  private final int stripeCellSize;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
private final int childrenNum; @@ -76,7 +77,7 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, String group, byte[] symlink, byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo, - byte storagePolicy, ECSchema schema) { + byte storagePolicy, ECSchema ecSchema, int stripeCellSize) { this.length = length; this.isdir = isdir; this.block_replication = (short)block_replication; @@ -96,7 +97,8 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication, this.childrenNum = childrenNum; this.feInfo = feInfo; this.storagePolicy = storagePolicy; - this.schema = schema; + this.ecSchema = ecSchema; + this.stripeCellSize = stripeCellSize; } /** @@ -255,7 +257,11 @@ public final FileEncryptionInfo getFileEncryptionInfo() { } public ECSchema getECSchema() { - return schema; + return ecSchema; + } + + public int getStripeCellSize() { + return stripeCellSize; } public final int getChildrenNum() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java index 813ea266cd..a6c7b10c91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java @@ -61,7 +61,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time, int snapshotNumber, int snapshotQuota, byte[] parentFullPath) { this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time, access_time, permission, owner, group, null, localName, inodeId, - childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); + childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0); this.snapshotNumber = snapshotNumber; this.snapshotQuota = snapshotQuota; this.parentFullPath = parentFullPath; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java index 62f679b727..5635c1b7d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java @@ -132,7 +132,7 @@ static HdfsFileStatus toFileStatus(final Map json, boolean includesType) { blockSize, mTime, aTime, permission, owner, group, symlink, DFSUtilClient.string2Bytes(localName), fileId, childrenNum, null, - storagePolicy, null); + storagePolicy, null, 0); } /** Convert a Json map to an ExtendedBlock object. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt index 1e7dbeaec1..d6c9dba695 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt @@ -227,9 +227,11 @@ (Yi Liu via jing9) HDFS-8320. Erasure coding: consolidate striping-related terminologies. (zhz) - - HDFS-8366. Erasure Coding: Make the timeout parameter of polling blocking queue - configurable in DFSStripedOutputStream. (Li Bo) + + HDFS-8366. 
Erasure Coding: Make the timeout parameter of polling blocking queue + configurable in DFSStripedOutputStream. (Li Bo) HDFS-8378. Erasure Coding: Few improvements for the erasure coding worker. (Rakesh R via waltersu4549) + + HDFS-8375. Add cellSize as an XAttr to ECZone. ( Vinayakumar B via zhz). diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index ffeb568cff..42f48f0959 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1197,7 +1197,8 @@ public DFSInputStream open(String src, int buffersize, boolean verifyChecksum) if (fileInfo != null) { ECSchema schema = fileInfo.getECSchema(); if (schema != null) { - return new DFSStripedInputStream(this, src, verifyChecksum, schema); + return new DFSStripedInputStream(this, src, verifyChecksum, schema, + fileInfo.getStripeCellSize()); } } return new DFSInputStream(this, src, verifyChecksum); @@ -3009,12 +3010,12 @@ public RemoteIterator listEncryptionZones() return new EncryptionZoneIterator(namenode, traceSampler); } - public void createErasureCodingZone(String src, ECSchema schema) + public void createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException { checkOpen(); TraceScope scope = getPathTraceScope("createErasureCodingZone", src); try { - namenode.createErasureCodingZone(src, schema); + namenode.createErasureCodingZone(src, schema, cellSize); } catch (RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, SafeModeException.class, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java index 744d5863af..3b7eb58814 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java @@ -125,12 +125,12 @@ boolean include(long pos) { private final CompletionService readingService; DFSStripedInputStream(DFSClient dfsClient, String src, boolean verifyChecksum, - ECSchema schema) throws IOException { + ECSchema schema, int cellSize) throws IOException { super(dfsClient, src, verifyChecksum); assert schema != null; this.schema = schema; - cellSize = schema.getChunkSize(); + this.cellSize = cellSize; dataBlkNum = (short) schema.getNumDataUnits(); parityBlkNum = (short) schema.getNumParityUnits(); groupSize = dataBlkNum; @@ -189,7 +189,7 @@ private synchronized void blockSeekTo(long target) throws IOException { targetBlockGroup, cellSize, dataBlkNum, parityBlkNum); // The purpose is to get start offset into each block. 
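// Side note (illustrative sketch, not part of this patch): with the cell
// size now carried on HdfsFileStatus instead of being derived from
// ECSchema#getChunkSize(), the client open path shown in DFSClient above
// boils down to the following; the path "/eczone/f" is hypothetical.
//
//   HdfsFileStatus fileInfo = dfsClient.getFileInfo("/eczone/f");
//   ECSchema schema = fileInfo.getECSchema();
//   DFSInputStream in = (schema != null)
//       ? new DFSStripedInputStream(dfsClient, "/eczone/f", true, schema,
//           fileInfo.getStripeCellSize())
//       : new DFSInputStream(dfsClient, "/eczone/f", true);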
long[] offsetsForInternalBlocks = getStartOffsetsForInternalBlocks(schema, - targetBlockGroup, offsetIntoBlockGroup); + cellSize, targetBlockGroup, offsetIntoBlockGroup); Preconditions.checkNotNull(offsetsForInternalBlocks); final ReaderRetryPolicy retry = new ReaderRetryPolicy(); @@ -514,8 +514,8 @@ protected void fetchBlockByteRange(long blockStartOffset, long start, // Refresh the striped block group LocatedStripedBlock blockGroup = getBlockGroupAt(blockStartOffset); - AlignedStripe[] stripes = divideByteRangeIntoStripes(schema, blockGroup, - start, end, buf, offset); + AlignedStripe[] stripes = divideByteRangeIntoStripes(schema, cellSize, + blockGroup, start, end, buf, offset); for (AlignedStripe stripe : stripes) { fetchOneStripe(blockGroup, buf, stripe, corruptedBlockMap); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java index a6480238b5..4399a37f85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java @@ -230,7 +230,7 @@ private StripedDataStreamer getLeadingStreamer() { final ECSchema schema = stat.getECSchema(); final int numParityBlocks = schema.getNumParityUnits(); - cellSize = schema.getChunkSize(); + cellSize = stat.getStripeCellSize(); numDataBlocks = schema.getNumDataUnits(); numAllBlocks = numDataBlocks + numParityBlocks; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 0cd8334cf2..2e21372593 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2281,16 +2281,17 @@ public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) * * @param path Directory to create the ec zone * @param schema ECSchema for the zone. If not specified default will be used. 
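   * <p>Usage sketch (illustrative, not part of this patch): a null schema
   * selects the default schema, and a cellSize of 0 selects the default
   * cell size (the zone manager substitutes
   * HdfsConstants.BLOCK_STRIPED_CELL_SIZE for non-positive values).
   * <pre>
   *   DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
   *   dfs.createErasureCodingZone(new Path("/eczone"), null, 0);
   *   dfs.createErasureCodingZone(new Path("/eczone256k"), null, 256 * 1024);
   * </pre>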
+ * @param cellSize Cellsize for the striped erasure coding * @throws IOException */ - public void createErasureCodingZone(final Path path, final ECSchema schema) - throws IOException { + public void createErasureCodingZone(final Path path, final ECSchema schema, + final int cellSize) throws IOException { Path absF = fixRelativePart(path); new FileSystemLinkResolver() { @Override public Void doCall(final Path p) throws IOException, UnresolvedLinkException { - dfs.createErasureCodingZone(getPathName(p), schema); + dfs.createErasureCodingZone(getPathName(p), schema, cellSize); return null; } @@ -2298,7 +2299,7 @@ public Void doCall(final Path p) throws IOException, public Void next(final FileSystem fs, final Path p) throws IOException { if (fs instanceof DistributedFileSystem) { DistributedFileSystem myDfs = (DistributedFileSystem) fs; - myDfs.createErasureCodingZone(p, schema); + myDfs.createErasureCodingZone(p, schema, cellSize); return null; } throw new UnsupportedOperationException( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index b0b457cf41..4f985ba607 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1463,7 +1463,7 @@ public List listXAttrs(String src) * default */ @AtMostOnce - public void createErasureCodingZone(String src, ECSchema schema) + public void createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZoneInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZoneInfo.java index ec0efbd345..282eeaf60b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZoneInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZoneInfo.java @@ -25,10 +25,12 @@ public class ErasureCodingZoneInfo { private String dir; private ECSchema schema; + private int cellSize; - public ErasureCodingZoneInfo(String dir, ECSchema schema) { + public ErasureCodingZoneInfo(String dir, ECSchema schema, int cellSize) { this.dir = dir; this.schema = schema; + this.cellSize = cellSize; } /** @@ -49,8 +51,16 @@ public ECSchema getSchema() { return schema; } + /** + * Get cellSize for the EC Zone + */ + public int getCellSize() { + return cellSize; + } + @Override public String toString() { - return "Dir: " + getDir() + ", Schema: " + schema; + return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: " + + cellSize; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java index 9194d26258..4701538de9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java @@ -59,10 +59,11 @@ public HdfsLocatedFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, 
String group, byte[] symlink, byte[] path, long fileId, LocatedBlocks locations, - int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy, ECSchema schema) { + int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy, + ECSchema schema, int stripeCellSize) { super(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, symlink, path, fileId, - childrenNum, feInfo, storagePolicy, schema); + childrenNum, feInfo, storagePolicy, schema, stripeCellSize); this.locations = locations; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index bae753b72b..863b217254 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -1408,7 +1408,8 @@ public CreateErasureCodingZoneResponseProto createErasureCodingZone( try { ECSchema schema = req.hasSchema() ? PBHelper.convertECSchema(req .getSchema()) : null; - server.createErasureCodingZone(req.getSrc(), schema); + int cellSize = req.hasCellSize() ? req.getCellSize() : 0; + server.createErasureCodingZone(req.getSrc(), schema, cellSize); return CreateErasureCodingZoneResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 67b1457414..336e3a25a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -1422,7 +1422,7 @@ public BatchedEntries listEncryptionZones(long id) } @Override - public void createErasureCodingZone(String src, ECSchema schema) + public void createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException { final CreateErasureCodingZoneRequestProto.Builder builder = CreateErasureCodingZoneRequestProto.newBuilder(); @@ -1430,6 +1430,9 @@ public void createErasureCodingZone(String src, ECSchema schema) if (schema != null) { builder.setSchema(PBHelper.convertECSchema(schema)); } + if (cellSize > 0) { + builder.setCellSize(cellSize); + } CreateErasureCodingZoneRequestProto req = builder.build(); try { rpcProxy.createErasureCodingZone(null, req); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index a6a356cc9c..4d0f8710cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -1506,7 +1506,8 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) { fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null, fs.hasStoragePolicy() ? 
(byte) fs.getStoragePolicy() : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, - fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null); + fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null, + fs.hasStripeCellSize() ? fs.getStripeCellSize() : 0); } public static SnapshottableDirectoryStatus convert( @@ -1570,6 +1571,7 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) { if(fs.getECSchema() != null) { builder.setEcSchema(PBHelper.convertECSchema(fs.getECSchema())); } + builder.setStripeCellSize(fs.getStripeCellSize()); return builder.build(); } @@ -3157,12 +3159,14 @@ public static ECSchemaProto convertECSchema(ECSchema schema) { public static ErasureCodingZoneInfoProto convertECZoneInfo(ErasureCodingZoneInfo ecZoneInfo) { return ErasureCodingZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir()) - .setSchema(convertECSchema(ecZoneInfo.getSchema())).build(); + .setSchema(convertECSchema(ecZoneInfo.getSchema())) + .setCellSize(ecZoneInfo.getCellSize()).build(); } public static ErasureCodingZoneInfo convertECZoneInfo(ErasureCodingZoneInfoProto ecZoneInfoProto) { return new ErasureCodingZoneInfo(ecZoneInfoProto.getDir(), - convertECSchema(ecZoneInfoProto.getSchema())); + convertECSchema(ecZoneInfoProto.getSchema()), + ecZoneInfoProto.getCellSize()); } public static BlockECRecoveryInfo convertBlockECRecoveryInfo( @@ -3196,9 +3200,11 @@ public static BlockECRecoveryInfo convertBlockECRecoveryInfo( } ECSchema ecSchema = convertECSchema(blockEcRecoveryInfoProto.getEcSchema()); + int cellSize = blockEcRecoveryInfoProto.getCellSize(); return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos, - targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema); + targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema, + cellSize); } public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo( @@ -3224,6 +3230,7 @@ public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo( builder.addAllLiveBlockIndices(convertIntArray(liveBlockIndices)); builder.setEcSchema(convertECSchema(blockEcRecoveryInfo.getECSchema())); + builder.setCellSize(blockEcRecoveryInfo.getCellSize()); return builder.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d296aa8707..28e7b89e06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -1560,14 +1561,14 @@ int computeRecoveryWorkForBlocks(List> blocksToRecover) { assert rw instanceof ErasureCodingWork; assert rw.targets.length > 0; String src = block.getBlockCollection().getName(); - ECSchema ecSchema = null; + ErasureCodingZoneInfo ecZoneInfo = null; try { - ecSchema = namesystem.getECSchemaForPath(src); + ecZoneInfo = namesystem.getErasureCodingZoneInfoForPath(src); } catch (IOException e) { blockLog - 
.warn("Failed to get the EC schema for the file {} ", src); + .warn("Failed to get the EC zone info for the file {} ", src); } - if (ecSchema == null) { + if (ecZoneInfo == null) { blockLog.warn("No EC schema found for the file {}. " + "So cannot proceed for recovery", src); // TODO: we may have to revisit later for what we can do better to @@ -1577,7 +1578,8 @@ int computeRecoveryWorkForBlocks(List> blocksToRecover) { rw.targets[0].getDatanodeDescriptor().addBlockToBeErasureCoded( new ExtendedBlock(namesystem.getBlockPoolId(), block), rw.srcNodes, rw.targets, - ((ErasureCodingWork) rw).liveBlockIndicies, ecSchema); + ((ErasureCodingWork) rw).liveBlockIndicies, + ecZoneInfo.getSchema(), ecZoneInfo.getCellSize()); } else { rw.srcNodes[0].addBlockToBeReplicated(block, targets); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 83d33035bd..47bc7652e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -610,10 +610,10 @@ void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) { */ void addBlockToBeErasureCoded(ExtendedBlock block, DatanodeDescriptor[] sources, DatanodeStorageInfo[] targets, - short[] liveBlockIndices, ECSchema ecSchema) { + short[] liveBlockIndices, ECSchema ecSchema, int cellSize) { assert (block != null && sources != null && sources.length > 0); BlockECRecoveryInfo task = new BlockECRecoveryInfo(block, sources, targets, - liveBlockIndices, ecSchema); + liveBlockIndices, ecSchema, cellSize); erasurecodeBlocks.offer(task); BlockManager.LOG.debug("Adding block recovery task " + task + "to " + getName() + ", current queue size is " + erasurecodeBlocks.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index 4723e9fca3..d227de85cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -269,7 +269,7 @@ private class ReconstructAndTransferBlock implements Runnable { ECSchema schema = recoveryInfo.getECSchema(); dataBlkNum = schema.getNumDataUnits(); parityBlkNum = schema.getNumParityUnits(); - cellSize = schema.getChunkSize(); + cellSize = recoveryInfo.getCellSize(); blockGroup = recoveryInfo.getExtendedBlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java index 6b6add7497..371b8acf55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java @@ -19,12 +19,20 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; + import 
org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.erasurecode.ECSchema; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.EnumSet; @@ -78,17 +86,21 @@ ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException { : inode.getXAttrFeature().getXAttrs(); for (XAttr xAttr : xAttrs) { if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) { - String schemaName = new String(xAttr.getValue()); + ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue()); + DataInputStream dIn=new DataInputStream(bIn); + int cellSize = WritableUtils.readVInt(dIn); + String schemaName = WritableUtils.readString(dIn); ECSchema schema = dir.getFSNamesystem().getECSchemaManager() .getSchema(schemaName); - return new ErasureCodingZoneInfo(inode.getFullPathName(), schema); + return new ErasureCodingZoneInfo(inode.getFullPathName(), schema, + cellSize); } } } return null; } - XAttr createErasureCodingZone(String src, ECSchema schema) + XAttr createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException { assert dir.hasWriteLock(); final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); @@ -113,10 +125,24 @@ XAttr createErasureCodingZone(String src, ECSchema schema) schema = ErasureCodingSchemaManager.getSystemDefaultSchema(); } - // Now persist the schema name in xattr - byte[] schemaBytes = schema.getSchemaName().getBytes(); - final XAttr ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE, - schemaBytes); + if (cellSize <= 0) { + cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE; + } + + // Write the cellsize first and then schema name + final XAttr ecXAttr; + DataOutputStream dOut = null; + try { + ByteArrayOutputStream bOut = new ByteArrayOutputStream(); + dOut = new DataOutputStream(bOut); + WritableUtils.writeVInt(dOut, cellSize); + // Now persist the schema name in xattr + WritableUtils.writeString(dOut, schema.getSchemaName()); + ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE, + bOut.toByteArray()); + } finally { + IOUtils.closeStream(dOut); + } final List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(ecXAttr); FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 7133cf1950..eba5013a5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo; import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -317,7 
+318,7 @@ static HdfsFileStatus getFileInfo( if (fsd.getINode4DotSnapshot(srcs) != null) { return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null, - HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0); } return null; } @@ -385,8 +386,10 @@ static HdfsFileStatus createFileStatus( final FileEncryptionInfo feInfo = isRawPath ? null : fsd.getFileEncryptionInfo(node, snapshot, iip); - final ECSchema schema = fsd.getECSchema(iip); - + final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip); + final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null; + final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0; + if (node.isFile()) { final INodeFile fileNode = node.asFile(); size = fileNode.computeFileSize(snapshot); @@ -417,7 +420,8 @@ static HdfsFileStatus createFileStatus( childrenNum, feInfo, storagePolicy, - schema); + schema, + cellSize); } private static INodeAttributes getINodeAttributes( @@ -464,8 +468,10 @@ private static HdfsLocatedFileStatus createLocatedFileStatus( } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - final ECSchema schema = fsd.getECSchema(iip); - + final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip); + final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null; + final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0; + HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), @@ -473,7 +479,8 @@ private static HdfsLocatedFileStatus createLocatedFileStatus( getPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.getUserName(), nodeAttrs.getGroupName(), node.isSymlink() ? node.asSymlink().getSymlink() : null, path, - node.getId(), loc, childrenNum, feInfo, storagePolicy, schema); + node.getId(), loc, childrenNum, feInfo, storagePolicy, schema, + cellSize); // Set caching information for the located blocks. 
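// Side note (illustrative, not part of this patch): the zone xattr read
// back through getECZoneInfo above is written by ErasureCodingZoneManager
// as a vInt cell size followed by the schema name. A standalone
// round-trip of that layout ("RS-6-3" is an assumed schema name):
//
//   ByteArrayOutputStream bOut = new ByteArrayOutputStream();
//   DataOutputStream dOut = new DataOutputStream(bOut);
//   WritableUtils.writeVInt(dOut, 256 * 1024);    // cell size first
//   WritableUtils.writeString(dOut, "RS-6-3");    // then schema name
//   byte[] xattrValue = bOut.toByteArray();
//
//   DataInputStream dIn =
//       new DataInputStream(new ByteArrayInputStream(xattrValue));
//   int cellSize = WritableUtils.readVInt(dIn);           // 262144
//   String schemaName = WritableUtils.readString(dIn);    // "RS-6-3"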
if (loc != null) { CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index bb8282d1da..735ea5a928 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1230,11 +1230,11 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId, } } - XAttr createErasureCodingZone(String src, ECSchema schema) + XAttr createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException { writeLock(); try { - return ecZoneManager.createErasureCodingZone(src, schema); + return ecZoneManager.createErasureCodingZone(src, schema, cellSize); } finally { writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6643dfe813..60f86d6fa3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7555,14 +7555,14 @@ BatchedListEntries listEncryptionZones(long prevId) * @param srcArg the path of a directory which will be the root of the * erasure coding zone. The directory must be empty. * @param schema ECSchema for the erasure coding zone - * + * @param cellSize Cell size of stripe * @throws AccessControlException if the caller is not the superuser. * @throws UnresolvedLinkException if the path can't be resolved. * @throws SafeModeException if the Namenode is in safe mode. 
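   * <p>Note (illustrative, not part of this patch): on the shell side the
   * new -c option of ECCommand (later in this patch) accepts binary
   * prefixes, so a cell size given as "256k" arrives here as 262144:
   * <pre>
   *   int cellSize =
   *       (int) StringUtils.TraditionalBinaryPrefix.string2long("256k");
   * </pre>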
*/ void createErasureCodingZone(final String srcArg, final ECSchema schema, - final boolean logRetryCache) throws IOException, UnresolvedLinkException, - SafeModeException, AccessControlException { + int cellSize, final boolean logRetryCache) throws IOException, + UnresolvedLinkException, SafeModeException, AccessControlException { String src = srcArg; HdfsFileStatus resultingStat = null; FSPermissionChecker pc = null; @@ -7585,7 +7585,7 @@ void createErasureCodingZone(final String srcArg, final ECSchema schema, checkNameNodeSafeMode("Cannot create erasure coding zone on " + src); src = dir.resolvePath(pc, src, pathComponents); - final XAttr ecXAttr = dir.createErasureCodingZone(src, schema); + final XAttr ecXAttr = dir.createErasureCodingZone(src, schema, cellSize); List xAttrs = Lists.newArrayListWithCapacity(1); xAttrs.add(ecXAttr); getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); @@ -7604,9 +7604,9 @@ void createErasureCodingZone(final String srcArg, final ECSchema schema, */ ErasureCodingInfo getErasureCodingInfo(String src) throws AccessControlException, UnresolvedLinkException, IOException { - ECSchema schema = getECSchemaForPath(src); - if (schema != null) { - return new ErasureCodingInfo(src, schema); + ErasureCodingZoneInfo zoneInfo = getErasureCodingZoneInfo(src); + if (zoneInfo != null) { + return new ErasureCodingInfo(src, zoneInfo.getSchema()); } return null; } @@ -7614,21 +7614,13 @@ ErasureCodingInfo getErasureCodingInfo(String src) throws AccessControlException /** * Get the erasure coding zone information for specified path */ - ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws AccessControlException, - UnresolvedLinkException, IOException { + ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) + throws AccessControlException, UnresolvedLinkException, IOException { checkOperation(OperationCategory.READ); - final byte[][] pathComponents = FSDirectory - .getPathComponentsForReservedPath(src); - final FSPermissionChecker pc = getPermissionChecker(); readLock(); try { checkOperation(OperationCategory.READ); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath(src, true); - if (isPermissionEnabled) { - dir.checkPathAccess(pc, iip, FsAction.READ); - } - return dir.getECZoneInfo(iip); + return getErasureCodingZoneInfoForPath(src); } finally { readUnlock(); } @@ -7849,24 +7841,17 @@ private static void enableAsyncAuditLog() { } @Override - public ECSchema getECSchemaForPath(String src) throws IOException { - checkOperation(OperationCategory.READ); + public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src) + throws IOException { final byte[][] pathComponents = FSDirectory .getPathComponentsForReservedPath(src); final FSPermissionChecker pc = getPermissionChecker(); - readLock(); - try { - checkOperation(OperationCategory.READ); - src = dir.resolvePath(pc, src, pathComponents); - final INodesInPath iip = dir.getINodesInPath(src, true); - if (isPermissionEnabled) { - dir.checkPathAccess(pc, iip, FsAction.READ); - } - // Get schema set for the zone - return dir.getECSchema(iip); - } finally { - readUnlock(); + src = dir.resolvePath(pc, src, pathComponents); + final INodesInPath iip = dir.getINodesInPath(src, true); + if (isPermissionEnabled) { + dir.checkPathAccess(pc, iip, FsAction.READ); } + return dir.getECZoneInfo(iip); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 5a69b2f2e0..747f5283d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1824,7 +1824,7 @@ public BatchedEntries listEncryptionZones( } @Override // ClientProtocol - public void createErasureCodingZone(String src, ECSchema schema) + public void createErasureCodingZone(String src, ECSchema schema, int cellSize) throws IOException { checkNNStartup(); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); @@ -1833,7 +1833,8 @@ public void createErasureCodingZone(String src, ECSchema schema) } boolean success = false; try { - namesystem.createErasureCodingZone(src, schema, cacheEntry != null); + namesystem.createErasureCodingZone(src, schema, cellSize, + cacheEntry != null); success = true; } finally { RetryCache.setState(cacheEntry, success); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java index e6c7fc0f17..a32e800e51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.util.RwLock; @@ -51,12 +52,13 @@ public interface Namesystem extends RwLock, SafeMode { public boolean isInSnapshot(BlockCollection bc); /** - * Gets the ECSchema for the specified path + * Gets the ECZone info for path * * @param src * - path - * @return ECSchema + * @return {@link ErasureCodingZoneInfo} * @throws IOException */ - public ECSchema getECSchemaForPath(String src) throws IOException; + public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src) + throws IOException; } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java index 61e49e933d..56a1546d29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECRecoveryCommand.java @@ -78,25 +78,22 @@ public static class BlockECRecoveryInfo { private StorageType[] targetStorageTypes; private final short[] liveBlockIndices; private final ECSchema ecSchema; + private final int cellSize; public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources, DatanodeStorageInfo[] targetDnStorageInfo, short[] liveBlockIndices, - ECSchema ecSchema) { - this.block = block; - this.sources = sources; - this.targets = DatanodeStorageInfo.toDatanodeInfos(targetDnStorageInfo); - this.targetStorageIDs = DatanodeStorageInfo - .toStorageIDs(targetDnStorageInfo); - this.targetStorageTypes = 
DatanodeStorageInfo - .toStorageTypes(targetDnStorageInfo); - this.liveBlockIndices = liveBlockIndices; - this.ecSchema = ecSchema; + ECSchema ecSchema, int cellSize) { + this(block, sources, DatanodeStorageInfo + .toDatanodeInfos(targetDnStorageInfo), DatanodeStorageInfo + .toStorageIDs(targetDnStorageInfo), DatanodeStorageInfo + .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecSchema, + cellSize); } - + public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources, DatanodeInfo[] targets, String[] targetStorageIDs, StorageType[] targetStorageTypes, short[] liveBlockIndices, - ECSchema ecSchema) { + ECSchema ecSchema, int cellSize) { this.block = block; this.sources = sources; this.targets = targets; @@ -104,6 +101,7 @@ public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources, this.targetStorageTypes = targetStorageTypes; this.liveBlockIndices = liveBlockIndices; this.ecSchema = ecSchema; + this.cellSize = cellSize; } public ExtendedBlock getExtendedBlock() { @@ -134,6 +132,10 @@ public ECSchema getECSchema() { return ecSchema; } + public int getCellSize() { + return cellSize; + } + @Override public String toString() { return new StringBuilder().append("BlockECRecoveryInfo(\n ") diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java index 2b6a6a5206..2d8220851f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.shell.PathData; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.util.StringUtils; @@ -88,14 +89,23 @@ static class CreateECZoneCommand extends ECCommand { + "Options :\n" + " -s : EC schema name to encode files. " + "If not passed default schema will be used\n" + + " -c : cell size to use for striped encoding files." + + " If not passed default cellsize of " + + HdfsConstants.BLOCK_STRIPED_CELL_SIZE + " will be used\n" + " : Path to an empty directory. 
Under this directory " + "files will be encoded using specified schema"; private String schemaName; + private int cellSize = 0; private ECSchema schema = null; @Override protected void processOptions(LinkedList args) throws IOException { schemaName = StringUtils.popOptionWithArgument("-s", args); + String cellSizeStr = StringUtils.popOptionWithArgument("-c", args); + if (cellSizeStr != null) { + cellSize = (int) StringUtils.TraditionalBinaryPrefix + .string2long(cellSizeStr); + } if (args.isEmpty()) { throw new HadoopIllegalArgumentException(" is missing"); } @@ -131,7 +141,7 @@ protected void processPath(PathData item) throws IOException { throw new HadoopIllegalArgumentException(sb.toString()); } } - dfs.createErasureCodingZone(item.path, schema); + dfs.createErasureCodingZone(item.path, schema, cellSize); out.println("EC Zone created successfully at " + item.path); } catch (IOException e) { throw new IOException("Unable to create EC zone for the path " @@ -213,4 +223,4 @@ protected void processOptions(LinkedList args) throws IOException { out.println(sb.toString()); } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java index 2fa3fdf6ae..6f7dcb1478 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java @@ -306,6 +306,7 @@ public static void decodeAndFillBuffer(final byte[][] decodeInputs, byte[] buf, * {@link AlignedStripe}. * @param ecSchema The codec schema for the file, which carries the numbers * of data / parity blocks, as well as cell size + * @param cellSize Cell size of stripe * @param blockGroup The striped block group * @param rangeStartInBlockGroup The byte range's start offset in block group * @param rangeEndInBlockGroup The byte range's end offset in block group @@ -315,28 +316,29 @@ public static void decodeAndFillBuffer(final byte[][] decodeInputs, byte[] buf, * At most 5 stripes will be generated from each logical range, as * demonstrated in the header of {@link AlignedStripe}. 
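   * <p>Call sketch (illustrative): the cell size is now passed in
   * explicitly rather than read from the schema's chunk size, e.g.
   * <pre>
   *   AlignedStripe[] stripes = divideByteRangeIntoStripes(
   *       ecSchema, cellSize, blockGroup, rangeStartInBlockGroup,
   *       rangeEndInBlockGroup, buf, offsetInBuf);
   * </pre>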
*/ - public static AlignedStripe[] divideByteRangeIntoStripes ( - ECSchema ecSchema, LocatedStripedBlock blockGroup, + public static AlignedStripe[] divideByteRangeIntoStripes(ECSchema ecSchema, + int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf, int offsetInBuf) { // TODO: change ECSchema naming to use cell size instead of chunk size // Step 0: analyze range and calculate basic parameters - int cellSize = ecSchema.getChunkSize(); int dataBlkNum = ecSchema.getNumDataUnits(); // Step 1: map the byte range to StripingCells - StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, blockGroup, - rangeStartInBlockGroup, rangeEndInBlockGroup); + StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize, + blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup); // Step 2: get the unmerged ranges on each internal block - VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cells); + VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize, + cells); // Step 3: merge into at most 5 stripes AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges); // Step 4: calculate each chunk's position in destination buffer - calcualteChunkPositionsInBuf(ecSchema, stripes, cells, buf, offsetInBuf); + calcualteChunkPositionsInBuf(ecSchema, cellSize, stripes, cells, buf, + offsetInBuf); // Step 5: prepare ALLZERO blocks prepareAllZeroChunks(blockGroup, buf, stripes, cellSize, dataBlkNum); @@ -351,19 +353,18 @@ public static AlignedStripe[] divideByteRangeIntoStripes ( */ @VisibleForTesting private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema, - LocatedStripedBlock blockGroup, + int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup, long rangeEndInBlockGroup) { Preconditions.checkArgument( rangeStartInBlockGroup <= rangeEndInBlockGroup && rangeEndInBlockGroup < blockGroup.getBlockSize()); - int cellSize = ecSchema.getChunkSize(); int len = (int) (rangeEndInBlockGroup - rangeStartInBlockGroup + 1); int firstCellIdxInBG = (int) (rangeStartInBlockGroup / cellSize); int lastCellIdxInBG = (int) (rangeEndInBlockGroup / cellSize); int numCells = lastCellIdxInBG - firstCellIdxInBG + 1; StripingCell[] cells = new StripingCell[numCells]; - cells[0] = new StripingCell(ecSchema, firstCellIdxInBG); - cells[numCells - 1] = new StripingCell(ecSchema, lastCellIdxInBG); + cells[0] = new StripingCell(ecSchema, cellSize, firstCellIdxInBG); + cells[numCells - 1] = new StripingCell(ecSchema, cellSize, lastCellIdxInBG); cells[0].offset = (int) (rangeStartInBlockGroup % cellSize); cells[0].size = @@ -373,7 +374,7 @@ private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema, } for (int i = 1; i < numCells - 1; i++) { - cells[i] = new StripingCell(ecSchema, i + firstCellIdxInBG); + cells[i] = new StripingCell(ecSchema, cellSize, i + firstCellIdxInBG); } return cells; @@ -383,18 +384,16 @@ private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema, * Given a logical start offset in a block group, calculate the physical * start offset into each stored internal block. 
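   * <p>Worked example (illustrative): with 6 data units and a 64 KB cell,
   * block-group offset 500000 lands in cell 500000 / 65536 = 7. Cells are
   * laid out round-robin across the data blocks, so cell 7 sits on
   * internal block 7 % 6 = 1 at row 7 / 6 = 1 with in-cell offset
   * 500000 % 65536 = 41248, giving a start offset of
   * 1 * 65536 + 41248 = 106784 into that internal block.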
*/ - public static long[] getStartOffsetsForInternalBlocks( - ECSchema ecSchema, LocatedStripedBlock blockGroup, - long rangeStartInBlockGroup) { + public static long[] getStartOffsetsForInternalBlocks(ECSchema ecSchema, + int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup) { Preconditions.checkArgument( rangeStartInBlockGroup < blockGroup.getBlockSize()); int dataBlkNum = ecSchema.getNumDataUnits(); int parityBlkNum = ecSchema.getNumParityUnits(); - int cellSize = ecSchema.getChunkSize(); long[] startOffsets = new long[dataBlkNum + parityBlkNum]; Arrays.fill(startOffsets, -1L); int firstCellIdxInBG = (int) (rangeStartInBlockGroup / cellSize); - StripingCell firstCell = new StripingCell(ecSchema, firstCellIdxInBG); + StripingCell firstCell = new StripingCell(ecSchema, cellSize, firstCellIdxInBG); firstCell.offset = (int) (rangeStartInBlockGroup % cellSize); startOffsets[firstCell.idxInStripe] = firstCell.idxInInternalBlk * cellSize + firstCell.offset; @@ -404,7 +403,7 @@ public static long[] getStartOffsetsForInternalBlocks( if (idx * cellSize >= blockGroup.getBlockSize()) { break; } - StripingCell cell = new StripingCell(ecSchema, idx); + StripingCell cell = new StripingCell(ecSchema, cellSize, idx); startOffsets[cell.idxInStripe] = cell.idxInInternalBlk * cellSize; if (startOffsets[cell.idxInStripe] < earliestStart) { earliestStart = startOffsets[cell.idxInStripe]; @@ -422,8 +421,7 @@ public static long[] getStartOffsetsForInternalBlocks( */ @VisibleForTesting private static VerticalRange[] getRangesForInternalBlocks(ECSchema ecSchema, - StripingCell[] cells) { - int cellSize = ecSchema.getChunkSize(); + int cellSize, StripingCell[] cells) { int dataBlkNum = ecSchema.getNumDataUnits(); int parityBlkNum = ecSchema.getNumParityUnits(); @@ -486,7 +484,7 @@ private static AlignedStripe[] mergeRangesForInternalBlocks( } private static void calcualteChunkPositionsInBuf(ECSchema ecSchema, - AlignedStripe[] stripes, StripingCell[] cells, byte[] buf, + int cellSize, AlignedStripe[] stripes, StripingCell[] cells, byte[] buf, int offsetInBuf) { /** * | <--------------- AlignedStripe --------------->| @@ -505,7 +503,6 @@ private static void calcualteChunkPositionsInBuf(ECSchema ecSchema, * * Cell indexing convention defined in {@link StripingCell} */ - int cellSize = ecSchema.getChunkSize(); int done = 0; for (StripingCell cell : cells) { long cellStart = cell.idxInInternalBlk * cellSize + cell.offset; @@ -587,17 +584,17 @@ static class StripingCell { int offset; int size; - StripingCell(ECSchema ecSchema, int idxInBlkGroup) { + StripingCell(ECSchema ecSchema, int cellSize, int idxInBlkGroup) { this.schema = ecSchema; this.idxInBlkGroup = idxInBlkGroup; this.idxInInternalBlk = idxInBlkGroup / ecSchema.getNumDataUnits(); this.idxInStripe = idxInBlkGroup - this.idxInInternalBlk * ecSchema.getNumDataUnits(); this.offset = 0; - this.size = ecSchema.getChunkSize(); + this.size = cellSize; } - StripingCell(ECSchema ecSchema, int idxInInternalBlk, + StripingCell(ECSchema ecSchema, int cellSize, int idxInInternalBlk, int idxInStripe) { this.schema = ecSchema; this.idxInInternalBlk = idxInInternalBlk; @@ -605,7 +602,7 @@ static class StripingCell { this.idxInBlkGroup = idxInInternalBlk * ecSchema.getNumDataUnits() + idxInStripe; this.offset = 0; - this.size = ecSchema.getChunkSize(); + this.size = cellSize; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/erasurecoding.proto index 2302d1d4c4..058ed96ee3 
100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/erasurecoding.proto
@@ -37,11 +37,13 @@ message ErasureCodingInfoProto {
 message ErasureCodingZoneInfoProto {
   required string dir = 1;
   required ECSchemaProto schema = 2;
+  required uint32 cellSize = 3;
 }
 
 message CreateErasureCodingZoneRequestProto {
   required string src = 1;
   optional ECSchemaProto schema = 2;
+  optional uint32 cellSize = 3;
 }
 
 message CreateErasureCodingZoneResponseProto {
@@ -81,4 +83,5 @@ message BlockECRecoveryInfoProto {
   required StorageTypesProto targetStorageTypes = 5;
   repeated uint32 liveBlockIndices = 6;
   required ECSchemaProto ecSchema = 7;
+  required uint32 cellSize = 8;
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 64030bebd3..f64cf8f025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -359,7 +359,8 @@ message HdfsFileStatusProto {
 
   // Optional field for erasure coding
   optional ECSchemaProto ecSchema = 17;
-}
+  optional uint32 stripeCellSize = 18;
+}
 
 /**
  * Checksum algorithms/types used in HDFS
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 9f106cfcab..558c45d8c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1867,7 +1867,7 @@ public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir
     assert dir != null;
     dfs.mkdirs(dir);
     try {
-      dfs.getClient().createErasureCodingZone(dir.toString(), null);
+      dfs.getClient().createErasureCodingZone(dir.toString(), null, 0);
     } catch (IOException e) {
       if (!e.getMessage().contains("non-empty directory")) {
         throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index ec88a542de..4918dee4c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -255,12 +255,12 @@ public Object answer(InvocationOnMock invocation)
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
+            1010, 0, null, (byte) 0, null, 0)).when(mockNN).getFileInfo(anyString());
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null, (byte) 0, null))
+            1010, 0, null, (byte) 0, null, 0))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 9032d09d68..a71441f021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -73,7 +73,7 @@ public void setup() throws IOException {
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
-    fs.getClient().createErasureCodingZone(dirPath.toString(), null);
+    fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
   }
 
   @After
@@ -94,7 +94,7 @@ public void testGetBlock() throws Exception {
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
     final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-        filePath.toString(), false, schema);
+        filePath.toString(), false, schema, CELLSIZE);
 
     List lbList = lbs.getLocatedBlocks();
     for (LocatedBlock aLbList : lbList) {
@@ -146,7 +146,7 @@ public void testPread() throws Exception {
       }
     }
     DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-        filePath.toString(), false, schema);
+        filePath.toString(), false, schema, CELLSIZE);
     int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
         CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,
@@ -188,7 +188,7 @@ public void testPreadWithDNFailure() throws Exception {
     }
     DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
         filePath.toString(), false,
-        ErasureCodingSchemaManager.getSystemDefaultSchema());
+        ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE);
     int readSize = BLOCK_GROUP_SIZE;
     byte[] readBuffer = new byte[readSize];
     byte[] expected = new byte[readSize];
@@ -284,7 +284,7 @@ private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket)
 
     DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
         filePath.toString(),
-        false, schema);
+        false, schema, CELLSIZE);
 
     byte[] expected = new byte[fileSize];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index ec98e68612..e795ea7a8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -65,7 +65,7 @@ public void setup() throws IOException {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index c2e588ad50..4ad3b2e4e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -74,7 +74,7 @@ public void setup() throws IOException {
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
-    dfs.createErasureCodingZone(dir, null);
+    dfs.createErasureCodingZone(dir, null, 0);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 6e2ec5ec95..1f317b8599 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -737,7 +737,7 @@ private static void mockCreate(ClientProtocol mcp,
             version, new byte[suite.getAlgorithmBlockSize()],
             new byte[suite.getAlgorithmBlockSize()],
             "fakeKey", "fakeVersion"),
-            (byte) 0, null))
+            (byte) 0, null, 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
index 59818c09c3..a7c3cd4d45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
@@ -63,7 +63,7 @@ public void testCreateECZone()
     fs.mkdir(testDir, FsPermission.getDirDefault());
 
     /* Normal creation of an erasure coding zone */
-    fs.getClient().createErasureCodingZone(testDir.toString(), null);
+    fs.getClient().createErasureCodingZone(testDir.toString(), null, 0);
 
     /* Verify files under the zone are striped */
     final Path ECFilePath = new Path(testDir, "foo");
@@ -76,7 +76,7 @@ public void testCreateECZone()
     fs.mkdir(notEmpty, FsPermission.getDirDefault());
     fs.create(new Path(notEmpty, "foo"));
     try {
-      fs.getClient().createErasureCodingZone(notEmpty.toString(), null);
+      fs.getClient().createErasureCodingZone(notEmpty.toString(), null, 0);
       fail("Erasure coding zone on non-empty dir");
     } catch (IOException e) {
       assertExceptionContains("erasure coding zone for a non-empty directory", e);
@@ -86,10 +86,10 @@ public void testCreateECZone()
     final Path zone1 = new Path("/zone1");
     final Path zone2 = new Path(zone1, "zone2");
     fs.mkdir(zone1, FsPermission.getDirDefault());
-    fs.getClient().createErasureCodingZone(zone1.toString(), null);
+    fs.getClient().createErasureCodingZone(zone1.toString(), null, 0);
     fs.mkdir(zone2, FsPermission.getDirDefault());
     try {
-      fs.getClient().createErasureCodingZone(zone2.toString(), null);
+      fs.getClient().createErasureCodingZone(zone2.toString(), null, 0);
       fail("Nested erasure coding zones");
     } catch (IOException e) {
       assertExceptionContains("already in an erasure coding zone", e);
@@ -99,7 +99,7 @@ public void testCreateECZone()
     final Path fPath = new Path("/file");
     fs.create(fPath);
     try {
-      fs.getClient().createErasureCodingZone(fPath.toString(), null);
+      fs.getClient().createErasureCodingZone(fPath.toString(), null, 0);
       fail("Erasure coding zone on file");
     } catch (IOException e) {
       assertExceptionContains("erasure coding zone for a file", e);
@@ -112,8 +112,8 @@ public void testMoveValidity() throws IOException, InterruptedException {
     final Path dstECDir = new Path("/dstEC");
     fs.mkdir(srcECDir, FsPermission.getDirDefault());
     fs.mkdir(dstECDir, FsPermission.getDirDefault());
-    fs.getClient().createErasureCodingZone(srcECDir.toString(), null);
-    fs.getClient().createErasureCodingZone(dstECDir.toString(), null);
+    fs.getClient().createErasureCodingZone(srcECDir.toString(), null, 0);
+    fs.getClient().createErasureCodingZone(dstECDir.toString(), null, 0);
     final Path srcFile = new Path(srcECDir, "foo");
     fs.create(srcFile);
@@ -157,7 +157,7 @@ public void testGetErasureCodingInfoWithSystemDefaultSchema() throws Exception {
     // dir ECInfo before creating ec zone
     assertNull(fs.getClient().getErasureCodingInfo(src));
     // dir ECInfo after creating ec zone
-    fs.getClient().createErasureCodingZone(src, null); //Default one will be used.
+    fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
     ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     verifyErasureCodingInfo(src, sysDefaultSchema);
     fs.create(new Path(ecDir, "/child1")).close();
@@ -178,7 +178,7 @@ public void testGetErasureCodingInfo() throws Exception {
     // dir ECInfo before creating ec zone
     assertNull(fs.getClient().getErasureCodingInfo(src));
     // dir ECInfo after creating ec zone
-    fs.getClient().createErasureCodingZone(src, usingSchema);
+    fs.getClient().createErasureCodingZone(src, usingSchema, 0);
     verifyErasureCodingInfo(src, usingSchema);
     fs.create(new Path(ecDir, "/child1")).close();
     // verify for the files in ec zone
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java
index f8c0667eb0..3c400b7a95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECschema.java
@@ -50,7 +50,7 @@ public void testFileStatusWithECschema() throws Exception {
     final ECSchema schema1 = ErasureCodingSchemaManager.getSystemDefaultSchema();
     // create EC zone on dir
-    fs.createErasureCodingZone(dir, schema1);
+    fs.createErasureCodingZone(dir, schema1, 0);
     final ECSchema schame2 = client.getFileInfo(dir.toUri().getPath()).getECSchema();
     assertNotNull(schame2);
     assertTrue(schema1.equals(schame2));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index b77ff3a000..d0cd335096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -354,12 +354,12 @@ public void testFactory() throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
+            1010, 0, null, (byte) 0, null, 0)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, null, (byte) 0, null))
+                1010, 0, null, (byte) 0, null, 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
index dfdcee29f6..e5e324cfbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
@@ -77,7 +77,7 @@ public void setup() throws IOException {
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    fs.getClient().createErasureCodingZone("/", null);
+    fs.getClient().createErasureCodingZone("/", null, 0);
 
     List datanodes = cluster.getDataNodes();
     for (int i = 0; i < dnNum; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index f78fb7a5c0..1976dcaa86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -59,7 +59,7 @@ public static void setup() throws IOException {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, cellSize);
     fs = cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 1be00b6fd8..a0b203887e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -663,7 +663,8 @@ public void testBlockECRecoveryCommand() {
     short[] liveBlkIndices0 = new short[2];
     BlockECRecoveryInfo blkECRecoveryInfo0 = new BlockECRecoveryInfo(
         new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
-        liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema());
+        liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema(),
+        64 * 1024);
     DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
         DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
     DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
@@ -677,7 +678,8 @@ public void testBlockECRecoveryCommand() {
     short[] liveBlkIndices1 = new short[2];
     BlockECRecoveryInfo blkECRecoveryInfo1 = new BlockECRecoveryInfo(
         new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
-        liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema());
+        liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema(),
+        64 * 1024);
     List blkRecoveryInfosList = new ArrayList();
     blkRecoveryInfosList.add(blkECRecoveryInfo0);
     blkRecoveryInfosList.add(blkECRecoveryInfo1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index ba763604a8..a35cbf480a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -74,7 +74,7 @@ public void setup() throws IOException {
         .numDataNodes(GROUP_SIZE).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
-    dfs.getClient().createErasureCodingZone("/", null);
+    dfs.getClient().createErasureCodingZone("/", null, 0);
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 290942372d..abb9bf5aea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -451,7 +451,7 @@ public void testAddNewStripedBlock() throws IOException{
 
     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
-    fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
+    fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
 
     // Create a file with striped block
     Path p = new Path(testFilePath);
@@ -523,7 +523,7 @@ public void testUpdateStripedBlocks() throws IOException{
 
     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
-    fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
+    fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
 
     //create a file with striped blocks
     Path p = new Path(testFilePath);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 5e015bf0c3..8fd0753448 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -140,7 +140,7 @@ private void testPersistHelper(Configuration conf) throws IOException {
   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
       boolean isUC) throws IOException{
     // contruct a INode with StripedBlock for saving and loading
-    fsn.createErasureCodingZone("/", null, false);
+    fsn.createErasureCodingZone("/", null, 0, false);
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
@@ -402,7 +402,7 @@ public void testSupportBlockGroup() throws IOException {
         .build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
-    fs.getClient().getNamenode().createErasureCodingZone("/", null);
+    fs.getClient().getNamenode().createErasureCodingZone("/", null, 0);
     Path file = new Path("/striped");
     FSDataOutputStream out = fs.create(file);
     byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 787082791a..b36beb2a58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1198,7 +1198,7 @@ public void testFsckFileNotFound() throws Exception {
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink,
-        path, fileId, numChildren, null, storagePolicy, null);
+        path, fileId, numChildren, null, storagePolicy, null, 0);
     Result res = new Result(conf);
 
     try {
@@ -1629,4 +1629,4 @@ public void testFsckWithDecommissionedReplicas() throws Exception {
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 86fcb88bd9..9c585a4a23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -66,7 +66,7 @@ public void setUp() throws IOException {
     dfs = cluster.getFileSystem();
 
     dfs.mkdirs(ecDir);
-    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema);
+    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
     dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
     dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
     dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index f3ef39a80c..2a51f99b79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -61,7 +61,7 @@ public static void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
     fs = cluster.getFileSystem();
     Path eczone = new Path("/eczone");
     fs.mkdirs(eczone);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 6f29d69048..5a1c3fc096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -152,7 +152,7 @@ private byte[][] createInternalBlkBuffers(int bgSize) {
     int done = 0;
     while (done < bgSize) {
       Preconditions.checkState(done % CELLSIZE == 0);
-      StripingCell cell = new StripingCell(SCEHMA, done / CELLSIZE);
+      StripingCell cell = new StripingCell(SCEHMA, CELLSIZE, done / CELLSIZE);
       int idxInStripe = cell.idxInStripe;
       int size = Math.min(CELLSIZE, bgSize - done);
       for (int i = 0; i < size; i++) {
@@ -247,7 +247,7 @@ public void testDivideByteRangeIntoStripes() {
           continue;
         }
         AlignedStripe[] stripes = divideByteRangeIntoStripes(SCEHMA,
-            blockGroup, brStart, brStart + brSize - 1, assembled, 0);
+            CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
 
         for (AlignedStripe stripe : stripes) {
           for (int i = 0; i < DATA_BLK_NUM; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 8947c5b49a..303d063ecc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -65,7 +65,7 @@ public void testHdfsFileStatus() throws IOException {
    final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
        now, now + 10, new FsPermission((short) 0644), "user", "group",
        DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-       HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
+       HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null, 0);
    final FileStatus fstatus = toFileStatus(status, parent);
    System.out.println("status = " + status);
    System.out.println("fstatus = " + fstatus);
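
Illustrative usage of the API shape after this patch: createErasureCodingZone now takes an explicit cell size (the tests above pass 0 where the default is acceptable), and the value is surfaced back on HdfsFileStatus via getStripeCellSize(). A minimal sketch against MiniDFSCluster, mirroring the test setups above; the "/eczone" path and the 64 KiB value are example choices, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ECZoneCellSizeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path zone = new Path("/eczone");
      fs.mkdirs(zone);
      // Third argument is new in this patch: an explicit cell size.
      // A null schema selects the system default schema.
      fs.getClient().createErasureCodingZone(zone.toString(), null, 64 * 1024);
      // The cell size now travels with the file status.
      HdfsFileStatus status = fs.getClient().getFileInfo(zone.toString());
      System.out.println("stripe cell size = " + status.getStripeCellSize());
    } finally {
      cluster.shutdown();
    }
  }
}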
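At the wire level, cellSize simply rides alongside the schema in the create request (optional field 3 in the hunk above). A hypothetical builder sketch, assuming the generated outer class follows the usual convention and lives at org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos:

import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;

public class CellSizeWireExample {
  public static void main(String[] args) {
    // src is required field 1; schema (optional field 2) is omitted here,
    // so the NameNode side falls back to the default schema.
    CreateErasureCodingZoneRequestProto req =
        CreateErasureCodingZoneRequestProto.newBuilder()
            .setSrc("/eczone")
            .setCellSize(64 * 1024)  // new optional field 3
            .build();
    System.out.println(req);
  }
}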
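The reason the cell size must travel with the schema is that all striping arithmetic keys off it, as the StripingCell and divideByteRangeIntoStripes changes above show. A toy sketch of that offset-to-cell mapping, assuming the RS-6-3 defaults (6 data blocks, 64 KiB cells); this is standalone arithmetic, not HDFS code:

public class StripeMathExample {
  public static void main(String[] args) {
    final int dataBlkNum = 6;        // data blocks per stripe (RS-6-3 default)
    final int cellSize = 64 * 1024;  // the value the zone XAttr now records
    final long offset = 500_000L;    // a logical offset inside one block group
    long cellIdx = offset / cellSize;          // cell containing the offset
    long idxInStripe = cellIdx % dataBlkNum;   // internal block holding that cell
    long stripeIdx = cellIdx / dataBlkNum;     // stripe within the block group
    // For offset 500000: cell 7, internal block 1, stripe 1.
    System.out.println("cell=" + cellIdx
        + " internalBlk=" + idxInStripe + " stripe=" + stripeIdx);
  }
}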