HDFS-8316. Erasure coding: refactor EC constants to be consistent with HDFS-8249. Contributed by Zhe Zhang.

Jing Zhao 2015-05-04 11:24:35 -07:00 committed by Zhe Zhang
parent 77d94dd5ec
commit ea6c66ed57
8 changed files with 28 additions and 11 deletions


@@ -75,6 +75,17 @@ public final class HdfsConstants {
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
       "org.apache.hadoop.hdfs.protocol.ClientProtocol";
 
+  /*
+   * These values correspond to the values used by the system default erasure
+   * coding schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
+  // The chunk size for striped block which is used by erasure coding
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
+
   // SafeMode actions
   public enum SafeModeAction {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET

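For orientation: the constants added above encode the system default erasure coding schema, a (6, 3) layout striped in 256 KB cells, so every full stripe carries six data cells and three parity cells. A minimal sketch of what that implies for stripe sizes (the demo class is hypothetical; only the three constant values come from the hunk above):

// Hypothetical demo; the three constant values mirror HdfsConstants above.
public final class StripeSizeDemo {
  static final byte NUM_DATA_BLOCKS = 6;
  static final byte NUM_PARITY_BLOCKS = 3;
  static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;

  public static void main(String[] args) {
    // 6 * 256 KB = 1536 KB of user data per full stripe.
    int dataBytes = NUM_DATA_BLOCKS * BLOCK_STRIPED_CELL_SIZE;
    // (6 + 3) * 256 KB = 2304 KB stored, i.e. 1.5x the raw data size.
    int totalBytes = (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS) * BLOCK_STRIPED_CELL_SIZE;
    System.out.println("data bytes/stripe = " + dataBytes
        + ", stored bytes/stripe = " + totalBytes);
  }
}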

@@ -158,3 +158,6 @@
 
     HDFS-7949. WebImageViewer need support file size calculation with striped
     blocks. (Rakesh R via Zhe Zhang)
+
+    HDFS-8316. Erasure coding: refactor EC constants to be consistent with HDFS-8249.
+    (Zhe Zhang via jing9)


@@ -419,7 +419,7 @@ void setClosed() {
 
   @Override
   protected synchronized void closeImpl() throws IOException {
     if (isClosed()) {
-      getLeadingStreamer().getLastException().check();
+      getLeadingStreamer().getLastException().check(true);
       return;
     }


@@ -241,11 +241,11 @@ public static boolean isStripedBlockID(long id) {
    * data/parity block id in the same striped block group.
    */
   public static long convertToStripedID(long id) {
-    return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
+    return id & (~HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
   }
 
   public static int getBlockIndex(Block reportedBlock) {
     return (int) (reportedBlock.getBlockId() &
-        HdfsConstants.BLOCK_GROUP_INDEX_MASK);
+        HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
   }
 }

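The two helpers above work because a striped block ID reserves its low four bits (BLOCK_GROUP_INDEX_MASK = 15) for the block's index within its group, so a group spans at most MAX_BLOCKS_IN_GROUP = 16 consecutive IDs, and masking the index bits off recovers the group's base ID. A self-contained sketch of that arithmetic (the demo class and sample ID are hypothetical; the constants mirror HdfsServerConstants):

// Hypothetical demo; the mask values mirror HdfsServerConstants, and the
// two helpers reproduce the arithmetic of BlockIdManager above.
public final class BlockGroupIdDemo {
  static final long BLOCK_GROUP_INDEX_MASK = 15;
  static final byte MAX_BLOCKS_IN_GROUP = 16;

  static long convertToStripedID(long id) {
    return id & (~BLOCK_GROUP_INDEX_MASK);  // clear the low 4 index bits
  }

  static int getBlockIndex(long id) {
    return (int) (id & BLOCK_GROUP_INDEX_MASK);  // keep only the low 4 bits
  }

  public static void main(String[] args) {
    long groupId = 0x7A30L;  // sample base ID; its low 4 bits are zero
    for (int i = 0; i < MAX_BLOCKS_IN_GROUP; i++) {
      long blockId = groupId + i;  // the i-th data/parity block of the group
      assert convertToStripedID(blockId) == groupId;
      assert getBlockIndex(blockId) == i;
    }
    System.out.println("all " + MAX_BLOCKS_IN_GROUP + " indices round-trip");
  }
}

This layout is also why consecutive block groups differ by exactly MAX_BLOCKS_IN_GROUP, the property asserted in the testAllocateBlockId hunk further down.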

@@ -21,8 +21,8 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.util.SequentialNumber;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_GROUP_INDEX_MASK;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_BLOCKS_IN_GROUP;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BLOCK_GROUP_INDEX_MASK;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_BLOCKS_IN_GROUP;
 
 /**
  * Generate the next valid block group ID by incrementing the maximum block


@@ -396,6 +396,9 @@ enum BlockUCState {
       "raw.hdfs.crypto.file.encryption.info";
   String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
       "security.hdfs.unreadable.by.superuser";
-  public static final String XATTR_ERASURECODING_ZONE =
+  String XATTR_ERASURECODING_ZONE =
       "raw.hdfs.erasurecoding.zone";
+
+  long BLOCK_GROUP_INDEX_MASK = 15;
+  byte MAX_BLOCKS_IN_GROUP = 16;
 }

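A note on the modifier change in this hunk: the surrounding declarations (bare String, long, and byte fields) indicate that HdfsServerConstants is a Java interface, where every field is implicitly public static final, so dropping the explicit modifiers on XATTR_ERASURECODING_ZONE changes nothing at runtime. A two-line illustration (interface name hypothetical):

// Both fields are implicitly public static final inside an interface.
interface ConstantsDemo {
  public static final String A = "a";  // verbose, redundant modifiers
  String B = "b";                      // equivalent idiomatic form
}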

@@ -102,7 +102,7 @@ public void testAllocateBlockId() throws Exception {
     DFSTestUtil.writeFile(dfs, testPath, "hello again");
     lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
     final long secondId = lb.get(0).getBlock().getBlockId();
-    Assert.assertEquals(firstId + HdfsConstants.MAX_BLOCKS_IN_GROUP, secondId);
+    Assert.assertEquals(firstId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP, secondId);
   }
 
   @Test (timeout=60000)
@@ -159,7 +159,7 @@ private void checkStripedBlockUC(BlockInfoStriped block,
     Assert.assertEquals(HdfsConstants.NUM_PARITY_BLOCKS,
         block.getParityBlockNum());
     Assert.assertEquals(0,
-        block.getBlockId() & HdfsConstants.BLOCK_GROUP_INDEX_MASK);
+        block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
 
     final BlockInfoStripedUnderConstruction blockUC =
         (BlockInfoStripedUnderConstruction) block;


@@ -28,10 +28,10 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.Test;
@@ -46,8 +46,8 @@ public class TestStripedINodeFile {
       "userName", null, FsPermission.getDefault());
 
   private static INodeFile createStripedINodeFile() {
-    return new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, (short)0, 1024L, HdfsConstants.COLD_STORAGE_POLICY_ID);
+    return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+        null, (short)0, 1024L, HdfsServerConstants.COLD_STORAGE_POLICY_ID);
   }
 
   @Test