HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.
parent e96a0b8c92
commit 819808a016
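In effect, every test-side call to the NameNode-internal ErasureCodingPolicyManager#getSystemDefaultPolicy() is replaced by the new test helper StripedFileTestUtil#getDefaultECPolicy(), as the hunks below show. A minimal sketch of the resulting call pattern, assuming the HDFS test classpath; only the two helper calls and the DistributedFileSystem methods are taken from the diff, the class and method names here are illustrative:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative helper, not part of the patch.
public class EcTestSetupSketch {
  // Before this change tests called the server-internal
  // ErasureCodingPolicyManager.getSystemDefaultPolicy(); they now resolve the
  // default policy through the test utility instead.
  static void enableDefaultEcPolicy(DistributedFileSystem dfs, Path dir)
      throws Exception {
    ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
    dfs.mkdirs(dir);
    dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
  }
}

Keeping the default-policy lookup in test code narrows the production API of ErasureCodingPolicyManager without changing which policy the tests exercise.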
@@ -129,16 +129,6 @@ public static ErasureCodingPolicy[] getSystemPolicies() {
     return SYS_POLICIES;
   }
 
-  /**
-   * Get system-wide default policy, which can be used by default
-   * when no policy is specified for a path.
-   * @return ecPolicy
-   */
-  public static ErasureCodingPolicy getSystemDefaultPolicy() {
-    // make this configurable?
-    return SYS_POLICY1;
-  }
-
   /**
    * Get a policy by policy ID.
    * @return ecPolicy, or null if not found
@@ -1910,7 +1910,7 @@ public static void createStripedFile(MiniDFSCluster cluster, Path file,
       Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
       throws Exception {
     createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
-        toMkdir, ErasureCodingPolicyManager.getSystemDefaultPolicy());
+        toMkdir, StripedFileTestUtil.getDefaultECPolicy());
   }
 
   /**
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -42,9 +41,7 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -81,7 +78,7 @@ public class ErasureCodeBenchmarkThroughput
   private static final String EC_FILE_BASE = "ec-file-";
   private static final String TMP_FILE_SUFFIX = ".tmp";
   private static final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private static final byte[] data = new byte[BUFFER_SIZE_MB * 1024 * 1024];
 
   static {
@@ -29,9 +29,11 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -558,4 +560,14 @@ public static LocatedBlocks getLocatedBlocks(Path file,
       throws IOException {
     return fs.getClient().getLocatedBlocks(file.toString(), 0, Long.MAX_VALUE);
   }
+
+  /**
+   * Get system-wide default Erasure Coding Policy, which can be
+   * used by default when no policy is specified for a path.
+   * @return ErasureCodingPolicy
+   */
+  public static ErasureCodingPolicy getDefaultECPolicy() {
+    return ErasureCodingPolicyManager.getPolicyByID(
+        HdfsConstants.RS_6_3_POLICY_ID);
+  }
 }
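The hunk above is the entire new helper: it resolves the built-in RS(6,3) policy by ID through ErasureCodingPolicyManager#getPolicyByID. A short sketch of reading the policy's parameters from it, assuming the HDFS test classpath; the class name is invented for illustration, the getters are the ones used throughout the test hunks below:

import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative only: print the parameters of the default test policy.
public class DefaultEcPolicyInfo {
  public static void main(String[] args) {
    ErasureCodingPolicy p = StripedFileTestUtil.getDefaultECPolicy();
    System.out.println(p.getName()
        + " data=" + p.getNumDataUnits()
        + " parity=" + p.getNumParityUnits()
        + " cellSize=" + p.getCellSize());
  }
}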
@@ -29,7 +29,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
@@ -76,7 +75,7 @@ public class TestDFSStripedInputStream {
   public Timeout globalTimeout = new Timeout(300000);
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    return StripedFileTestUtil.getDefaultECPolicy();
   }
 
   @Before
@@ -26,7 +26,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -62,7 +61,7 @@ public class TestDFSStripedOutputStream {
   public Timeout globalTimeout = new Timeout(300000);
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    return StripedFileTestUtil.getDefaultECPolicy();
   }
 
   @Before
@ -36,7 +36,6 @@
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.io.erasurecode.CodecUtil;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
|
||||
@ -89,7 +88,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
||||
9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
|
||||
|
||||
public ErasureCodingPolicy getEcPolicy() {
|
||||
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
return StripedFileTestUtil.getDefaultECPolicy();
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -47,7 +47,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
@ -83,7 +82,7 @@ public class TestDecommissionWithStriped {
|
||||
private MiniDFSCluster cluster;
|
||||
private DistributedFileSystem dfs;
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private int numDNs;
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
@ -143,7 +142,7 @@ public void setup() throws IOException {
|
||||
|
||||
dfs.mkdirs(ecDir);
|
||||
dfs.setErasureCodingPolicy(ecDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -56,7 +56,7 @@ public class TestErasureCodingPolicies {
|
||||
private DistributedFileSystem fs;
|
||||
private static final int BLOCK_SIZE = 1024;
|
||||
private static final ErasureCodingPolicy EC_POLICY =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private FSNamesystem namesystem;
|
||||
|
||||
@Rule
|
||||
@ -95,7 +95,7 @@ public void testReplicatedFileUnderECDir() throws IOException {
|
||||
|
||||
// set ec policy on dir
|
||||
fs.setErasureCodingPolicy(dir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
// create a file which should be using ec
|
||||
final Path ecSubDir = new Path(dir, "ecSubDir");
|
||||
final Path ecFile = new Path(ecSubDir, "ecFile");
|
||||
@ -270,7 +270,7 @@ public void testReplication() throws IOException {
|
||||
final Path testDir = new Path("/ec");
|
||||
fs.mkdir(testDir, FsPermission.getDirDefault());
|
||||
fs.setErasureCodingPolicy(testDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
final Path fooFile = new Path(testDir, "foo");
|
||||
// create ec file with replication=0
|
||||
fs.create(fooFile, FsPermission.getFileDefault(), true,
|
||||
@ -292,7 +292,7 @@ public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Excepti
|
||||
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
|
||||
// dir EC policy after setting
|
||||
ErasureCodingPolicy sysDefaultECPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
fs.getClient().setErasureCodingPolicy(src, sysDefaultECPolicy.getName());
|
||||
verifyErasureCodingInfo(src, sysDefaultECPolicy);
|
||||
fs.create(new Path(ecDir, "child1")).close();
|
||||
|
@ -28,7 +28,6 @@
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
@ -41,7 +40,7 @@ public class TestErasureCodingPolicyWithSnapshot {
|
||||
|
||||
private final static int SUCCESS = 0;
|
||||
private final ErasureCodingPolicy sysDefaultPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short groupSize = (short) (
|
||||
sysDefaultPolicy.getNumDataUnits() +
|
||||
sysDefaultPolicy.getNumParityUnits());
|
||||
|
@ -26,7 +26,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
@ -47,7 +46,7 @@ public class TestFileChecksum {
|
||||
private static final Logger LOG = LoggerFactory
|
||||
.getLogger(TestFileChecksum.class);
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
|
||||
@ -82,7 +81,7 @@ public void setup() throws IOException {
|
||||
Path ecPath = new Path(ecDir);
|
||||
cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
fs = cluster.getFileSystem();
|
||||
client = fs.getClient();
|
||||
|
||||
|
@ -26,7 +26,6 @@
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
@ -71,7 +70,7 @@ public void testFileStatusWithECPolicy() throws Exception {
|
||||
assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
|
||||
fs.delete(file, true);
|
||||
|
||||
final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
final ErasureCodingPolicy ecPolicy1 = StripedFileTestUtil.getDefaultECPolicy();
|
||||
// set EC policy on dir
|
||||
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
|
||||
final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
|
||||
|
@ -29,7 +29,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
@ -57,7 +56,7 @@ public class TestLeaseRecoveryStriped {
|
||||
.getLog(TestLeaseRecoveryStriped.class);
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
|
@ -35,7 +35,6 @@
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
@ -68,7 +67,7 @@ public class TestReadStripedFileWithDecoding {
|
||||
private MiniDFSCluster cluster;
|
||||
private DistributedFileSystem fs;
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks =
|
||||
(short) ecPolicy.getNumParityUnits();
|
||||
@ -103,7 +102,7 @@ public void setup() throws IOException {
|
||||
false);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
fs = cluster.getFileSystem();
|
||||
}
|
||||
|
||||
|
@ -24,7 +24,6 @@
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.Rule;
|
||||
@ -43,7 +42,7 @@ public class TestReadStripedFileWithMissingBlocks {
|
||||
private DistributedFileSystem fs;
|
||||
private Configuration conf = new HdfsConfiguration();
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
|
@ -44,7 +44,6 @@
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
|
||||
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
|
||||
@ -62,7 +61,7 @@ public class TestReconstructStripedFile {
|
||||
public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlkNum = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlkNum = ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
@ -108,7 +107,7 @@ public void setup() throws IOException {
|
||||
|
||||
fs = cluster.getFileSystem();
|
||||
fs.getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
List<DataNode> datanodes = cluster.getDataNodes();
|
||||
for (int i = 0; i < dnNum; i++) {
|
||||
@ -418,7 +417,7 @@ public void testProcessErasureCodingTasksSubmitionShouldSucceed()
|
||||
|
||||
BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
|
||||
new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
StripedFileTestUtil.getDefaultECPolicy());
|
||||
List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
|
||||
ecTasks.add(invalidECInfo);
|
||||
dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
|
||||
|
@ -25,7 +25,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.junit.After;
|
||||
@ -44,7 +43,7 @@
|
||||
public class TestSafeModeWithStripedFile {
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final int numDNs = dataBlocks + parityBlocks;
|
||||
@ -64,7 +63,7 @@ public void setup() throws IOException {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
cluster.waitActive();
|
||||
}
|
||||
|
||||
|
@ -51,8 +51,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
|
||||
private MiniDFSCluster cluster;
|
||||
private Configuration conf = new Configuration();
|
||||
private DistributedFileSystem fs;
|
||||
private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy();
|
||||
private ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
|
@ -27,7 +27,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
|
||||
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
@ -48,7 +47,7 @@
|
||||
public class TestWriteReadStripedFile {
|
||||
public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
@ -81,7 +80,7 @@ public void setup() throws IOException {
|
||||
fs = cluster.getFileSystem();
|
||||
fs.mkdirs(new Path("/ec"));
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -24,7 +24,6 @@
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.Assert;
|
||||
@ -47,7 +46,7 @@ public class TestWriteStripedFileWithFailure {
|
||||
}
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final int numDNs = dataBlocks + parityBlocks;
|
||||
@ -60,7 +59,7 @@ public void setup() throws IOException {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
fs = cluster.getFileSystem();
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockType;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
@ -77,7 +78,6 @@
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
|
||||
@ -228,7 +228,7 @@ private static BlockWithLocations getBlockWithLocations(
|
||||
datanodeUuids, storageIDs, storageTypes);
|
||||
if (isStriped) {
|
||||
blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getCellSize());
|
||||
}
|
||||
return blkLocs;
|
||||
}
|
||||
@ -720,7 +720,7 @@ public void testBlockECRecoveryCommand() {
|
||||
byte[] liveBlkIndices0 = new byte[2];
|
||||
BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(
|
||||
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
|
||||
liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
|
||||
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
|
||||
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
|
||||
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
|
||||
@ -734,7 +734,7 @@ public void testBlockECRecoveryCommand() {
|
||||
byte[] liveBlkIndices1 = new byte[2];
|
||||
BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(
|
||||
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
|
||||
liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
|
||||
List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
|
||||
blkRecoveryInfosList.add(blkECRecoveryInfo0);
|
||||
blkRecoveryInfosList.add(blkECRecoveryInfo1);
|
||||
@ -823,8 +823,8 @@ private void assertBlockECRecoveryInfoEquals(
|
||||
ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy();
|
||||
// Compare ECPolicies same as default ECPolicy as we used system default
|
||||
// ECPolicy used in this test
|
||||
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1);
|
||||
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2);
|
||||
compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy1);
|
||||
compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy2);
|
||||
}
|
||||
|
||||
private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) {
|
||||
|
@ -46,7 +46,6 @@
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.junit.AfterClass;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
@ -203,7 +202,7 @@ static void initConfWithRamDisk(Configuration conf,
|
||||
}
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
private final int groupSize = dataBlocks + parityBlocks;
|
||||
@ -1941,7 +1940,7 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception
|
||||
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
|
||||
ClientProtocol.class).getProxy();
|
||||
client.setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
long totalCapacity = sum(capacities);
|
||||
|
||||
|
@ -18,8 +18,8 @@
|
||||
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Rule;
|
||||
@ -43,7 +43,7 @@ public class TestBlockInfoStriped {
|
||||
private static final long BASE_ID = -1600;
|
||||
private final Block baseBlock = new Block(BASE_ID);
|
||||
private final ErasureCodingPolicy testECPolicy
|
||||
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
= StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int totalBlocks = testECPolicy.getNumDataUnits() +
|
||||
testECPolicy.getNumParityUnits();
|
||||
private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
|
||||
|
@ -20,11 +20,11 @@
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
|
||||
import org.apache.hadoop.net.ServerSocketUtil;
|
||||
import org.junit.Rule;
|
||||
@ -35,7 +35,7 @@
|
||||
|
||||
public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
@ -84,7 +84,7 @@ public void testRead() throws Exception {
|
||||
.numDataNodes(numDNs)
|
||||
.build();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
try {
|
||||
cluster.waitActive();
|
||||
doTestRead(conf, cluster, true);
|
||||
|
@ -20,8 +20,8 @@
|
||||
|
||||
import java.util.Iterator;
|
||||
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.junit.Test;
|
||||
|
||||
@ -33,7 +33,7 @@
|
||||
public class TestLowRedundancyBlockQueues {
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
|
||||
private BlockInfo genBlockInfo(long id) {
|
||||
return new BlockInfoContiguous(new Block(id), (short) 3);
|
||||
|
@ -23,13 +23,13 @@
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.net.NetworkTopology;
|
||||
@ -59,7 +59,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
||||
}
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
@ -151,7 +151,7 @@ public void testReconstructForNotEnoughRacks() throws Exception {
|
||||
cluster.waitActive();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.setErasureCodingPolicy(new Path("/"),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
FSNamesystem fsn = cluster.getNamesystem();
|
||||
BlockManager bm = fsn.getBlockManager();
|
||||
|
||||
@ -222,7 +222,7 @@ public void testChooseExcessReplicasToDelete() throws Exception {
|
||||
cluster.waitActive();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.setErasureCodingPolicy(new Path("/"),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
|
||||
hosts[hosts.length - 1]);
|
||||
@ -276,7 +276,7 @@ public void testReconstructionWithDecommission() throws Exception {
|
||||
cluster.waitActive();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.setErasureCodingPolicy(new Path("/"),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
final BlockManager bm = cluster.getNamesystem().getBlockManager();
|
||||
final DatanodeManager dm = bm.getDatanodeManager();
|
||||
|
@ -37,9 +37,9 @@
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.After;
|
||||
@ -58,7 +58,7 @@ public class TestSequentialBlockGroupId {
|
||||
.getLog("TestSequentialBlockGroupId");
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short REPLICATION = 1;
|
||||
private final long SEED = 0;
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
@ -89,7 +89,7 @@ public void setup() throws Exception {
|
||||
.getBlockIdManager().getBlockGroupIdGenerator();
|
||||
fs.mkdirs(ecDir);
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ecDir",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -27,6 +27,7 @@
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
@ -34,7 +35,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.Time;
|
||||
@ -54,7 +54,7 @@ public class TestSortLocatedStripedBlock {
|
||||
.getLogger(TestSortLocatedStripedBlock.class);
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
|
@ -65,6 +65,7 @@
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
|
||||
import org.apache.hadoop.util.AutoCloseableLock;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
@ -137,7 +138,7 @@ public class TestBlockRecovery {
|
||||
public TestName currentTestName = new TestName();
|
||||
|
||||
private final int cellSize =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
|
||||
StripedFileTestUtil.getDefaultECPolicy().getCellSize();
|
||||
private final int bytesPerChecksum = 512;
|
||||
private final int[][][] blockLengthsSuite = {
|
||||
{{11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize,
|
||||
|
@ -33,7 +33,6 @@
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
|
||||
@ -55,7 +54,7 @@ public class TestDataNodeErasureCodingMetrics {
|
||||
public static final Log LOG = LogFactory.
|
||||
getLog(TestDataNodeErasureCodingMetrics.class);
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
@ -76,7 +75,7 @@ public void setup() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.waitActive();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
fs = cluster.getFileSystem();
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,6 @@
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
|
||||
import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.minikdc.MiniKdc;
|
||||
@ -478,7 +477,7 @@ public void testMoverFailedRetry() throws Exception {
|
||||
}
|
||||
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
@ -538,7 +537,7 @@ public void testMoverWithStripedFile() throws Exception {
|
||||
HdfsConstants.HOT_STORAGE_POLICY_NAME);
|
||||
// set an EC policy on "/bar" directory
|
||||
client.setErasureCodingPolicy(barDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
// write file to barDir
|
||||
final String fooFile = "/bar/foo";
|
||||
|
@ -56,7 +56,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
||||
private final Path dirPath = new Path("/striped");
|
||||
private Path filePath = new Path(dirPath, "file");
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final short groupSize = (short) (dataBlocks + parityBlocks);
|
||||
@ -82,7 +82,7 @@ public void setup() throws IOException {
|
||||
fs = cluster.getFileSystem();
|
||||
fs.mkdirs(dirPath);
|
||||
fs.getClient().setErasureCodingPolicy(dirPath.toString(),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
}
|
||||
|
||||
@After
|
||||
@ -192,7 +192,7 @@ public void testProcessOverReplicatedAndCorruptStripedBlock()
|
||||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, blockSize, gs);
|
||||
BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
StripedFileTestUtil.getDefaultECPolicy());
|
||||
for (int i = 0; i < groupSize; i++) {
|
||||
blk.setBlockId(groupId + i);
|
||||
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
|
||||
|
@ -24,6 +24,7 @@
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
|
||||
@ -45,7 +46,7 @@
|
||||
|
||||
public class TestAddStripedBlockInFBR {
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
@ -88,7 +89,7 @@ public void testAddBlockInFullBlockReport() throws Exception {
|
||||
dfs.mkdirs(ecDir);
|
||||
dfs.mkdirs(repDir);
|
||||
dfs.getClient().setErasureCodingPolicy(ecDir.toString(),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
// create several non-EC files and one EC file
|
||||
final Path[] repFiles = new Path[groupSize];
|
||||
|
@ -24,6 +24,7 @@
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
@ -67,7 +68,7 @@
|
||||
|
||||
public class TestAddStripedBlocks {
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
@ -86,8 +87,8 @@ public void setup() throws IOException {
|
||||
.numDataNodes(groupSize).build();
|
||||
cluster.waitActive();
|
||||
dfs = cluster.getFileSystem();
|
||||
dfs.getClient().setErasureCodingPolicy("/", ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getName());
|
||||
dfs.getClient().setErasureCodingPolicy("/",
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
@ -73,11 +74,11 @@ public void testInvalid() throws Exception {
|
||||
// Test first with an invalid policy
|
||||
expectInvalidPolicy("not-a-policy");
|
||||
// Test with an invalid policy and a valid policy
|
||||
expectInvalidPolicy("not-a-policy," + ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getName());
|
||||
expectInvalidPolicy("not-a-policy," +
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
// Test with a valid and an invalid policy
|
||||
expectInvalidPolicy(ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getName() + ", not-a-policy");
|
||||
expectInvalidPolicy(
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName() + ", not-a-policy");
|
||||
// Some more invalid values
|
||||
expectInvalidPolicy("not-a-policy, ");
|
||||
expectInvalidPolicy(" ,not-a-policy, ");
|
||||
@ -85,8 +86,7 @@ public void testInvalid() throws Exception {
|
||||
|
||||
@Test
|
||||
public void testValid() throws Exception {
|
||||
String ecPolicyName = ErasureCodingPolicyManager.getSystemDefaultPolicy()
|
||||
.getName();
|
||||
String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName();
|
||||
expectValidPolicy(ecPolicyName, 1);
|
||||
expectValidPolicy(ecPolicyName + ", ", 1);
|
||||
expectValidPolicy(",", 0);
|
||||
|
@ -46,6 +46,7 @@
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
@ -99,7 +100,7 @@ private static Configuration getConf() {
|
||||
private static final int NUM_DATA_NODES = 0;
|
||||
|
||||
private final ErasureCodingPolicy testECPolicy
|
||||
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
= StripedFileTestUtil.getDefaultECPolicy();
|
||||
|
||||
@Test
|
||||
public void testDisplayRecentEditLogOpCodes() throws IOException {
|
||||
|
@ -676,7 +676,7 @@ public void testFsckOpenECFiles() throws Exception {
|
||||
setNumFiles(4).build();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
|
||||
ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
final int cellSize = ecPolicy.getCellSize();
|
||||
final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
|
||||
@ -1997,10 +1997,9 @@ public void testECFsck() throws Exception {
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
|
||||
precision);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
|
||||
int dataBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumDataUnits();
|
||||
int parityBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumParityUnits();
|
||||
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
|
||||
int parityBlocks =
|
||||
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
|
||||
int totalSize = dataBlocks + parityBlocks;
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
|
||||
fs = cluster.getFileSystem();
|
||||
@ -2288,12 +2287,10 @@ private void testUpgradeDomain(boolean defineUpgradeDomain,
|
||||
@Test (timeout = 300000)
|
||||
public void testFsckCorruptECFile() throws Exception {
|
||||
DistributedFileSystem fs = null;
|
||||
int dataBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumDataUnits();
|
||||
int parityBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumParityUnits();
|
||||
int cellSize = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getCellSize();
|
||||
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
|
||||
int parityBlocks =
|
||||
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
|
||||
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
|
||||
int totalSize = dataBlocks + parityBlocks;
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(totalSize).build();
|
||||
@ -2308,7 +2305,7 @@ public void testFsckCorruptECFile() throws Exception {
|
||||
Path ecDirPath = new Path("/striped");
|
||||
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
|
||||
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
Path file = new Path(ecDirPath, "corrupted");
|
||||
final int length = cellSize * dataBlocks;
|
||||
final byte[] bytes = StripedFileTestUtil.generateBytes(length);
|
||||
@ -2359,12 +2356,10 @@ public void testFsckCorruptECFile() throws Exception {
|
||||
@Test (timeout = 300000)
|
||||
public void testFsckMissingECFile() throws Exception {
|
||||
DistributedFileSystem fs = null;
|
||||
int dataBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumDataUnits();
|
||||
int parityBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumParityUnits();
|
||||
int cellSize = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getCellSize();
|
||||
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
|
||||
int parityBlocks =
|
||||
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
|
||||
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
|
||||
int totalSize = dataBlocks + parityBlocks;
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(totalSize).build();
|
||||
@ -2374,7 +2369,7 @@ public void testFsckMissingECFile() throws Exception {
|
||||
Path ecDirPath = new Path("/striped");
|
||||
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
|
||||
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
Path file = new Path(ecDirPath, "missing");
|
||||
final int length = cellSize * dataBlocks;
|
||||
final byte[] bytes = StripedFileTestUtil.generateBytes(length);
|
||||
|
@ -725,12 +725,10 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
|
||||
DistributedFileSystem fs = null;
|
||||
try {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
int dataBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumDataUnits();
|
||||
int parityBlocks = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getNumParityUnits();
|
||||
int cellSize = ErasureCodingPolicyManager
|
||||
.getSystemDefaultPolicy().getCellSize();
|
||||
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
|
||||
int parityBlocks =
|
||||
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
|
||||
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
|
||||
int totalSize = dataBlocks + parityBlocks;
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(totalSize).build();
|
||||
@ -740,7 +738,7 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
|
||||
Path ecDirPath = new Path("/striped");
|
||||
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
|
||||
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
Path file = new Path(ecDirPath, "corrupted");
|
||||
final int length = cellSize * dataBlocks;
|
||||
final byte[] bytes = StripedFileTestUtil.generateBytes(length);
|
||||
|
@ -25,6 +25,7 @@
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
@ -46,7 +47,7 @@ public class TestQuotaWithStripedBlocks {
|
||||
private static final int BLOCK_SIZE = 1024 * 1024;
|
||||
private static final long DISK_QUOTA = BLOCK_SIZE * 10;
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private final int parityBlocsk = ecPolicy.getNumParityUnits();
|
||||
private final int groupSize = dataBlocks + parityBlocsk;
|
||||
|
@ -60,7 +60,7 @@ public class TestReconstructStripedBlocks {
|
||||
public static final Logger LOG = LoggerFactory.getLogger(
|
||||
TestReconstructStripedBlocks.class);
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
@ -202,7 +202,7 @@ public void test2RecoveryTasksForSameBlockGroup() throws Exception {
|
||||
DistributedFileSystem fs = cluster.getFileSystem();
|
||||
BlockManager bm = cluster.getNamesystem().getBlockManager();
|
||||
fs.getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
int fileLen = dataBlocks * blockSize;
|
||||
Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
|
||||
final byte[] data = new byte[fileLen];
|
||||
@ -268,7 +268,7 @@ public void testCountLiveReplicas() throws Exception {
|
||||
try {
|
||||
fs.mkdirs(dirPath);
|
||||
fs.setErasureCodingPolicy(dirPath,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
DFSTestUtil.createFile(fs, filePath,
|
||||
cellSize * dataBlocks * 2, (short) 1, 0L);
|
||||
|
||||
|
@ -39,6 +39,7 @@
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.NameNodeProxies;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
@ -321,7 +322,7 @@ public void testDeleteOp() throws Exception {
|
||||
|
||||
// set erasure coding policy
|
||||
dfs.setErasureCodingPolicy(ecDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
|
||||
DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
|
||||
final FSDirectory fsd = fsn.getFSDirectory();
|
||||
@ -423,7 +424,7 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
|
||||
client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
|
||||
// set an EC policy on "/foo" directory
|
||||
client.setErasureCodingPolicy(fooDir,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
|
||||
// write file to fooDir
|
||||
final String barFile = "/foo/bar";
|
||||
|
@ -31,12 +31,12 @@
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
@ -46,7 +46,7 @@
|
||||
|
||||
public class TestOfflineImageViewerWithStripedBlocks {
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
private int dataBlocks = ecPolicy.getNumDataUnits();
|
||||
private int parityBlocks = ecPolicy.getNumParityUnits();
|
||||
|
||||
@ -64,7 +64,7 @@ public void setup() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.waitActive();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
|
||||
StripedFileTestUtil.getDefaultECPolicy().getName());
|
||||
fs = cluster.getFileSystem();
|
||||
Path eczone = new Path("/eczone");
|
||||
fs.mkdirs(eczone);
|
||||
@ -144,7 +144,7 @@ private void testFileSize(int numBytes) throws IOException,
|
||||
// Verify space consumed present in BlockInfoStriped
|
||||
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
|
||||
INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
|
||||
assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
|
||||
assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
|
||||
fileNode.getErasureCodingPolicyID());
|
||||
assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
|
||||
long actualFileSize = 0;
|
||||
|
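A recurring idiom in the tests updated above is deriving cluster sizing from the policy returned by the helper instead of hard-coding block counts; a hedged sketch of that pattern, with the class name invented for illustration:

import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative only: size a striped-file test cluster from the default policy.
class StripedTestSizing {
  static int requiredDataNodes() {
    ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
    int dataBlocks = ecPolicy.getNumDataUnits();
    int parityBlocks = ecPolicy.getNumParityUnits();
    // One DataNode per block in an erasure coding group.
    return dataBlocks + parityBlocks;
  }
}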