From 819808a016e16325502169e0091a16a6b2ae5387 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Thu, 9 Mar 2017 17:29:11 -0800
Subject: [PATCH] HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

---
 .../namenode/ErasureCodingPolicyManager.java | 10 ------
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +-
 .../hdfs/ErasureCodeBenchmarkThroughput.java | 5 +--
 .../hadoop/hdfs/StripedFileTestUtil.java | 12 +++++++
 .../hdfs/TestDFSStripedInputStream.java | 3 +-
 .../hdfs/TestDFSStripedOutputStream.java | 3 +-
 ...TestDFSStripedOutputStreamWithFailure.java | 3 +-
 .../hdfs/TestDecommissionWithStriped.java | 5 ++-
 .../hdfs/TestErasureCodingPolicies.java | 8 ++---
 .../TestErasureCodingPolicyWithSnapshot.java | 3 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java | 5 ++-
 .../hdfs/TestFileStatusWithECPolicy.java | 3 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java | 3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java | 5 ++-
 .../TestReadStripedFileWithMissingBlocks.java | 3 +-
 .../hdfs/TestReconstructStripedFile.java | 7 ++--
 .../hdfs/TestSafeModeWithStripedFile.java | 5 ++-
 .../TestUnsetAndChangeDirectoryEcPolicy.java | 3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java | 5 ++-
 .../hdfs/TestWriteStripedFileWithFailure.java | 5 ++-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java | 12 +++---
 .../hdfs/server/balancer/TestBalancer.java | 5 ++-
 .../blockmanagement/TestBlockInfoStriped.java | 4 +--
 .../TestBlockTokenWithDFSStriped.java | 6 ++--
 .../TestLowRedundancyBlockQueues.java | 4 +--
 ...nstructStripedBlocksWithRackAwareness.java | 10 +++---
 .../TestSequentialBlockGroupId.java | 6 ++--
 .../TestSortLocatedStripedBlock.java | 4 +--
 .../server/datanode/TestBlockRecovery.java | 3 +-
 .../TestDataNodeErasureCodingMetrics.java | 5 ++-
 .../hadoop/hdfs/server/mover/TestMover.java | 5 ++-
 .../TestAddOverReplicatedStripedBlocks.java | 6 ++--
 .../namenode/TestAddStripedBlockInFBR.java | 5 +--
 .../server/namenode/TestAddStripedBlocks.java | 7 ++--
 .../namenode/TestEnabledECPolicies.java | 12 +++---
 .../server/namenode/TestFSEditLogLoader.java | 3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java | 33 ++++++++-----------
 .../server/namenode/TestNameNodeMXBean.java | 12 +++---
 .../namenode/TestQuotaWithStripedBlocks.java | 3 +-
 .../TestReconstructStripedBlocks.java | 6 ++--
 .../server/namenode/TestStripedINodeFile.java | 5 +--
 ...stOfflineImageViewerWithStripedBlocks.java | 8 ++---
 42 files changed, 121 insertions(+), 141 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 02cbbdf38f..29af207299 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -129,16 +129,6 @@ public static ErasureCodingPolicy[] getSystemPolicies() {
     return SYS_POLICIES;
   }
 
-  /**
-   * Get system-wide default policy, which can be used by default
-   * when no policy is specified for a path.
-   * @return ecPolicy
-   */
-  public static ErasureCodingPolicy getSystemDefaultPolicy() {
-    // make this configurable?
-    return SYS_POLICY1;
-  }
-
   /**
    * Get a policy by policy ID.
   * @return ecPolicy, or null if not found
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7bf5cdcf6e..13291952a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1910,7 +1910,7 @@ public static void createStripedFile(MiniDFSCluster cluster, Path file,
       Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
       throws Exception {
     createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
-        toMkdir, ErasureCodingPolicyManager.getSystemDefaultPolicy());
+        toMkdir, StripedFileTestUtil.getDefaultECPolicy());
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
index d1a7569fa0..20ddfd1865 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -42,9 +41,7 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -81,7 +78,7 @@ public class ErasureCodeBenchmarkThroughput
   private static final String EC_FILE_BASE = "ec-file-";
   private static final String TMP_FILE_SUFFIX = ".tmp";
   private static final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private static final byte[] data = new byte[BUFFER_SIZE_MB * 1024 * 1024];
 
   static {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 520d0e3134..8008ed3396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -29,9 +29,11 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -558,4 +560,14 @@ public static LocatedBlocks getLocatedBlocks(Path file,
       throws IOException {
     return fs.getClient().getLocatedBlocks(file.toString(), 0, Long.MAX_VALUE);
   }
+
+  /**
+   * Get system-wide default Erasure Coding Policy, which can be
+   * used by default when no policy is specified for a path.
+   * @return ErasureCodingPolicy
+   */
+  public static ErasureCodingPolicy getDefaultECPolicy() {
+    return ErasureCodingPolicyManager.getPolicyByID(
+        HdfsConstants.RS_6_3_POLICY_ID);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 29ef6943af..68fde95ca8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
@@ -76,7 +75,7 @@ public class TestDFSStripedInputStream {
   public Timeout globalTimeout = new Timeout(300000);
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    return StripedFileTestUtil.getDefaultECPolicy();
   }
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 675f0422c1..ebdecfd640 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -62,7 +61,7 @@ public class TestDFSStripedOutputStream {
   public Timeout globalTimeout = new Timeout(300000);
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    return StripedFileTestUtil.getDefaultECPolicy();
   }
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 9bd29231fd..c66c7f27b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import
org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.erasurecode.CodecUtil; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; @@ -89,7 +88,7 @@ public class TestDFSStripedOutputStreamWithFailure { 9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1; public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getSystemDefaultPolicy(); + return StripedFileTestUtil.getDefaultECPolicy(); } /* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index f8ed0c3f69..a5df85fcb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -83,7 +82,7 @@ public class TestDecommissionWithStriped { private MiniDFSCluster cluster; private DistributedFileSystem dfs; private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private int numDNs; private final int cellSize = ecPolicy.getCellSize(); private final int dataBlocks = ecPolicy.getNumDataUnits(); @@ -143,7 +142,7 @@ public void setup() throws IOException { dfs.mkdirs(ecDir); dfs.setErasureCodingPolicy(ecDir, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index 4a4027b6ea..5ba4403d2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -56,7 +56,7 @@ public class TestErasureCodingPolicies { private DistributedFileSystem fs; private static final int BLOCK_SIZE = 1024; private static final ErasureCodingPolicy EC_POLICY = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private FSNamesystem namesystem; @Rule @@ -95,7 +95,7 @@ public void testReplicatedFileUnderECDir() throws IOException { // set ec policy on dir fs.setErasureCodingPolicy(dir, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); // create a file which should be using ec final Path ecSubDir = new Path(dir, "ecSubDir"); final Path ecFile = new Path(ecSubDir, "ecFile"); @@ -270,7 +270,7 @@ public void testReplication() throws IOException { final Path testDir = new Path("/ec"); fs.mkdir(testDir, FsPermission.getDirDefault()); fs.setErasureCodingPolicy(testDir, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + 
StripedFileTestUtil.getDefaultECPolicy().getName()); final Path fooFile = new Path(testDir, "foo"); // create ec file with replication=0 fs.create(fooFile, FsPermission.getFileDefault(), true, @@ -292,7 +292,7 @@ public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Excepti assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); // dir EC policy after setting ErasureCodingPolicy sysDefaultECPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); fs.getClient().setErasureCodingPolicy(src, sysDefaultECPolicy.getName()); verifyErasureCodingInfo(src, sysDefaultECPolicy); fs.create(new Path(ecDir, "child1")).close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java index 6cf4ef42c7..4833c24d2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.Before; @@ -41,7 +40,7 @@ public class TestErasureCodingPolicyWithSnapshot { private final static int SUCCESS = 0; private final ErasureCodingPolicy sysDefaultPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short groupSize = (short) ( sysDefaultPolicy.getNumDataUnits() + sysDefaultPolicy.getNumParityUnits()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java index 8ae176ffef..7f63e18abd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -47,7 +46,7 @@ public class TestFileChecksum { private static final Logger LOG = LoggerFactory .getLogger(TestFileChecksum.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private int dataBlocks = ecPolicy.getNumDataUnits(); private int parityBlocks = ecPolicy.getNumParityUnits(); @@ -82,7 +81,7 @@ public void setup() throws IOException { Path ecPath = new Path(ecDir); cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault()); cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); client = fs.getClient(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java index d7b73274a5..3be0e8d585 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java @@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.After; import org.junit.Before; @@ -71,7 +70,7 @@ public void testFileStatusWithECPolicy() throws Exception { assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy()); fs.delete(file, true); - final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy(); + final ErasureCodingPolicy ecPolicy1 = StripedFileTestUtil.getDefaultECPolicy(); // set EC policy on dir fs.setErasureCodingPolicy(dir, ecPolicy1.getName()); final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 710ff622b4..2ba5aede85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -57,7 +56,7 @@ public class TestLeaseRecoveryStriped { .getLog(TestLeaseRecoveryStriped.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); private final int parityBlocks = ecPolicy.getNumParityUnits(); private final int cellSize = ecPolicy.getCellSize(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java index d7d9cc1c20..4c6ad99c88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import 
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -68,7 +67,7 @@ public class TestReadStripedFileWithDecoding { private MiniDFSCluster cluster; private DistributedFileSystem fs; private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); @@ -103,7 +102,7 @@ public void setup() throws IOException { false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java index b65626e149..930986f076 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.junit.Assert; import org.junit.Test; import org.junit.Rule; @@ -43,7 +42,7 @@ public class TestReadStripedFileWithMissingBlocks { private DistributedFileSystem fs; private Configuration conf = new HdfsConfiguration(); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private final int cellSize = ecPolicy.getCellSize(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 38939f5482..88edcb40d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo; import org.apache.hadoop.hdfs.util.StripedBlockUtil; @@ -62,7 +61,7 @@ public class TestReconstructStripedFile { public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlkNum = ecPolicy.getNumDataUnits(); private final int parityBlkNum = ecPolicy.getNumParityUnits(); private 
final int cellSize = ecPolicy.getCellSize(); @@ -108,7 +107,7 @@ public void setup() throws IOException { fs = cluster.getFileSystem(); fs.getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); List datanodes = cluster.getDataNodes(); for (int i = 0; i < dnNum; i++) { @@ -418,7 +417,7 @@ public void testProcessErasureCodingTasksSubmitionShouldSucceed() BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo( new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices, - ErasureCodingPolicyManager.getSystemDefaultPolicy()); + StripedFileTestUtil.getDefaultECPolicy()); List ecTasks = new ArrayList<>(); ecTasks.add(invalidECInfo); dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index 0731779681..85538ba916 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.junit.After; @@ -44,7 +43,7 @@ public class TestSafeModeWithStripedFile { private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private final int numDNs = dataBlocks + parityBlocks; @@ -64,7 +63,7 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.waitActive(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java index ec19a744d5..32f6bc8013 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ -51,8 +51,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy { private MiniDFSCluster cluster; private Configuration conf = new Configuration(); private DistributedFileSystem fs; - private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager - .getSystemDefaultPolicy(); + private ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private final int 
cellSize = ecPolicy.getCellSize(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index 76ca704e93..9aac65b411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.ipc.RemoteException; @@ -48,7 +47,7 @@ public class TestWriteReadStripedFile { public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int cellSize = ecPolicy.getCellSize(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); @@ -81,7 +80,7 @@ public void setup() throws IOException { fs = cluster.getFileSystem(); fs.mkdirs(new Path("/ec")); cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java index 23a9821490..03e9e10bf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Assert; @@ -47,7 +46,7 @@ public class TestWriteStripedFileWithFailure { } private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private final int numDNs = dataBlocks + parityBlocks; @@ -60,7 +59,7 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index ff0852809a..ff4b8ec8b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -77,7 +78,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; @@ -228,7 +228,7 @@ private static BlockWithLocations getBlockWithLocations( datanodeUuids, storageIDs, storageTypes); if (isStriped) { blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize()); + StripedFileTestUtil.getDefaultECPolicy().getCellSize()); } return blkLocs; } @@ -720,7 +720,7 @@ public void testBlockECRecoveryCommand() { byte[] liveBlkIndices0 = new byte[2]; BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo( new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0, - liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy()); + liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy()); DatanodeInfo[] dnInfos1 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() }; DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil @@ -734,7 +734,7 @@ public void testBlockECRecoveryCommand() { byte[] liveBlkIndices1 = new byte[2]; BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo( new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1, - liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy()); + liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy()); List blkRecoveryInfosList = new ArrayList(); blkRecoveryInfosList.add(blkECRecoveryInfo0); blkRecoveryInfosList.add(blkECRecoveryInfo1); @@ -823,8 +823,8 @@ private void assertBlockECRecoveryInfoEquals( ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy(); // Compare ECPolicies same as default ECPolicy as we used system default // ECPolicy used in this test - compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1); - compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2); + compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy1); + compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy2); } private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 70aa4e08ca..d9a8e5b165 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -46,7 +46,6 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.junit.AfterClass; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -203,7 +202,7 @@ static void initConfWithRamDisk(Configuration conf, } private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); private final int parityBlocks = ecPolicy.getNumParityUnits(); private final int groupSize = dataBlocks + parityBlocks; @@ -1941,7 +1940,7 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); client.setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); long totalCapacity = sum(capacities); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java index 6a55d8bf33..1040d21857 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.Assert; import org.junit.Rule; @@ -43,7 +43,7 @@ public class TestBlockInfoStriped { private static final long BASE_ID = -1600; private final Block baseBlock = new Block(BASE_ID); private final ErasureCodingPolicy testECPolicy - = ErasureCodingPolicyManager.getSystemDefaultPolicy(); + = StripedFileTestUtil.getDefaultECPolicy(); private final int totalBlocks = testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits(); private final BlockInfoStriped info = new BlockInfoStriped(baseBlock, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 20c3accbd3..82c9fde1d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -20,11 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import 
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.balancer.TestBalancer; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.net.ServerSocketUtil; import org.junit.Rule; @@ -35,7 +35,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); private final int parityBlocks = ecPolicy.getNumParityUnits(); private final int cellSize = ecPolicy.getCellSize(); @@ -84,7 +84,7 @@ public void testRead() throws Exception { .numDataNodes(numDNs) .build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); try { cluster.waitActive(); doTestRead(conf, cluster, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index 2eb7abff88..d853762a6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -20,8 +20,8 @@ import java.util.Iterator; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.Test; @@ -33,7 +33,7 @@ public class TestLowRedundancyBlockQueues { private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private BlockInfo genBlockInfo(long id) { return new BlockInfoContiguous(new Block(id), (short) 3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index 832876e449..9b7c7cd81e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -23,13 +23,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; 
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.net.NetworkTopology; @@ -59,7 +59,7 @@ public class TestReconstructStripedBlocksWithRackAwareness { } private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int cellSize = ecPolicy.getCellSize(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); @@ -151,7 +151,7 @@ public void testReconstructForNotEnoughRacks() throws Exception { cluster.waitActive(); fs = cluster.getFileSystem(); fs.setErasureCodingPolicy(new Path("/"), - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); FSNamesystem fsn = cluster.getNamesystem(); BlockManager bm = fsn.getBlockManager(); @@ -222,7 +222,7 @@ public void testChooseExcessReplicasToDelete() throws Exception { cluster.waitActive(); fs = cluster.getFileSystem(); fs.setErasureCodingPolicy(new Path("/"), - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); MiniDFSCluster.DataNodeProperties lastHost = stopDataNode( hosts[hosts.length - 1]); @@ -276,7 +276,7 @@ public void testReconstructionWithDecommission() throws Exception { cluster.waitActive(); fs = cluster.getFileSystem(); fs.setErasureCodingPolicy(new Path("/"), - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); final BlockManager bm = cluster.getNamesystem().getBlockManager(); final DatanodeManager dm = bm.getDatanodeManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java index 7920c59307..3c18112b56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java @@ -37,9 +37,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; @@ -58,7 +58,7 @@ public class TestSequentialBlockGroupId { .getLog("TestSequentialBlockGroupId"); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final short REPLICATION = 1; private final long SEED = 0; private final int dataBlocks = ecPolicy.getNumDataUnits(); @@ -89,7 +89,7 @@ public void setup() throws Exception { .getBlockIdManager().getBlockGroupIdGenerator(); fs.mkdirs(ecDir); cluster.getFileSystem().getClient().setErasureCodingPolicy("/ecDir", - 
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java index 4db361740b..616b4c340d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; @@ -34,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Time; @@ -54,7 +54,7 @@ public class TestSortLocatedStripedBlock { .getLogger(TestSortLocatedStripedBlock.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int cellSize = ecPolicy.getCellSize(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index b64f1e29f3..9f0011c8a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -137,7 +138,7 @@ public class TestBlockRecovery { public TestName currentTestName = new TestName(); private final int cellSize = - ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize(); + StripedFileTestUtil.getDefaultECPolicy().getCellSize(); private final int bytesPerChecksum = 512; private final int[][][] blockLengthsSuite = { {{11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java index 7036c7a45b..d36db233f5 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; @@ -55,7 +54,7 @@ public class TestDataNodeErasureCodingMetrics { public static final Log LOG = LogFactory. getLog(TestDataNodeErasureCodingMetrics.class); private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); private final int parityBlocks = ecPolicy.getNumParityUnits(); private final int cellSize = ecPolicy.getCellSize(); @@ -76,7 +75,7 @@ public void setup() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index a403ff4bb0..0fcb275595 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.mover.Mover.MLocation; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.minikdc.MiniKdc; @@ -478,7 +477,7 @@ public void testMoverFailedRetry() throws Exception { } private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemDefaultPolicy(); + StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); private final int parityBlocks = ecPolicy.getNumParityUnits(); private final int cellSize = ecPolicy.getCellSize(); @@ -538,7 +537,7 @@ public void testMoverWithStripedFile() throws Exception { HdfsConstants.HOT_STORAGE_POLICY_NAME); // set an EC policy on "/bar" directory client.setErasureCodingPolicy(barDir, - ErasureCodingPolicyManager.getSystemDefaultPolicy().getName()); + StripedFileTestUtil.getDefaultECPolicy().getName()); // write file to barDir final String fooFile = "/bar/foo"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java index 670efd6955..6a4cd32a58 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -56,7 +56,7 @@ public class TestAddOverReplicatedStripedBlocks {
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final short groupSize = (short) (dataBlocks + parityBlocks);
@@ -82,7 +82,7 @@ public void setup() throws IOException {
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
     fs.getClient().setErasureCodingPolicy(dirPath.toString(),
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After
@@ -192,7 +192,7 @@ public void testProcessOverReplicatedAndCorruptStripedBlock()
     long groupId = bg.getBlock().getBlockId();
     Block blk = new Block(groupId, blockSize, gs);
     BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
-        ErasureCodingPolicyManager.getSystemDefaultPolicy());
+        StripedFileTestUtil.getDefaultECPolicy());
     for (int i = 0; i < groupSize; i++) {
       blk.setBlockId(groupId + i);
       cluster.injectBlocks(i, Arrays.asList(blk), bpid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
index 87fbcc63c5..a3afa93e3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -45,7 +46,7 @@
 
 public class TestAddStripedBlockInFBR {
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -88,7 +89,7 @@ public void testAddBlockInFullBlockReport() throws Exception {
     dfs.mkdirs(ecDir);
     dfs.mkdirs(repDir);
     dfs.getClient().setErasureCodingPolicy(ecDir.toString(),
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
 
     // create several non-EC files and one EC file
     final Path[] repFiles = new Path[groupSize];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 2df1aa4985..2eb23b9f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -67,7 +68,7 @@
 
 public class TestAddStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final int cellSize = ecPolicy.getCellSize();
@@ -86,8 +87,8 @@ public void setup() throws IOException {
         .numDataNodes(groupSize).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
-    dfs.getClient().setErasureCodingPolicy("/", ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getName());
+    dfs.getClient().setErasureCodingPolicy("/",
+        StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
index dd4ae0b81a..7f18d18d73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -73,11 +74,11 @@ public void testInvalid() throws Exception {
     // Test first with an invalid policy
     expectInvalidPolicy("not-a-policy");
     // Test with an invalid policy and a valid policy
-    expectInvalidPolicy("not-a-policy," + ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getName());
+    expectInvalidPolicy("not-a-policy," +
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     // Test with a valid and an invalid policy
-    expectInvalidPolicy(ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getName() + ", not-a-policy");
+    expectInvalidPolicy(
+        StripedFileTestUtil.getDefaultECPolicy().getName() + ", not-a-policy");
     // Some more invalid values
     expectInvalidPolicy("not-a-policy, ");
     expectInvalidPolicy(" ,not-a-policy, ");
@@ -85,8 +86,7 @@
 
   @Test
   public void testValid() throws Exception {
-    String ecPolicyName = ErasureCodingPolicyManager.getSystemDefaultPolicy()
-        .getName();
+    String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName();
     expectValidPolicy(ecPolicyName, 1);
     expectValidPolicy(ecPolicyName + ", ", 1);
     expectValidPolicy(",", 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 72d76b77ef..104727d83a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -99,7 +100,7 @@ private static Configuration getConf() {
 
   private static final int NUM_DATA_NODES = 0;
   private final ErasureCodingPolicy testECPolicy
-      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      = StripedFileTestUtil.getDefaultECPolicy();
 
   @Test
   public void testDisplayRecentEditLogOpCodes() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 76c5378062..b94e6108c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -676,7 +676,7 @@ public void testFsckOpenECFiles() throws Exception {
         setNumFiles(4).build();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     ErasureCodingPolicy ecPolicy =
-        ErasureCodingPolicyManager.getSystemDefaultPolicy();
+        StripedFileTestUtil.getDefaultECPolicy();
     final int dataBlocks = ecPolicy.getNumDataUnits();
     final int cellSize = ecPolicy.getCellSize();
     final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
@@ -1997,10 +1997,9 @@ public void testECFsck() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    int dataBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumDataUnits();
-    int parityBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumParityUnits();
+    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
+    int parityBlocks =
+        StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int totalSize = dataBlocks + parityBlocks;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
@@ -2288,12 +2287,10 @@ private void testUpgradeDomain(boolean defineUpgradeDomain,
   @Test (timeout = 300000)
   public void testFsckCorruptECFile() throws Exception {
     DistributedFileSystem fs = null;
-    int dataBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumDataUnits();
-    int parityBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumParityUnits();
-    int cellSize = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getCellSize();
+    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
+    int parityBlocks =
+        StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
+    int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(totalSize).build();
@@ -2308,7 +2305,7 @@ public void testFsckCorruptECFile() throws Exception {
     Path ecDirPath = new Path("/striped");
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
     fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     Path file = new Path(ecDirPath, "corrupted");
     final int length = cellSize * dataBlocks;
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);
@@ -2359,12 +2356,10 @@ public void testFsckCorruptECFile() throws Exception {
   @Test (timeout = 300000)
   public void testFsckMissingECFile() throws Exception {
     DistributedFileSystem fs = null;
-    int dataBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumDataUnits();
-    int parityBlocks = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getNumParityUnits();
-    int cellSize = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy().getCellSize();
+    int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
+    int parityBlocks =
+        StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
+    int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(totalSize).build();
@@ -2374,7 +2369,7 @@ public void testFsckMissingECFile() throws Exception {
     Path ecDirPath = new Path("/striped");
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
     fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     Path file = new Path(ecDirPath, "missing");
     final int length = cellSize * dataBlocks;
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index d4d4763731..231f75fc90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -725,12 +725,10 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
     DistributedFileSystem fs = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      int dataBlocks = ErasureCodingPolicyManager
-          .getSystemDefaultPolicy().getNumDataUnits();
-      int parityBlocks = ErasureCodingPolicyManager
-          .getSystemDefaultPolicy().getNumParityUnits();
-      int cellSize = ErasureCodingPolicyManager
-          .getSystemDefaultPolicy().getCellSize();
+      int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
+      int parityBlocks =
+          StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
+      int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
       int totalSize = dataBlocks + parityBlocks;
       cluster = new MiniDFSCluster.Builder(conf)
           .numDataNodes(totalSize).build();
@@ -740,7 +738,7 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
       Path ecDirPath = new Path("/striped");
       fs.mkdir(ecDirPath, FsPermission.getDirDefault());
       fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
-          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       Path file = new Path(ecDirPath, "corrupted");
       final int length = cellSize * dataBlocks;
       final byte[] bytes = StripedFileTestUtil.generateBytes(length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 326ddc84c6..1e836dc09b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -46,7 +47,7 @@ public class TestQuotaWithStripedBlocks {
   private static final int BLOCK_SIZE = 1024 * 1024;
   private static final long DISK_QUOTA = BLOCK_SIZE * 10;
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
   private final int parityBlocsk = ecPolicy.getNumParityUnits();
   private final int groupSize = dataBlocks + parityBlocsk;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 5e4a9db39d..01a8e08bb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -60,7 +60,7 @@ public class TestReconstructStripedBlocks {
   public static final Logger LOG = LoggerFactory.getLogger(
       TestReconstructStripedBlocks.class);
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -202,7 +202,7 @@ public void test2RecoveryTasksForSameBlockGroup() throws Exception {
     DistributedFileSystem fs = cluster.getFileSystem();
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     fs.getClient().setErasureCodingPolicy("/",
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     int fileLen = dataBlocks * blockSize;
     Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
     final byte[] data = new byte[fileLen];
@@ -268,7 +268,7 @@ public void testCountLiveReplicas() throws Exception {
     try {
       fs.mkdirs(dirPath);
      fs.setErasureCodingPolicy(dirPath,
-          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2,
           (short) 1, 0L);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index ae9793a6be..f1610b1839 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -321,7 +322,7 @@ public void testDeleteOp() throws Exception {
 
       // set erasure coding policy
       dfs.setErasureCodingPolicy(ecDir,
-          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
       DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
       final FSDirectory fsd = fsn.getFSDirectory();
@@ -423,7 +424,7 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
     client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
     // set an EC policy on "/foo" directory
     client.setErasureCodingPolicy(fooDir,
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
 
     // write file to fooDir
     final String barFile = "/foo/bar";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index e7794d6c9e..0bfa054302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -31,12 +31,12 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -46,7 +46,7 @@
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      StripedFileTestUtil.getDefaultECPolicy();
   private int dataBlocks = ecPolicy.getNumDataUnits();
   private int parityBlocks = ecPolicy.getNumParityUnits();
 
@@ -64,7 +64,7 @@ public void setup() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+        StripedFileTestUtil.getDefaultECPolicy().getName());
     fs = cluster.getFileSystem();
     Path eczone = new Path("/eczone");
     fs.mkdirs(eczone);
@@ -144,7 +144,7 @@ private void testFileSize(int numBytes) throws IOException,
     // Verify space consumed present in BlockInfoStriped
     FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
     INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-    assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
+    assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
         fileNode.getErasureCodingPolicyID());
     assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
     long actualFileSize = 0;