HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. Contributed by Takanobu Asanuma.

Author: Andrew Wang
Date: 2017-10-10 17:35:49 -07:00
Parent: 78af6cdc53
Commit: a297fb0886
2 changed files with 76 additions and 14 deletions

org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java

@@ -44,27 +44,39 @@
  * Make sure we correctly update the quota usage with the striped blocks.
  */
 public class TestQuotaWithStripedBlocks {
-  private static final int BLOCK_SIZE = 1024 * 1024;
-  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocsk = ecPolicy.getNumParityUnits();
-  private final int groupSize = dataBlocks + parityBlocsk;
-  private final int cellSize = ecPolicy.getCellSize();
-  private static final Path ecDir = new Path("/ec");
+  private int blockSize;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocsk;
+  private int groupSize;
+  private int cellSize;
+  private Path ecDir;
+  private long diskQuota;
 
   private MiniDFSCluster cluster;
   private FSDirectory dir;
   private DistributedFileSystem dfs;
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
   @Before
   public void setUp() throws IOException {
+    blockSize = 1024 * 1024;
+    ecPolicy = getEcPolicy();
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocsk = ecPolicy.getNumParityUnits();
+    groupSize = dataBlocks + parityBlocsk;
+    cellSize = ecPolicy.getCellSize();
+    ecDir = new Path("/ec");
+    diskQuota = blockSize * (groupSize + 1);
     final Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();
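
Note on the restructuring above: the former final fields become instance fields assigned in setUp() because the policy is now supplied by the overridable getEcPolicy() hook. Had the base class kept field initializers that call the overridable method, they would run before any subclass constructor body and observe subclass state that is still null; deferring to setUp() (which JUnit runs after construction) avoids this. A standalone sketch of that pitfall, illustrative only and not part of the patch:

public class InitOrderDemo {
  static class Base {
    // Field initializers run before a subclass constructor body executes.
    final String policy = pick();
    String pick() { return "default"; }
  }

  static class Sub extends Base {
    private String chosen = "random";
    @Override
    String pick() { return chosen; } // dispatched during Base init: chosen is still null
  }

  public static void main(String[] args) {
    System.out.println(new Sub().policy); // prints "null", not "random"
  }
}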
@@ -75,8 +87,8 @@ public void setUp() throws IOException {
     dfs.mkdirs(ecDir);
     dfs.getClient()
         .setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
-    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
-    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
+    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, diskQuota);
+    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, diskQuota);
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
   }
@@ -112,8 +124,8 @@ public void testUpdatingQuotaCount() throws Exception {
     final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
     // When we add a new block we update the quota using the full block size.
-    Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
-    Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);
+    Assert.assertEquals(blockSize * groupSize, spaceUsed);
+    Assert.assertEquals(blockSize * groupSize, diskUsed);
     dfs.getClient().getNamenode().complete(file.toString(),
         dfs.getClient().getClientName(), previous, fileNode.getId());
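
The quota is also resized from the fixed BLOCK_SIZE * 10 to blockSize * (groupSize + 1) so that it scales with whichever policy the subclass supplies. With the default RS(6,3) policy (groupSize = 9) the two formulas agree at 10 MiB, while a wider random policy such as RS(10,4) gets 15 MiB. A quick arithmetic sketch; the local names are illustrative and simply mirror the test's fields:

// Illustrative arithmetic only, for the default RS(6,3) policy.
int blockSize = 1024 * 1024;                          // 1 MiB
int groupSize = 6 + 3;                                // data + parity units
long diskQuota = (long) blockSize * (groupSize + 1);  // 10 MiB, same as the old BLOCK_SIZE * 10
long oneGroup = (long) blockSize * groupSize;         // 9 MiB charged when a block group is allocated
// The quota leaves room for exactly one full block group plus one spare block,
// which is what the assertions above rely on.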

org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocksWithRandomECPolicy.java (new file)

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestQuotaWithStripedBlocks to use a random
+ * (non-default) EC policy.
+ */
+public class TestQuotaWithStripedBlocksWithRandomECPolicy extends
+    TestQuotaWithStripedBlocks {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestQuotaWithStripedBlocksWithRandomECPolicy.class);
+
+  private ErasureCodingPolicy ecPolicy;
+
+  public TestQuotaWithStripedBlocksWithRandomECPolicy() {
+    // If you want to debug this test with a specific ec policy, please use
+    // SystemErasureCodingPolicies class.
+    // e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
+    ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
+    LOG.info("run {} with {}.",
+        TestQuotaWithStripedBlocksWithRandomECPolicy.class
+            .getSuperclass().getSimpleName(), ecPolicy.getName());
+  }
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ecPolicy;
+  }
+}
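
The subclass works entirely through the getEcPolicy() hook added to the base class above. Following the debugging hint in the constructor comment, a variant could pin one specific system policy instead of drawing a random one; a minimal sketch, where the class name is hypothetical and not part of this commit:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

/** Hypothetical variant that pins RS(3,2) for reproducible debugging. */
public class TestQuotaWithStripedBlocksWithRS32Policy
    extends TestQuotaWithStripedBlocks {
  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // getByID looks up a built-in system policy by its byte id.
    return SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
  }
}

Because setUp() sizes the cluster (numDataNodes(groupSize)) and the quota from whatever getEcPolicy() returns, such a subclass needs no other changes.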