HDFS-13642. Creating a file with block size smaller than EC policy's cell size should fail.

Author: Xiao Chen
Date:   2018-06-08 15:13:38 -07:00
Parent: a1272448bf
Commit: cf4108313d
9 changed files with 58 additions and 23 deletions
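For context, a minimal client-side sketch of the behavior this change enforces (not part of the patch; the cluster setup, the /ecDir path, and the use of the built-in RS-6-3-1024k policy with its 1 MB cell size are assumptions for illustration):

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class BlockSizeBelowCellSizeSketch {
  // Hypothetical illustration only: dfs points at a running cluster and
  // /ecDir already exists; both are assumptions, not part of this commit.
  static void demo(DistributedFileSystem dfs) {
    try (FSDataOutputStream out = dfs
        .createFile(new Path("/ecDir/too-small"))
        .ecPolicyName("RS-6-3-1024k")   // built-in policy, cell size 1048576
        .blockSize(512 * 1024)          // 512 KB, i.e. below the cell size
        .build()) {
      out.write("never reached".getBytes());
    } catch (IOException expected) {
      // After this change the create fails here with:
      // "Specified block size (...) is less than the cell size (...)"
    }
  }
}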

View File

@@ -19,6 +19,7 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.XAttr;
@@ -344,16 +345,28 @@ static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
}
/**
* Check if the file or directory has an erasure coding policy.
* Get the erasure coding policy information for the specified path and policy
* name. If an ec policy name is given, it will be parsed and the corresponding
* policy will be returned. Otherwise, get the policy from the parents of the
* iip.
*
* @param fsn namespace
* @param ecPolicyName the ec policy name
* @param iip inodes in the path containing the file
* @return Whether the file or directory has an erasure coding policy.
* @return {@link ErasureCodingPolicy}, or null if no policy is found
* @throws IOException
*/
static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
final INodesInPath iip) throws IOException {
return unprotectedGetErasureCodingPolicy(fsn, iip) != null;
static ErasureCodingPolicy getErasureCodingPolicy(FSNamesystem fsn,
String ecPolicyName, INodesInPath iip) throws IOException {
ErasureCodingPolicy ecPolicy;
if (!StringUtils.isEmpty(ecPolicyName)) {
ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
fsn, ecPolicyName);
} else {
ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
fsn, iip);
}
return ecPolicy;
}
/**

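A brief sketch of how callers are expected to use the consolidated lookup above (hypothetical; fsn, ecPolicyName, and iip are assumed to be in scope inside the NameNode, mirroring the call sites changed below):

// Hypothetical caller sketch: one lookup now covers both cases - an
// explicitly requested policy name wins, otherwise the policy inherited
// from the path (iip) is used; null means plain replication.
ErasureCodingPolicy ecPolicy =
    FSDirErasureCodingOp.getErasureCodingPolicy(fsn, ecPolicyName, iip);
if (ecPolicy != null && !ecPolicy.isReplicationPolicy()) {
  // striped (erasure coded) write path
} else {
  // replicated write path
}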
View File

@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.AddBlockFlag;
@@ -543,13 +542,8 @@ private static INodesInPath addFile(
boolean isStriped = false;
ErasureCodingPolicy ecPolicy = null;
if (!shouldReplicate) {
if (!StringUtils.isEmpty(ecPolicyName)) {
ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
fsd.getFSNamesystem(), ecPolicyName);
} else {
ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
fsd.getFSNamesystem(), existing);
}
ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsd.getFSNamesystem(), ecPolicyName, existing);
if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
isStriped = true;
}

View File

@@ -2403,11 +2403,6 @@ private HdfsFileStatus startFileInt(String src,
iip = FSDirWriteFileOp.resolvePathForStartFile(
dir, pc, src, flag, createParent);
if (shouldReplicate ||
(org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName) &&
!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip))) {
blockManager.verifyReplication(src, replication, clientMachine);
}
if (blockSize < minBlockSize) {
throw new IOException("Specified block size is less than configured" +
@@ -2415,6 +2410,22 @@ private HdfsFileStatus startFileInt(String src,
+ "): " + blockSize + " < " + minBlockSize);
}
if (shouldReplicate) {
blockManager.verifyReplication(src, replication, clientMachine);
} else {
final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
.getErasureCodingPolicy(this, ecPolicyName, iip);
if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
if (blockSize < ecPolicy.getCellSize()) {
throw new IOException("Specified block size (" + blockSize
+ ") is less than the cell size (" + ecPolicy.getCellSize()
+") of the erasure coding policy (" + ecPolicy + ").");
}
} else {
blockManager.verifyReplication(src, replication, clientMachine);
}
}
FileEncryptionInfo feInfo = null;
if (!iip.isRaw() && provider != null) {
EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(

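To make the new guard concrete, a small illustrative sketch (not code from the patch; it assumes the built-in RS-6-3-1024k policy, whose cell size is 1048576 bytes):

// Illustrative numbers only: the check compares the requested block size
// against the EC policy's cell size, not the data/parity unit counts.
long cellSize = 1024 * 1024;   // RS-6-3-1024k cell size = 1048576 bytes
long requested = 512 * 1024;   // client-requested block size (524288 bytes)
if (requested < cellSize) {
  // rejected: "Specified block size (524288) is less than the cell size (1048576) ..."
}
// A request of 1048576 bytes or more passes this check; the pre-existing
// dfs.namenode.fs-limits.min-block-size check above still runs first.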
View File

@@ -1559,8 +1559,9 @@ public static void runOperations(MiniDFSCluster cluster,
out.write("replicated".getBytes());
}
try (FSDataOutputStream out = filesystem.createFile(
new Path(ecDir, "RS-3-2")).ecPolicyName(ecPolicyRS32.getName()).build()) {
try (FSDataOutputStream out = filesystem
.createFile(new Path(ecDir, "RS-3-2"))
.ecPolicyName(ecPolicyRS32.getName()).blockSize(1024 * 1024).build()) {
out.write("RS-3-2".getBytes());
}
}

View File

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -221,4 +222,19 @@ private void testOneFile(String src, int writeBytes) throws Exception {
StripedFileTestUtil.checkData(fs, testPath, writeBytes,
new ArrayList<DatanodeInfo>(), null, blockSize * dataBlocks);
}
@Test
public void testFileBlockSizeSmallerThanCellSize() throws Exception {
final Path path = new Path("testFileBlockSizeSmallerThanCellSize");
final byte[] bytes = StripedFileTestUtil.generateBytes(cellSize * 2);
try {
DFSTestUtil.writeFile(fs, path, bytes, cellSize / 2);
fail("Creating a file with block size smaller than "
+ "ec policy's cell size should fail");
} catch (IOException expected) {
LOG.info("Caught expected exception", expected);
GenericTestUtils
.assertExceptionContains("less than the cell size", expected);
}
}
}

View File

@@ -71,7 +71,7 @@ public class TestErasureCodingExerciseAPIs {
private DistributedFileSystem fs;
private HdfsAdmin dfsAdmin;
private FileSystemTestWrapper fsWrapper;
private static final int BLOCK_SIZE = 1 << 14; // 16k
private static final int BLOCK_SIZE = 1 << 20; // 1MB
private ErasureCodingPolicy ecPolicy;
private static ErasureCodingPolicy getEcPolicy() {

View File

@@ -63,7 +63,7 @@ public class TestErasureCodingPolicies {
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private static final int BLOCK_SIZE = 16 * 1024;
private static final int BLOCK_SIZE = 1024 * 1024;
private ErasureCodingPolicy ecPolicy;
private FSNamesystem namesystem;

View File

@@ -1510,7 +1510,7 @@
<REPLICATION>1</REPLICATION>
<MTIME>1512607204120</MTIME>
<ATIME>1512607204120</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<BLOCKSIZE>1048576</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>