HDFS-13209. DistributedFileSystem.create should allow an option to provide StoragePolicy. Contributed by Ayush Saxena.

Surendra Singh Lilhore 2019-02-14 22:13:14 +05:30
parent 080a421911
commit 0d7a5ac5f5
25 changed files with 159 additions and 59 deletions

View File

@@ -1211,13 +1211,31 @@ public DFSOutputStream create(String src, FsPermission permission,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
String ecPolicyName) throws IOException {
return create(src, permission, flag, createParent, replication, blockSize,
progress, buffersize, checksumOpt, favoredNodes, ecPolicyName, null);
}
/**
* Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
* Progressable, int, ChecksumOpt, InetSocketAddress[], String)} with the
* addition of storagePolicy, which specifies an explicit storage policy for
* the new file instead of inheriting one from its parent directory. The
* policy is persisted in HDFS. A value of null means the file inherits its
* parent directory's policy.
*/
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
String ecPolicyName, String storagePolicy)
throws IOException {
checkOpen();
final FsPermission masked = applyUMask(permission);
LOG.debug("{}: masked={}", src, masked);
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
dfsClientConf.createChecksum(checksumOpt),
getFavoredNodesStr(favoredNodes), ecPolicyName);
getFavoredNodesStr(favoredNodes), ecPolicyName, storagePolicy);
beginFileLease(result.getFileId(), result);
return result;
}
@@ -1271,7 +1289,7 @@ public DFSOutputStream primitiveCreate(String src, FsPermission absPermission,
DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
flag, createParent, replication, blockSize, progress, checksum,
null, null);
null, null, null);
}
beginFileLease(result.getFileId(), result);
return result;

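For reference, a direct call to the new DFSClient overload looks like the sketch below. This is illustrative only, not part of the commit: it assumes an already-initialized DFSClient "client" plus the usual org.apache.hadoop.fs and org.apache.hadoop.hdfs imports; the path and policy name are made up, and the optional arguments (progress, checksumOpt, favoredNodes, ecPolicyName) are simply left null.

// Sketch: create a file pinned to the COLD storage policy via DFSClient.
DFSOutputStream out = client.create(
    "/archive/data.log",              // src (illustrative path)
    new FsPermission((short) 0644),   // permission (client applies umask)
    EnumSet.of(CreateFlag.CREATE),    // flag
    true,                             // createParent
    (short) 3,                        // replication
    128 * 1024 * 1024L,               // blockSize
    null,                             // progress
    4096,                             // buffersize
    null,                             // checksumOpt
    null,                             // favoredNodes
    null,                             // ecPolicyName
    "COLD");                          // storagePolicy (new in this change)
out.close();                          // completes the file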
View File

@@ -260,7 +260,8 @@ protected DFSOutputStream(DFSClient dfsClient, String src,
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress,
DataChecksum checksum, String[] favoredNodes, String ecPolicyName)
DataChecksum checksum, String[] favoredNodes, String ecPolicyName,
String storagePolicy)
throws IOException {
try (TraceScope ignored =
dfsClient.newPathTraceScope("newStreamForCreate", src)) {
@@ -275,7 +276,8 @@ static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
try {
stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
new EnumSetWritable<>(flag), createParent, replication,
blockSize, SUPPORTED_CRYPTO_VERSIONS, ecPolicyName);
blockSize, SUPPORTED_CRYPTO_VERSIONS, ecPolicyName,
storagePolicy);
break;
} catch (RemoteException re) {
IOException e = re.unwrapRemoteException(

View File

@@ -563,13 +563,17 @@ public FSDataOutputStream next(final FileSystem fs, final Path p)
* replication policy from its ancestor (the default).
* ecPolicyName and SHOULD_REPLICATE CreateFlag are mutually exclusive. It's
* invalid to set both SHOULD_REPLICATE and a non-null ecPolicyName.
* The third addition is storagePolicyName. A non-null storagePolicyName
* specifies an explicit storage policy for this file, overriding the
* inherited policy.
*/
private HdfsDataOutputStream create(final Path f,
final FsPermission permission, final EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final InetSocketAddress[] favoredNodes, final String ecPolicyName)
final InetSocketAddress[] favoredNodes, final String ecPolicyName,
final String storagePolicy)
throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.CREATE);
@@ -579,7 +583,7 @@ private HdfsDataOutputStream create(final Path f,
public HdfsDataOutputStream doCall(final Path p) throws IOException {
final DFSOutputStream out = dfs.create(getPathName(f), permission,
flag, true, replication, blockSize, progress, bufferSize,
checksumOpt, favoredNodes, ecPolicyName);
checksumOpt, favoredNodes, ecPolicyName, storagePolicy);
return dfs.createWrappedOutputStream(out, statistics);
}
@Override
@@ -588,7 +592,8 @@ public HdfsDataOutputStream next(final FileSystem fs, final Path p)
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
return myDfs.create(p, permission, flag, bufferSize, replication,
blockSize, progress, checksumOpt, favoredNodes, ecPolicyName);
blockSize, progress, checksumOpt, favoredNodes, ecPolicyName,
storagePolicy);
}
throw new UnsupportedOperationException("Cannot create with" +
" favoredNodes through a symlink to a non-DistributedFileSystem: "
@@ -619,14 +624,15 @@ protected HdfsDataOutputStream primitiveCreate(Path f,
*
* @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
* ChecksumOpt, InetSocketAddress[], String) for the descriptions of
* additional parameters, i.e., favoredNodes and ecPolicyName.
* additional parameters, i.e., favoredNodes, ecPolicyName and
* storagePolicyName.
*/
private HdfsDataOutputStream createNonRecursive(final Path f,
final FsPermission permission, final EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final InetSocketAddress[] favoredNodes, final String ecPolicyName)
throws IOException {
final InetSocketAddress[] favoredNodes, final String ecPolicyName,
final String storagePolicyName) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.CREATE);
Path absF = fixRelativePart(f);
@@ -635,7 +641,7 @@ private HdfsDataOutputStream createNonRecursive(final Path f,
public HdfsDataOutputStream doCall(final Path p) throws IOException {
final DFSOutputStream out = dfs.create(getPathName(f), permission,
flag, false, replication, blockSize, progress, bufferSize,
checksumOpt, favoredNodes, ecPolicyName);
checksumOpt, favoredNodes, ecPolicyName, storagePolicyName);
return dfs.createWrappedOutputStream(out, statistics);
}
@Override
@@ -645,7 +651,7 @@ public HdfsDataOutputStream next(final FileSystem fs, final Path p)
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
return myDfs.createNonRecursive(p, permission, flag, bufferSize,
replication, blockSize, progress, checksumOpt, favoredNodes,
ecPolicyName);
ecPolicyName, storagePolicyName);
}
throw new UnsupportedOperationException("Cannot create with" +
" favoredNodes through a symlink to a non-DistributedFileSystem: "
@@ -3183,6 +3189,7 @@ public static final class HdfsDataOutputStreamBuilder
private final DistributedFileSystem dfs;
private InetSocketAddress[] favoredNodes = null;
private String ecPolicyName = null;
private String storagePolicyName = null;
/**
* Construct a HdfsDataOutputStream builder for a file.
@@ -3254,6 +3261,22 @@ public HdfsDataOutputStreamBuilder noLocalWrite() {
return this;
}
@VisibleForTesting
String getStoragePolicyName() {
return storagePolicyName;
}
/**
* Enforce the specified storage policy on the file, irrespective of the
* storage policy of its parent directory.
*/
public HdfsDataOutputStreamBuilder storagePolicyName(
@Nonnull final String policyName) {
Preconditions.checkNotNull(policyName);
storagePolicyName = policyName;
return this;
}
@VisibleForTesting
String getEcPolicyName() {
return ecPolicyName;
@@ -3320,11 +3343,12 @@ public FSDataOutputStream build() throws IOException {
return dfs.create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(),
getProgress(), getChecksumOpt(), getFavoredNodes(),
getEcPolicyName());
getEcPolicyName(), getStoragePolicyName());
} else {
return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(), getProgress(),
getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
getChecksumOpt(), getFavoredNodes(), getEcPolicyName(),
getStoragePolicyName());
}
} else if (getFlags().contains(CreateFlag.APPEND)) {
return dfs.append(getPath(), getFlags(), getBufferSize(), getProgress(),

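From an application's point of view, the builder is the intended entry point for this feature. A minimal usage sketch (illustrative, not part of the diff: it assumes a reachable HDFS and a DistributedFileSystem handle "fs"; the path, payload, and policy name are made up, while storagePolicyName() is the builder method added above and is also exercised by the new test later in this commit):

// Pin the new file to COLD regardless of the parent directory's policy.
try (FSDataOutputStream out = fs.createFile(new Path("/archive/part-0000"))
    .storagePolicyName("COLD")
    .build()) {
  out.writeBytes("payload");
}

Omitting storagePolicyName() keeps the old behavior: the file inherits whatever policy is in effect on its parent directory.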
View File

@@ -176,6 +176,7 @@ LocatedBlocks getBlockLocations(String src, long offset, long length)
* policy. ecPolicyName and SHOULD_REPLICATE CreateFlag
* are mutually exclusive. It's invalid to set both
* SHOULD_REPLICATE flag and a non-null ecPolicyName.
* @param storagePolicy the name of the storage policy.
*
* @return the status of the created file; it can be null if the server
* doesn't support returning the file status
@@ -209,7 +210,8 @@ LocatedBlocks getBlockLocations(String src, long offset, long length)
HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy)
throws IOException;
/**

View File

@@ -345,7 +345,8 @@ public FsServerDefaults getServerDefaults() throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy)
throws IOException {
CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
.setSrc(src)
@@ -358,6 +359,9 @@ public HdfsFileStatus create(String src, FsPermission masked,
if (ecPolicyName != null) {
builder.setEcPolicyName(ecPolicyName);
}
if (storagePolicy != null) {
builder.setStoragePolicy(storagePolicy);
}
FsPermission unmasked = masked.getUnmasked();
if (unmasked != null) {
builder.setUnmasked(PBHelperClient.convert(unmasked));

View File

@@ -83,6 +83,7 @@ message CreateRequestProto {
repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
optional FsPermissionProto unmasked = 9;
optional string ecPolicyName = 10;
optional string storagePolicy = 11;
}
message CreateResponseProto {

View File

@@ -192,7 +192,8 @@ public FsServerDefaults getServerDefaults() throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy)
throws IOException {
rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
@@ -213,9 +214,9 @@ public HdfsFileStatus create(String src, FsPermission masked,
new Class<?>[] {String.class, FsPermission.class, String.class,
EnumSetWritable.class, boolean.class, short.class,
long.class, CryptoProtocolVersion[].class,
String.class},
String.class, String.class},
createLocation.getDest(), masked, clientName, flag, createParent,
replication, blockSize, supportedVersions, ecPolicyName);
replication, blockSize, supportedVersions, ecPolicyName, storagePolicy);
return (HdfsFileStatus) rpcClient.invokeSingle(createLocation, method);
}

View File

@@ -500,10 +500,11 @@ public FsServerDefaults getServerDefaults() throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy)
throws IOException {
return clientProto.create(src, masked, clientName, flag, createParent,
replication, blockSize, supportedVersions, ecPolicyName);
replication, blockSize, supportedVersions, ecPolicyName, storagePolicy);
}
/**

View File

@@ -884,7 +884,7 @@ public void testProxyGetAdditionalDatanode()
HdfsFileStatus status = routerProtocol.create(
newRouterFile, new FsPermission("777"), clientName,
new EnumSetWritable<CreateFlag>(createFlag), true, (short) 1,
(long) 1024, CryptoProtocolVersion.supported(), null);
(long) 1024, CryptoProtocolVersion.supported(), null, null);
// Add a block via router (requires client to have same lease)
LocatedBlock block = routerProtocol.addBlock(

View File

@@ -478,7 +478,7 @@ public CreateResponseProto create(RpcController controller,
(short) req.getReplication(), req.getBlockSize(),
PBHelperClient.convertCryptoProtocolVersions(
req.getCryptoProtocolVersionList()),
req.getEcPolicyName());
req.getEcPolicyName(), req.getStoragePolicy());
if (result != null) {
return CreateResponseProto.newBuilder().setFs(PBHelperClient.convert(result))

View File

@@ -361,7 +361,8 @@ static HdfsFileStatus startFile(
EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize,
FileEncryptionInfo feInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
boolean shouldReplicate, String ecPolicyName, boolean logRetryEntry)
boolean shouldReplicate, String ecPolicyName, String storagePolicy,
boolean logRetryEntry)
throws IOException {
assert fsn.hasWriteLock();
boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
@@ -396,7 +397,7 @@ static HdfsFileStatus startFile(
if (parent != null) {
iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
replication, blockSize, holder, clientMachine, shouldReplicate,
ecPolicyName);
ecPolicyName, storagePolicy);
newNode = iip != null ? iip.getLastINode().asFile() : null;
}
if (newNode == null) {
@@ -540,7 +541,7 @@ private static INodesInPath addFile(
FSDirectory fsd, INodesInPath existing, byte[] localName,
PermissionStatus permissions, short replication, long preferredBlockSize,
String clientName, String clientMachine, boolean shouldReplicate,
String ecPolicyName) throws IOException {
String ecPolicyName, String storagePolicy) throws IOException {
Preconditions.checkNotNull(existing);
long modTime = now();
@@ -549,6 +550,16 @@ private static INodesInPath addFile(
try {
boolean isStriped = false;
ErasureCodingPolicy ecPolicy = null;
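// Resolve the requested policy name to its id up front so an unknown
// policy fails the create() before any namespace change. An unset optional
// protobuf field arrives here as an empty string, hence the isEmpty()
// check; leaving the id at 0 keeps the inherit-from-parent behavior.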
byte storagePolicyId = 0;
if (storagePolicy != null && !storagePolicy.isEmpty()) {
BlockStoragePolicy policy =
fsd.getBlockManager().getStoragePolicy(storagePolicy);
if (policy == null) {
throw new HadoopIllegalArgumentException(
"Cannot find a block policy with the name " + storagePolicy);
}
storagePolicyId = policy.getId();
}
if (!shouldReplicate) {
ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsd.getFSNamesystem(), ecPolicyName, existing);
@@ -562,7 +573,7 @@ private static INodesInPath addFile(
final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
modTime, modTime, replicationFactor, ecPolicyID, preferredBlockSize,
blockType);
storagePolicyId, blockType);
newNode.setLocalName(localName);
newNode.toUnderConstruction(clientName, clientMachine);
newiip = fsd.addINode(existing, newNode, permissions.getPermission());
@@ -740,13 +751,6 @@ private static INodeFile newINodeFile(
storagePolicyId, blockType);
}
private static INodeFile newINodeFile(long id, PermissionStatus permissions,
long mtime, long atime, Short replication, Byte ecPolicyID,
long preferredBlockSize, BlockType blockType) {
return newINodeFile(id, permissions, mtime, atime, replication, ecPolicyID,
preferredBlockSize, (byte)0, blockType);
}
/**
* Persist the new block (the last block of the given file).
*/

View File

@@ -2441,13 +2441,13 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions,
String holder, String clientMachine, EnumSet<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
boolean logRetryCache) throws IOException {
String storagePolicy, boolean logRetryCache) throws IOException {
HdfsFileStatus status;
try {
status = startFileInt(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize, supportedVersions, ecPolicyName,
logRetryCache);
storagePolicy, logRetryCache);
} catch (AccessControlException e) {
logAuditEvent(false, "create", src);
throw e;
@@ -2460,7 +2460,8 @@ private HdfsFileStatus startFileInt(String src,
PermissionStatus permissions, String holder, String clientMachine,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, CryptoProtocolVersion[] supportedVersions,
String ecPolicyName, boolean logRetryCache) throws IOException {
String ecPolicyName, String storagePolicy, boolean logRetryCache)
throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append("DIR* NameSystem.startFile: src=").append(src)
@@ -2549,7 +2550,8 @@ private HdfsFileStatus startFileInt(String src,
try {
stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
clientMachine, flag, createParent, replication, blockSize, feInfo,
toRemoveBlocks, shouldReplicate, ecPolicyName, logRetryCache);
toRemoveBlocks, shouldReplicate, ecPolicyName, storagePolicy,
logRetryCache);
} catch (IOException e) {
skipSync = e instanceof StandbyException;
throw e;

View File

@@ -780,7 +780,8 @@ public FsServerDefaults getServerDefaults() throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
String storagePolicy)
throws IOException {
checkNNStartup();
String clientMachine = getClientMachine();
@@ -804,7 +805,7 @@ public HdfsFileStatus create(String src, FsPermission masked,
.getShortUserName(), null, masked);
status = namesystem.startFile(src, perm, clientName, clientMachine,
flag.get(), createParent, replication, blockSize, supportedVersions,
ecPolicyName, cacheEntry != null);
ecPolicyName, storagePolicy, cacheEntry != null);
} finally {
RetryCache.setState(cacheEntry, status != null, status);
}

View File

@@ -2137,7 +2137,7 @@ public static void createStripedFile(MiniDFSCluster cluster, Path file,
.create(file.toString(), new FsPermission((short)0755),
dfs.getClient().getClientName(),
new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)),
false, (short)1, 128*1024*1024L, null, null);
false, (short) 1, 128 * 1024 * 1024L, null, null, null);
FSNamesystem ns = cluster.getNamesystem();
FSDirectory fsdir = ns.getFSDirectory();

View File

@@ -276,7 +276,7 @@ public Object answer(InvocationOnMock invocation)
.build())
.when(mockNN)
.create(anyString(), any(), anyString(), any(), anyBoolean(),
anyShort(), anyLong(), any(), any());
anyShort(), anyLong(), any(), any(), any());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);

View File

@@ -1450,6 +1450,45 @@ public void testFileCloseStatus() throws IOException {
}
}
@Test
public void testCreateWithStoragePolicy() throws Throwable {
Configuration conf = new HdfsConfiguration();
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.storageTypes(
new StorageType[] {StorageType.DISK, StorageType.ARCHIVE,
StorageType.SSD}).storagesPerDatanode(3).build()) {
DistributedFileSystem fs = cluster.getFileSystem();
Path file1 = new Path("/tmp/file1");
Path file2 = new Path("/tmp/file2");
fs.mkdirs(new Path("/tmp"));
fs.setStoragePolicy(new Path("/tmp"), "ALL_SSD");
FSDataOutputStream outputStream = fs.createFile(file1)
.storagePolicyName("COLD").build();
outputStream.write(1);
outputStream.close();
assertEquals(StorageType.ARCHIVE, DFSTestUtil.getAllBlocks(fs, file1)
.get(0).getStorageTypes()[0]);
assertEquals("COLD", fs.getStoragePolicy(file1).getName());
// Check with storage policy not specified.
outputStream = fs.createFile(file2).build();
outputStream.write(1);
outputStream.close();
assertEquals(StorageType.SSD, DFSTestUtil.getAllBlocks(fs, file2).get(0)
.getStorageTypes()[0]);
assertEquals("ALL_SSD", fs.getStoragePolicy(file2).getName());
// Check with default storage policy.
outputStream = fs.createFile(new Path("/default")).build();
outputStream.write(1);
outputStream.close();
assertEquals(StorageType.DISK,
DFSTestUtil.getAllBlocks(fs, new Path("/default")).get(0)
.getStorageTypes()[0]);
assertEquals(fs.getStoragePolicy(new Path("/default")).getName(), "HOT");
}
}
@Test(timeout=60000)
public void testListFiles() throws IOException {
Configuration conf = new HdfsConfiguration();

View File

@@ -963,7 +963,7 @@ private static void mockCreate(ClientProtocol mcp,
.build())
.when(mcp)
.create(anyString(), any(), anyString(), any(), anyBoolean(),
anyShort(), anyLong(), any(), any());
anyShort(), anyLong(), any(), any(), any());
}
// This test only uses mocks. Called from the end of an existing test to

View File

@@ -1246,7 +1246,7 @@ private void doCreateTest(CreationMethod method) throws Exception {
try {
nnrpc.create(pathStr, new FsPermission((short)0755), "client",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short)1, 128*1024*1024L, null, null);
true, (short) 1, 128 * 1024 * 1024L, null, null, null);
fail("Should have thrown exception when creating '"
+ pathStr + "'" + " by " + method);
} catch (InvalidPathException ipe) {

View File

@@ -372,7 +372,7 @@ public void testFactory() throws Exception {
.build())
.when(mcp)
.create(anyString(), any(), anyString(),
any(), anyBoolean(), anyShort(), anyLong(), any(), any());
any(), anyBoolean(), anyShort(), anyLong(), any(), any(), any());
final Configuration conf = new Configuration();
final DFSClient c1 = createDFSClientAs(ugi[0], conf);

View File

@@ -593,7 +593,8 @@ long executeOp(int daemonId, int inputIdx, String clientName)
FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet
.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null,
null);
long end = Time.now();
for (boolean written = !closeUponCreate; !written;
written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -1143,7 +1144,7 @@ void generateInputs(int[] ignore) throws IOException {
String fileName = nameGenerator.getNextFileName("ThroughputBench");
clientProto.create(fileName, FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
BLOCK_SIZE, CryptoProtocolVersion.supported(), null, null);
ExtendedBlock lastBlock = addBlocks(fileName, clientName);
clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
}

View File

@@ -86,7 +86,7 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
nn.create(src, FsPermission.getFileDefault(),
"clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short)3, 1024, null, null);
true, (short) 3, 1024, null, null, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
@@ -158,7 +158,7 @@ public void testAddBlockRetryShouldReturnBlockWithLocations()
// create file
nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 3, 1024, null, null);
(short) 3, 1024, null, null, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,

View File

@@ -109,7 +109,7 @@ private void doTestChooseTargetNormalCase() throws Exception {
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
replication, DEFAULT_BLOCK_SIZE, null, null, false);
replication, DEFAULT_BLOCK_SIZE, null, null, null, false);
//test chooseTarget for new file
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
@@ -139,7 +139,7 @@ private void doTestChooseTargetSpecialCase() throws Exception {
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
(short) 20, DEFAULT_BLOCK_SIZE, null, null, false);
(short) 20, DEFAULT_BLOCK_SIZE, null, null, null, false);
//test chooseTarget for new file
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,

View File

@@ -131,7 +131,7 @@ public void testPlacementWithDFSNetworkTopology() throws Exception {
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine,
clientMachine, EnumSet.of(CreateFlag.CREATE), true, REPLICATION_FACTOR,
DEFAULT_BLOCK_SIZE, null, null, false);
DEFAULT_BLOCK_SIZE, null, null, null, false);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null,
null, fileStatus.getFileId(), null, null);
@@ -184,7 +184,7 @@ private void testPlacement(String clientMachine,
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, null, false);
REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, null, null, false);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
null, null, fileStatus.getFileId(), null, null);

View File

@@ -232,19 +232,19 @@ public void testCreate() throws Exception {
newCall();
HdfsFileStatus status = nnRpc.create(src, perm, "holder",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 1, BlockSize, null, null);
(short) 1, BlockSize, null, null, null);
Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 1, BlockSize, null, null));
(short) 1, BlockSize, null, null, null));
Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 1, BlockSize, null, null));
(short) 1, BlockSize, null, null, null));
// A non-retried call fails
newCall();
try {
nnRpc.create(src, perm, "holder",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short) 1, BlockSize, null, null);
true, (short) 1, BlockSize, null, null, null);
Assert.fail("testCreate - expected exception is not thrown");
} catch (IOException e) {
// expected

View File

@@ -414,7 +414,7 @@ void invoke() throws Exception {
new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
BlockSize,
new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES},
null);
null, null);
}
@Override