HDFS-6583. Remove clientNode in FileUnderConstructionFeature. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1604541 13f79535-47bb-0310-9956-ffa450edef68
parent 8a83bb7ad6
commit 1e89eba47d
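Summary of the change (reconstructed from the hunks below): FileUnderConstructionFeature no longer caches the lease holder's DatanodeDescriptor ("clientNode"). Only the client name and client machine strings are kept, and the code paths that still need the descriptor look it up from the DatanodeManager by host name at the point of use. The before/after shape of the call, taken from the diff itself:

// Before: the client's DatanodeDescriptor was resolved eagerly and stored
// alongside the lease holder in the under-construction feature.
newNode.toUnderConstruction(clientName, clientMachine, clientNode);

// After: only the two strings are stored; the descriptor is re-resolved from
// the DatanodeManager where a block or an additional datanode is requested.
newNode.toUnderConstruction(clientName, clientMachine);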
@@ -468,6 +468,7 @@ Release 2.5.0 - UNRELEASED
     HDFS-6460. Ignore stale and decommissioned nodes in
     NetworkTopology#sortByDistance. (Yongjun Zhang via wang)
 
+    HDFS-6583. Remove clientNode in FileUnderConstructionFeature. (wheat9)
   BUG FIXES
 
     HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.
@@ -258,8 +258,8 @@ void disableQuotaChecks() {
    * @throws SnapshotAccessControlException
    */
   INodeFile addFile(String path, PermissionStatus permissions,
-      short replication, long preferredBlockSize, String clientName,
-      String clientMachine, DatanodeDescriptor clientNode)
+      short replication, long preferredBlockSize,
+      String clientName, String clientMachine)
       throws FileAlreadyExistsException, QuotaExceededException,
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
 
@@ -267,7 +267,7 @@ INodeFile addFile(String path, PermissionStatus permissions,
     INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
         permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
         preferredBlockSize);
-    newNode.toUnderConstruction(clientName, clientMachine, clientNode);
+    newNode.toUnderConstruction(clientName, clientMachine);
 
     boolean added = false;
     writeLock();
@@ -305,7 +305,7 @@ INodeFile unprotectedAddFile( long id,
       newNode = new INodeFile(id, null, permissions, modificationTime,
           modificationTime, BlockInfo.EMPTY_ARRAY, replication,
           preferredBlockSize);
-      newNode.toUnderConstruction(clientName, clientMachine, null);
+      newNode.toUnderConstruction(clientName, clientMachine);
 
     } else {
       newNode = new INodeFile(id, null, permissions, modificationTime, atime,
@@ -376,8 +376,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
             "for append");
       }
       LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
-          oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
-          false, iip.getLatestSnapshotId(), false);
+          oldFile, addCloseOp.clientName, addCloseOp.clientMachine, false, iip.getLatestSnapshotId(), false);
       newFile = INodeFile.valueOf(fsDir.getINode(path),
           path, true);
 
@@ -781,7 +781,7 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       final INodeFile file = new INodeFile(inodeId, localName, permissions,
           modificationTime, atime, blocks, replication, blockSize);
       if (underConstruction) {
-        file.toUnderConstruction(clientName, clientMachine, null);
+        file.toUnderConstruction(clientName, clientMachine);
       }
       return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
     } else if (numBlocks == -1) {
@@ -933,8 +933,7 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
       }
 
       FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
-      oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine(),
-          uc.getClientNode());
+      oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
       if (oldnode.numBlocks() > 0) {
         BlockInfo ucBlock = cons.getLastBlock();
         // we do not replace the inode, just replace the last block of oldnode
@@ -53,7 +53,6 @@
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
@@ -299,8 +298,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
     // under-construction information
     if (f.hasFileUC()) {
       INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
-      file.toUnderConstruction(uc.getClientName(), uc.getClientMachine(),
-          null);
+      file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
       if (blocks.length > 0) {
         BlockInfo lastBlk = file.getLastBlock();
         // replace the last block of file
@@ -149,7 +149,7 @@ static INodeFile readINodeUnderConstruction(
 
     INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
         modificationTime, blocks, blockReplication, preferredBlockSize);
-    file.toUnderConstruction(clientName, clientMachine, null);
+    file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
 
@@ -2388,9 +2388,6 @@ private void startFileInternal(FSPermissionChecker pc, String src,
     }
 
     checkFsObjectLimit();
-    final DatanodeDescriptor clientNode =
-        blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
-
     INodeFile newNode = null;
 
     // Always do an implicit mkdirs for parent directory tree.
@@ -2398,7 +2395,7 @@ private void startFileInternal(FSPermissionChecker pc, String src,
     if (parent != null && mkdirsRecursively(parent.toString(),
         permissions, true, now())) {
       newNode = dir.addFile(src, permissions, replication, blockSize,
-          holder, clientMachine, clientNode);
+          holder, clientMachine);
     }
 
     if (newNode == null) {
@@ -2473,10 +2470,8 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
         throw new IOException("append: lastBlock=" + lastBlock +
             " of src=" + src + " is not sufficiently replicated yet.");
       }
-      final DatanodeDescriptor clientNode =
-          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
-      return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
-          true, iip.getLatestSnapshotId(), logRetryCache);
+      return prepareFileForWrite(src, myFile, holder, clientMachine, true,
+          iip.getLatestSnapshotId(), logRetryCache);
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
       throw ie;
@@ -2491,7 +2486,6 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
    * @param file existing file object
    * @param leaseHolder identifier of the lease holder on this file
    * @param clientMachine identifier of the client machine
-   * @param clientNode if the client is collocated with a DN, that DN's descriptor
    * @param writeToEditLog whether to persist this change to the edit log
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *                      rebuilding
@@ -2500,12 +2494,12 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src,
    * @throws IOException
    */
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
-      String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
-      boolean writeToEditLog, int latestSnapshot, boolean logRetryCache)
+      String leaseHolder, String clientMachine,
+      boolean writeToEditLog,
+      int latestSnapshot, boolean logRetryCache)
       throws IOException {
     file = file.recordModification(latestSnapshot);
-    final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
-        clientNode);
+    final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
 
     leaseManager.addLease(cons.getFileUnderConstructionFeature()
         .getClientName(), src);
@@ -2777,7 +2771,7 @@ LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
             + maxBlocksPerFile);
       }
       blockSize = pendingFile.getPreferredBlockSize();
-      clientNode = pendingFile.getFileUnderConstructionFeature().getClientNode();
+      clientNode = blockManager.getDatanodeManager().getDatanodeByHost(
+          pendingFile.getFileUnderConstructionFeature().getClientMachine());
       replication = pendingFile.getFileReplication();
     } finally {
       readUnlock();
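This hunk and the getAdditionalDatanode hunk below show the replacement pattern on the read side: instead of pulling a cached descriptor out of the feature via getClientNode(), the NameNode resolves it from the stored client machine name. A minimal sketch of that lookup, assuming a BlockManager reference as FSNamesystem has; the helper name resolveClientNode is hypothetical and not part of this commit:

// Hypothetical helper illustrating the on-demand lookup. getDatanodeByHost()
// returns null when the client machine does not host a DataNode, which matches
// the null clientNode that the feature used to store for such clients.
private static DatanodeDescriptor resolveClientNode(BlockManager blockManager,
    FileUnderConstructionFeature uc) {
  return blockManager.getDatanodeManager()
      .getDatanodeByHost(uc.getClientMachine());
}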
@@ -2983,7 +2978,9 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
         if (inode != null) src = inode.getFullPathName();
       }
       final INodeFile file = checkLease(src, clientName, inode, fileId);
-      clientnode = file.getFileUnderConstructionFeature().getClientNode();
+      String clientMachine = file.getFileUnderConstructionFeature()
+          .getClientMachine();
+      clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
       preferredblocksize = file.getPreferredBlockSize();
 
       //find datanode storages
@@ -32,15 +32,10 @@
 public class FileUnderConstructionFeature implements INode.Feature {
   private String clientName; // lease holder
   private final String clientMachine;
-  // if client is a cluster node too.
-  private final DatanodeDescriptor clientNode;
 
-  public FileUnderConstructionFeature(final String clientName,
-      final String clientMachine,
-      final DatanodeDescriptor clientNode) {
+  public FileUnderConstructionFeature(final String clientName, final String clientMachine) {
     this.clientName = clientName;
     this.clientMachine = clientMachine;
-    this.clientNode = clientNode;
   }
 
   public String getClientName() {
@@ -55,10 +50,6 @@ public String getClientMachine() {
     return clientMachine;
   }
 
-  public DatanodeDescriptor getClientNode() {
-    return clientNode;
-  }
-
   /**
    * Update the length for the last block
    *
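Taken together, the two hunks above reduce the feature to the two strings. A condensed sketch of the resulting class, as implied by this diff; members the commit does not touch (such as the last-block length update documented above) are elided:

public class FileUnderConstructionFeature implements INode.Feature {
  private String clientName; // lease holder
  private final String clientMachine;

  public FileUnderConstructionFeature(final String clientName, final String clientMachine) {
    this.clientName = clientName;
    this.clientMachine = clientMachine;
  }

  public String getClientName() {
    return clientName;
  }

  public String getClientMachine() {
    return clientMachine;
  }

  // ... remaining members unchanged by this commit ...
}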
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -170,12 +169,11 @@ public boolean isUnderConstruction() {
   }
 
   /** Convert this file to an {@link INodeFileUnderConstruction}. */
-  INodeFile toUnderConstruction(String clientName, String clientMachine,
-      DatanodeDescriptor clientNode) {
+  INodeFile toUnderConstruction(String clientName, String clientMachine) {
     Preconditions.checkState(!isUnderConstruction(),
         "file is already under construction");
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
-        clientName, clientMachine, clientNode);
+        clientName, clientMachine);
     addFeature(uc);
     return this;
   }
@@ -83,7 +83,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
 
       final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, blocks, replication, blockSize);
-      inode.toUnderConstruction("", "", null);
+      inode.toUnderConstruction("", "");
 
       // Append path to filename with information about blockIDs
       String path = "_" + iF + "_B" + blocks[0].getBlockId() +
@@ -98,7 +98,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication,
       }
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-      fileUc.toUnderConstruction("", "", null);
+      fileUc.toUnderConstruction("", "");
       editLog.logOpenFile(filePath, fileUc, false);
       editLog.logCloseFile(filePath, inode);
 
@@ -195,7 +195,7 @@ public void run() {
       for (int i = 0; i < numTransactions; i++) {
         INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
             p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
-        inode.toUnderConstruction("", "", null);
+        inode.toUnderConstruction("", "");
 
         editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
@@ -29,8 +29,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.junit.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -318,7 +316,7 @@ public void testValueOf () throws IOException {
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFile(
           INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
-      from.asFile().toUnderConstruction("client", "machine", null);
+      from.asFile().toUnderConstruction("client", "machine");
 
       //cast to INodeFile, should success
       final INodeFile f = INodeFile.valueOf(from, path);
@@ -1070,12 +1068,11 @@ public void testFileUnderConstruction() {
 
     final String clientName = "client";
     final String clientMachine = "machine";
-    file.toUnderConstruction(clientName, clientMachine, null);
+    file.toUnderConstruction(clientName, clientMachine);
     assertTrue(file.isUnderConstruction());
     FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
     assertEquals(clientName, uc.getClientName());
     assertEquals(clientMachine, uc.getClientMachine());
-    Assert.assertNull(uc.getClientNode());
 
     file.toCompleteFile(Time.now());
     assertFalse(file.isUnderConstruction());