HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.

commit 744208431f
parent 90020624b0
Kihwal Lee  2016-10-03 11:27:23 -05:00

4 changed files with 31 additions and 18 deletions
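
What changed, in brief: previously each block-location request looped over every located block and performed a cached-block lookup even when no cache directives existed anywhere in the namesystem. The loop now lives behind a single CacheManager#setCachedLocations(LocatedBlocks) entry point that returns immediately when the cached-block set is empty, so clusters that never use cache directives pay one emptiness check per request instead of one lookup per block. Below is a minimal sketch of that short-circuit pattern; the class and member names are simplified stand-ins for illustration, not the real CacheManager internals.

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Stand-in for the CacheManager's role in annotating block locations.
    class CachedLocationAnnotator {
      // stand-in for CacheManager#cachedBlocks: blocks with active directives
      private final Set<Long> cachedBlockIds = new HashSet<>();

      // Single entry point per request: when caching is unused, this costs
      // one isEmpty() check instead of a lookup for every located block.
      void setCachedLocations(List<Long> locatedBlockIds) {
        if (cachedBlockIds.isEmpty()) {
          return; // no cache directives anywhere -> skip all lookups
        }
        for (long blockId : locatedBlockIds) {
          annotateOne(blockId);
        }
      }

      private void annotateOne(long blockId) {
        if (cachedBlockIds.contains(blockId)) {
          // attach the cached DataNode locations to the block (omitted)
        }
      }
    }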

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -103,6 +103,7 @@
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -1145,9 +1146,16 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
           fileSizeExcludeBlocksUnderConstruction, mode);
       isComplete = true;
     }
-    return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
+    LocatedBlocks locations = new LocatedBlocks(
+        fileSizeExcludeBlocksUnderConstruction,
         isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
         ecPolicy);
+    // Set caching information for the located blocks.
+    CacheManager cm = namesystem.getCacheManager();
+    if (cm != null) {
+      cm.setCachedLocations(locations);
+    }
+    return locations;
   }
 }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -63,6 +63,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -902,7 +903,16 @@ public void removeCachePool(String poolName)
     return new BatchedListEntries<CachePoolEntry>(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -36,7 +36,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -156,7 +155,6 @@ static GetBlockLocationsResult getBlockLocations(
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -190,11 +188,6 @@ static GetBlockLocationsResult getBlockLocations(
         inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
         length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
 
-    // Set caching information for the located blocks.
-    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-      cm.setCachedLocations(lb);
-    }
-
     final long now = now();
     boolean updateAccessTime = fsd.isAccessTimeSupported()
         && !iip.isSnapshot()
@@ -461,7 +454,7 @@ private static HdfsFileStatus createFileStatus(
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -479,14 +472,6 @@ private static HdfsFileStatus createFileStatus(
         storagePolicy,
         ecPolicy,
         loc);
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -72,6 +72,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
@@ -1531,4 +1533,12 @@ public void testNoBackingReplica() throws Exception {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }