HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt() (cmccabe)

This commit is contained in:
Colin Patrick Mccabe 2015-02-25 13:29:31 -08:00
parent 5731c0e0d0
commit caa42adf20
2 changed files with 16 additions and 20 deletions

View File

@@ -668,6 +668,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7740. Test truncate with DataNodes restarting. (yliu)

+    HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt()
+    (cmccabe)
+
   OPTIMIZATIONS

     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

View File

@@ -434,12 +434,10 @@ public List<LocatedBlock> getAllBlocks() throws IOException {
    * Fetch it from the namenode if not cached.
    *
    * @param offset block corresponding to this offset in file is returned
-   * @param updatePosition whether to update current position
    * @return located block
    * @throws IOException
    */
-  private LocatedBlock getBlockAt(long offset,
-      boolean updatePosition) throws IOException {
+  private LocatedBlock getBlockAt(long offset) throws IOException {
     synchronized(infoLock) {
       assert (locatedBlocks != null) : "locatedBlocks is null";
@@ -449,7 +447,6 @@ private LocatedBlock getBlockAt(long offset,
       if (offset < 0 || offset >= getFileLength()) {
         throw new IOException("offset < 0 || offset >= getFileLength(), offset="
             + offset
-            + ", updatePosition=" + updatePosition
             + ", locatedBlocks=" + locatedBlocks);
       }
       else if (offset >= locatedBlocks.getFileLength()) {
@@ -470,17 +467,6 @@ else if (offset >= locatedBlocks.getFileLength()) {
         }
         blk = locatedBlocks.get(targetBlockIdx);
       }
-      // update current position
-      if (updatePosition) {
-        // synchronized not strictly needed, since we only get here
-        // from synchronized caller methods
-        synchronized(this) {
-          pos = offset;
-          blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
-          currentLocatedBlock = blk;
-        }
-      }
       return blk;
     }
   }
@@ -604,7 +590,14 @@ private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
     //
     // Compute desired block
     //
-    LocatedBlock targetBlock = getBlockAt(target, true);
+    LocatedBlock targetBlock = getBlockAt(target);
+
+    // update current position
+    this.pos = target;
+    this.blockEnd = targetBlock.getStartOffset() +
+        targetBlock.getBlockSize() - 1;
+    this.currentLocatedBlock = targetBlock;

     assert (target==pos) : "Wrong postion " + pos + " expect " + target;
     long offsetIntoBlock = target - targetBlock.getStartOffset();
@@ -979,7 +972,7 @@ private DNAddrPair chooseDataNode(LocatedBlock block,
         }
         deadNodes.clear(); //2nd option is to remove only nodes[blockId]
         openInfo();
-        block = getBlockAt(block.getStartOffset(), false);
+        block = getBlockAt(block.getStartOffset());
         failures++;
         continue;
       }
@@ -1056,7 +1049,7 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end,
       byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
-    block = getBlockAt(block.getStartOffset(), false);
+    block = getBlockAt(block.getStartOffset());
     while (true) {
       DNAddrPair addressPair = chooseDataNode(block, null);
       try {
@@ -1108,7 +1101,7 @@ private void actualGetFromOneDataNode(final DNAddrPair datanode,
     // start of the loop.
     CachingStrategy curCachingStrategy;
     boolean allowShortCircuitLocalReads;
-    block = getBlockAt(block.getStartOffset(), false);
+    block = getBlockAt(block.getStartOffset());
     synchronized(infoLock) {
       curCachingStrategy = cachingStrategy;
       allowShortCircuitLocalReads = !shortCircuitForbidden();
@@ -1208,7 +1201,7 @@ private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
     ByteBuffer bb = null;
     int len = (int) (end - start + 1);
     int hedgedReadId = 0;
-    block = getBlockAt(block.getStartOffset(), false);
+    block = getBlockAt(block.getStartOffset());
     while (true) {
       // see HDFS-6591, this metric is used to verify/catch unnecessary loops
       hedgedReadOpsLoopNumForTesting++;