HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

This commit is contained in:
Andrew Wang 2015-06-12 11:35:39 -07:00
parent 46b0b4179c
commit c17439c2dd
30 changed files with 418 additions and 232 deletions

View File

@ -626,6 +626,9 @@ Release 2.8.0 - UNRELEASED
HDFS-7923. The DataNodes should rate-limit their full block reports by HDFS-7923. The DataNodes should rate-limit their full block reports by
asking the NN on heartbeat messages (cmccabe) asking the NN on heartbeat messages (cmccabe)
HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
(Zhe Zhang via wang)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@ -79,7 +79,7 @@ public interface BlockCollection {
* Convert the last block of the collection to an under-construction block * Convert the last block of the collection to an under-construction block
* and set the locations. * and set the locations.
*/ */
public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock, public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeStorageInfo[] targets) throws IOException; DatanodeStorageInfo[] targets) throws IOException;
/** /**

View File

@ -51,7 +51,7 @@ public abstract class BlockInfo extends Block
* per replica is 42 bytes (LinkedList#Entry object per replica) versus 16 * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
* bytes using the triplets. * bytes using the triplets.
*/ */
protected Object[] triplets; Object[] triplets;
/** /**
* Construct an entry for blocksmap * Construct an entry for blocksmap
@ -295,7 +295,7 @@ public BlockInfo moveBlockToHead(BlockInfo head,
/** /**
* BlockInfo represents a block that is not being constructed. * BlockInfo represents a block that is not being constructed.
* In order to start modifying the block, the BlockInfo should be converted * In order to start modifying the block, the BlockInfo should be converted
* to {@link BlockInfoContiguousUnderConstruction}. * to {@link BlockInfoUnderConstruction}.
* @return {@link BlockUCState#COMPLETE} * @return {@link BlockUCState#COMPLETE}
*/ */
public BlockUCState getBlockUCState() { public BlockUCState getBlockUCState() {
@ -312,27 +312,29 @@ public boolean isComplete() {
} }
/** /**
* Convert a complete block to an under construction block. * Convert a block to an under construction block.
* @return BlockInfoUnderConstruction - an under construction block. * @return BlockInfoUnderConstruction - an under construction block.
*/ */
public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction( public BlockInfoUnderConstruction convertToBlockUnderConstruction(
BlockUCState s, DatanodeStorageInfo[] targets) { BlockUCState s, DatanodeStorageInfo[] targets) {
if(isComplete()) { if(isComplete()) {
BlockInfoContiguousUnderConstruction ucBlock = return convertCompleteBlockToUC(s, targets);
new BlockInfoContiguousUnderConstruction(this,
getBlockCollection().getPreferredBlockReplication(), s, targets);
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
} }
// the block is already under construction // the block is already under construction
BlockInfoContiguousUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
(BlockInfoContiguousUnderConstruction)this; (BlockInfoUnderConstruction)this;
ucBlock.setBlockUCState(s); ucBlock.setBlockUCState(s);
ucBlock.setExpectedLocations(targets); ucBlock.setExpectedLocations(targets);
ucBlock.setBlockCollection(getBlockCollection()); ucBlock.setBlockCollection(getBlockCollection());
return ucBlock; return ucBlock;
} }
/**
* Convert a complete block to an under construction block.
*/
abstract BlockInfoUnderConstruction convertCompleteBlockToUC(
BlockUCState s, DatanodeStorageInfo[] targets);
@Override @Override
public int hashCode() { public int hashCode() {
// Super implementation is sufficient // Super implementation is sufficient

View File

@ -19,13 +19,13 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
/** /**
* Subclass of {@link BlockInfo}, used for a block with replication scheme. * Subclass of {@link BlockInfo}, used for a block with replication scheme.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class BlockInfoContiguous extends BlockInfo { public class BlockInfoContiguous extends BlockInfo {
public static final BlockInfoContiguous[] EMPTY_ARRAY = {};
public BlockInfoContiguous(short size) { public BlockInfoContiguous(short size) {
super(size); super(size);
@ -40,84 +40,37 @@ public BlockInfoContiguous(Block blk, short size) {
* This is used to convert BlockReplicationInfoUnderConstruction * This is used to convert BlockReplicationInfoUnderConstruction
* @param from BlockReplicationInfo to copy from. * @param from BlockReplicationInfo to copy from.
*/ */
protected BlockInfoContiguous(BlockInfoContiguous from) { protected BlockInfoContiguous(BlockInfo from) {
super(from); super(from);
} }
/**
* Ensure that there is enough space to include num more triplets.
* @return first free triplet index.
*/
private int ensureCapacity(int num) {
assert this.triplets != null : "BlockInfo is not initialized";
int last = numNodes();
if (triplets.length >= (last+num)*3) {
return last;
}
/* Not enough space left. Create a new array. Should normally
* happen only when replication is manually increased by the user. */
Object[] old = triplets;
triplets = new Object[(last+num)*3];
System.arraycopy(old, 0, triplets, 0, last * 3);
return last;
}
@Override @Override
boolean addStorage(DatanodeStorageInfo storage) { boolean addStorage(DatanodeStorageInfo storage) {
// find the last null node return ContiguousBlockStorageOp.addStorage(this, storage);
int lastNode = ensureCapacity(1);
setStorageInfo(lastNode, storage);
setNext(lastNode, null);
setPrevious(lastNode, null);
return true;
} }
@Override @Override
boolean removeStorage(DatanodeStorageInfo storage) { boolean removeStorage(DatanodeStorageInfo storage) {
int dnIndex = findStorageInfo(storage); return ContiguousBlockStorageOp.removeStorage(this, storage);
if (dnIndex < 0) { // the node is not found
return false;
}
assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
"Block is still in the list and must be removed first.";
// find the last not null node
int lastNode = numNodes()-1;
// replace current node triplet by the lastNode one
setStorageInfo(dnIndex, getStorageInfo(lastNode));
setNext(dnIndex, getNext(lastNode));
setPrevious(dnIndex, getPrevious(lastNode));
// set the last triplet to null
setStorageInfo(lastNode, null);
setNext(lastNode, null);
setPrevious(lastNode, null);
return true;
} }
@Override @Override
public int numNodes() { public int numNodes() {
assert this.triplets != null : "BlockInfo is not initialized"; return ContiguousBlockStorageOp.numNodes(this);
assert triplets.length % 3 == 0 : "Malformed BlockInfo";
for (int idx = getCapacity()-1; idx >= 0; idx--) {
if (getDatanode(idx) != null) {
return idx + 1;
}
}
return 0;
} }
@Override @Override
void replaceBlock(BlockInfo newBlock) { void replaceBlock(BlockInfo newBlock) {
assert newBlock instanceof BlockInfoContiguous; ContiguousBlockStorageOp.replaceBlock(this, newBlock);
for (int i = this.numNodes() - 1; i >= 0; i--) { }
final DatanodeStorageInfo storage = this.getStorageInfo(i);
final boolean removed = storage.removeBlock(this);
assert removed : "currentBlock not found.";
final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( @Override
newBlock); BlockInfoUnderConstruction convertCompleteBlockToUC(
assert result == DatanodeStorageInfo.AddBlockResult.ADDED : HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) {
"newBlock already exists."; BlockInfoUnderConstructionContiguous ucBlock =
} new BlockInfoUnderConstructionContiguous(this,
getBlockCollection().getPreferredBlockReplication(), s, targets);
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
} }
} }

View File

@ -22,7 +22,9 @@
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -31,15 +33,15 @@
* Represents a block that is currently being constructed.<br> * Represents a block that is currently being constructed.<br>
* This is usually the last block of a file opened for write or append. * This is usually the last block of a file opened for write or append.
*/ */
public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous { public abstract class BlockInfoUnderConstruction extends BlockInfo {
/** Block state. See {@link BlockUCState} */ /** Block state. See {@link BlockUCState} */
private BlockUCState blockUCState; protected BlockUCState blockUCState;
/** /**
* Block replicas as assigned when the block was allocated. * Block replicas as assigned when the block was allocated.
* This defines the pipeline order. * This defines the pipeline order.
*/ */
private List<ReplicaUnderConstruction> replicas; protected List<ReplicaUnderConstruction> replicas;
/** /**
* Index of the primary data node doing the recovery. Useful for log * Index of the primary data node doing the recovery. Useful for log
@ -57,12 +59,12 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
/** /**
* The block source to use in the event of copy-on-write truncate. * The block source to use in the event of copy-on-write truncate.
*/ */
private Block truncateBlock; protected Block truncateBlock;
/** /**
* ReplicaUnderConstruction contains information about replicas while * ReplicaUnderConstruction contains information about replicas while
* they are under construction. * they are under construction.
* The GS, the length and the state of the replica is as reported by * The GS, the length and the state of the replica is as reported by
* the data-node. * the data-node.
* It is not guaranteed, but expected, that data-nodes actually have * It is not guaranteed, but expected, that data-nodes actually have
* corresponding replicas. * corresponding replicas.
@ -143,7 +145,7 @@ public String toString() {
appendStringTo(b); appendStringTo(b);
return b.toString(); return b.toString();
} }
@Override @Override
public void appendStringTo(StringBuilder sb) { public void appendStringTo(StringBuilder sb) {
sb.append("ReplicaUC[") sb.append("ReplicaUC[")
@ -158,45 +160,24 @@ public void appendStringTo(StringBuilder sb) {
* Create block and set its state to * Create block and set its state to
* {@link BlockUCState#UNDER_CONSTRUCTION}. * {@link BlockUCState#UNDER_CONSTRUCTION}.
*/ */
public BlockInfoContiguousUnderConstruction(Block blk, short replication) { public BlockInfoUnderConstruction(Block blk, short replication) {
this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null); this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
} }
/** /**
* Create a block that is currently being constructed. * Create a block that is currently being constructed.
*/ */
public BlockInfoContiguousUnderConstruction(Block blk, short replication, public BlockInfoUnderConstruction(Block blk, short replication,
BlockUCState state, DatanodeStorageInfo[] targets) { BlockUCState state, DatanodeStorageInfo[] targets) {
super(blk, replication); super(blk, replication);
assert getBlockUCState() != BlockUCState.COMPLETE : Preconditions.checkState(getBlockUCState() != BlockUCState.COMPLETE,
"BlockInfoUnderConstruction cannot be in COMPLETE state"; "BlockInfoUnderConstruction cannot be in COMPLETE state");
this.blockUCState = state; this.blockUCState = state;
setExpectedLocations(targets); setExpectedLocations(targets);
} }
/** /** Set expected locations. */
* Convert an under construction block to a complete block. public abstract void setExpectedLocations(DatanodeStorageInfo[] targets);
*
* @return BlockInfo - a complete block.
* @throws IOException if the state of the block
* (the generation stamp and the length) has not been committed by
* the client or it does not have at least a minimal number of replicas
* reported from data-nodes.
*/
BlockInfo convertToCompleteBlock() throws IOException {
assert getBlockUCState() != BlockUCState.COMPLETE :
"Trying to convert a COMPLETE block";
return new BlockInfoContiguous(this);
}
/** Set expected locations */
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
for(int i = 0; i < numLocations; i++)
replicas.add(
new ReplicaUnderConstruction(this, targets[i], ReplicaState.RBW));
}
/** /**
* Create array of expected replica locations * Create array of expected replica locations
@ -205,12 +186,13 @@ public void setExpectedLocations(DatanodeStorageInfo[] targets) {
public DatanodeStorageInfo[] getExpectedStorageLocations() { public DatanodeStorageInfo[] getExpectedStorageLocations() {
int numLocations = replicas == null ? 0 : replicas.size(); int numLocations = replicas == null ? 0 : replicas.size();
DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations]; DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
for(int i = 0; i < numLocations; i++) for(int i = 0; i < numLocations; i++) {
storages[i] = replicas.get(i).getExpectedStorageLocation(); storages[i] = replicas.get(i).getExpectedStorageLocation();
}
return storages; return storages;
} }
/** Get the number of expected locations */ /** Get the number of expected locations. */
public int getNumExpectedLocations() { public int getNumExpectedLocations() {
return replicas == null ? 0 : replicas.size(); return replicas == null ? 0 : replicas.size();
} }
@ -228,19 +210,15 @@ void setBlockUCState(BlockUCState s) {
blockUCState = s; blockUCState = s;
} }
/** Get block recovery ID */ /** Get block recovery ID. */
public long getBlockRecoveryId() { public long getBlockRecoveryId() {
return blockRecoveryId; return blockRecoveryId;
} }
/** Get recover block */ /** Get recover block. */
public Block getTruncateBlock() { public abstract Block getTruncateBlock();
return truncateBlock;
}
public void setTruncateBlock(Block recoveryBlock) { public abstract void setTruncateBlock(Block recoveryBlock);
this.truncateBlock = recoveryBlock;
}
/** /**
* Process the recorded replicas. When about to commit or finish the * Process the recorded replicas. When about to commit or finish the
@ -250,8 +228,9 @@ public void setTruncateBlock(Block recoveryBlock) {
public void setGenerationStampAndVerifyReplicas(long genStamp) { public void setGenerationStampAndVerifyReplicas(long genStamp) {
// Set the generation stamp for the block. // Set the generation stamp for the block.
setGenerationStamp(genStamp); setGenerationStamp(genStamp);
if (replicas == null) if (replicas == null) {
return; return;
}
// Remove the replicas with wrong gen stamp. // Remove the replicas with wrong gen stamp.
// The replica list is unchanged. // The replica list is unchanged.
@ -267,13 +246,14 @@ public void setGenerationStampAndVerifyReplicas(long genStamp) {
/** /**
* Commit block's length and generation stamp as reported by the client. * Commit block's length and generation stamp as reported by the client.
* Set block state to {@link BlockUCState#COMMITTED}. * Set block state to {@link BlockUCState#COMMITTED}.
* @param block - contains client reported block length and generation * @param block - contains client reported block length and generation
* @throws IOException if block ids are inconsistent. * @throws IOException if block ids are inconsistent.
*/ */
void commitBlock(Block block) throws IOException { void commitBlock(Block block) throws IOException {
if(getBlockId() != block.getBlockId()) if(getBlockId() != block.getBlockId()) {
throw new IOException("Trying to commit inconsistent block: id = " throw new IOException("Trying to commit inconsistent block: id = "
+ block.getBlockId() + ", expected id = " + getBlockId()); + block.getBlockId() + ", expected id = " + getBlockId());
}
blockUCState = BlockUCState.COMMITTED; blockUCState = BlockUCState.COMMITTED;
this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp()); this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
// Sort out invalid replicas. // Sort out invalid replicas.
@ -289,16 +269,17 @@ public void initializeBlockRecovery(long recoveryId) {
setBlockUCState(BlockUCState.UNDER_RECOVERY); setBlockUCState(BlockUCState.UNDER_RECOVERY);
blockRecoveryId = recoveryId; blockRecoveryId = recoveryId;
if (replicas.size() == 0) { if (replicas.size() == 0) {
NameNode.blockStateChangeLog.warn("BLOCK*" NameNode.blockStateChangeLog.warn("BLOCK* " +
+ " BlockInfoUnderConstruction.initLeaseRecovery:" "BlockInfoUnderConstruction.initLeaseRecovery: " +
+ " No blocks found, lease removed."); "No blocks found, lease removed.");
} }
boolean allLiveReplicasTriedAsPrimary = true; boolean allLiveReplicasTriedAsPrimary = true;
for (int i = 0; i < replicas.size(); i++) { for (int i = 0; i < replicas.size(); i++) {
// Check if all replicas have been tried or not. // Check if all replicas have been tried or not.
if (replicas.get(i).isAlive()) { if (replicas.get(i).isAlive()) {
allLiveReplicasTriedAsPrimary = allLiveReplicasTriedAsPrimary =
(allLiveReplicasTriedAsPrimary && replicas.get(i).getChosenAsPrimary()); (allLiveReplicasTriedAsPrimary &&
replicas.get(i).getChosenAsPrimary());
} }
} }
if (allLiveReplicasTriedAsPrimary) { if (allLiveReplicasTriedAsPrimary) {
@ -312,7 +293,8 @@ public void initializeBlockRecovery(long recoveryId) {
primaryNodeIndex = -1; primaryNodeIndex = -1;
for(int i = 0; i < replicas.size(); i++) { for(int i = 0; i < replicas.size(); i++) {
// Skip alive replicas which have been chosen for recovery. // Skip alive replicas which have been chosen for recovery.
if (!(replicas.get(i).isAlive() && !replicas.get(i).getChosenAsPrimary())) { if (!(replicas.get(i).isAlive() &&
!replicas.get(i).getChosenAsPrimary())) {
continue; continue;
} }
final ReplicaUnderConstruction ruc = replicas.get(i); final ReplicaUnderConstruction ruc = replicas.get(i);
@ -325,7 +307,8 @@ public void initializeBlockRecovery(long recoveryId) {
} }
} }
if (primary != null) { if (primary != null) {
primary.getExpectedStorageLocation().getDatanodeDescriptor().addBlockToBeRecovered(this); primary.getExpectedStorageLocation().
getDatanodeDescriptor().addBlockToBeRecovered(this);
primary.setChosenAsPrimary(true); primary.setChosenAsPrimary(true);
NameNode.blockStateChangeLog.info( NameNode.blockStateChangeLog.info(
"BLOCK* {} recovery started, primary={}", this, primary); "BLOCK* {} recovery started, primary={}", this, primary);
@ -358,6 +341,25 @@ void addReplicaIfNotPresent(DatanodeStorageInfo storage,
replicas.add(new ReplicaUnderConstruction(block, storage, rState)); replicas.add(new ReplicaUnderConstruction(block, storage, rState));
} }
/**
* Convert an under construction block to a complete block.
*
* @return a complete block.
* @throws IOException
* if the state of the block (the generation stamp and the length)
* has not been committed by the client or it does not have at
* least a minimal number of replicas reported from data-nodes.
*/
public abstract BlockInfo convertToCompleteBlock();
@Override
BlockInfoUnderConstruction convertCompleteBlockToUC
(HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) {
BlockManager.LOG.error("convertCompleteBlockToUC should only be applied " +
"on complete blocks.");
return null;
}
@Override // BlockInfo @Override // BlockInfo
// BlockInfoUnderConstruction participates in maps the same way as BlockInfo // BlockInfoUnderConstruction participates in maps the same way as BlockInfo
public int hashCode() { public int hashCode() {

View File

@ -0,0 +1,110 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import java.util.ArrayList;
/**
* Subclass of {@link BlockInfoUnderConstruction}, representing a block under
* the contiguous (instead of striped) layout.
*/
public class BlockInfoUnderConstructionContiguous extends
BlockInfoUnderConstruction {
/**
* Create block and set its state to
* {@link HdfsServerConstants.BlockUCState#UNDER_CONSTRUCTION}.
*/
public BlockInfoUnderConstructionContiguous(Block blk, short replication) {
this(blk, replication, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
null);
}
/**
* Create a block that is currently being constructed.
*/
public BlockInfoUnderConstructionContiguous(Block blk, short replication,
HdfsServerConstants.BlockUCState state, DatanodeStorageInfo[] targets) {
super(blk, replication);
Preconditions.checkState(getBlockUCState() !=
HdfsServerConstants.BlockUCState.COMPLETE,
"BlockInfoUnderConstructionContiguous cannot be in COMPLETE state");
this.blockUCState = state;
setExpectedLocations(targets);
}
/**
* Convert an under construction block to a complete block.
*
* @return BlockInfo - a complete block.
* @throws IOException if the state of the block
* (the generation stamp and the length) has not been committed by
* the client or it does not have at least a minimal number of replicas
* reported from data-nodes.
*/
@Override
public BlockInfoContiguous convertToCompleteBlock() {
Preconditions.checkState(getBlockUCState() !=
HdfsServerConstants.BlockUCState.COMPLETE,
"Trying to convert a COMPLETE block");
return new BlockInfoContiguous(this);
}
@Override
boolean addStorage(DatanodeStorageInfo storage) {
return ContiguousBlockStorageOp.addStorage(this, storage);
}
@Override
boolean removeStorage(DatanodeStorageInfo storage) {
return ContiguousBlockStorageOp.removeStorage(this, storage);
}
@Override
public int numNodes() {
return ContiguousBlockStorageOp.numNodes(this);
}
@Override
void replaceBlock(BlockInfo newBlock) {
ContiguousBlockStorageOp.replaceBlock(this, newBlock);
}
@Override
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<>(numLocations);
for(int i = 0; i < numLocations; i++) {
replicas.add(
new ReplicaUnderConstruction(this, targets[i], HdfsServerConstants.ReplicaState.RBW));
}
}
@Override
public Block getTruncateBlock() {
return truncateBlock;
}
@Override
public void setTruncateBlock(Block recoveryBlock) {
this.truncateBlock = recoveryBlock;
}
}

View File

@ -602,7 +602,7 @@ public boolean checkMinReplication(Block block) {
* of replicas reported from data-nodes. * of replicas reported from data-nodes.
*/ */
private static boolean commitBlock( private static boolean commitBlock(
final BlockInfoContiguousUnderConstruction block, final Block commitBlock) final BlockInfoUnderConstruction block, final Block commitBlock)
throws IOException { throws IOException {
if (block.getBlockUCState() == BlockUCState.COMMITTED) if (block.getBlockUCState() == BlockUCState.COMMITTED)
return false; return false;
@ -634,7 +634,7 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc,
return false; // already completed (e.g. by syncBlock) return false; // already completed (e.g. by syncBlock)
final boolean b = commitBlock( final boolean b = commitBlock(
(BlockInfoContiguousUnderConstruction) lastBlock, commitBlock); (BlockInfoUnderConstruction) lastBlock, commitBlock);
if(countNodes(lastBlock).liveReplicas() >= minReplication) if(countNodes(lastBlock).liveReplicas() >= minReplication)
completeBlock(bc, bc.numBlocks()-1, false); completeBlock(bc, bc.numBlocks()-1, false);
return b; return b;
@ -654,8 +654,8 @@ private BlockInfo completeBlock(final BlockCollection bc,
BlockInfo curBlock = bc.getBlocks()[blkIndex]; BlockInfo curBlock = bc.getBlocks()[blkIndex];
if(curBlock.isComplete()) if(curBlock.isComplete())
return curBlock; return curBlock;
BlockInfoContiguousUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
(BlockInfoContiguousUnderConstruction) curBlock; (BlockInfoUnderConstruction) curBlock;
int numNodes = ucBlock.numNodes(); int numNodes = ucBlock.numNodes();
if (!force && numNodes < minReplication) if (!force && numNodes < minReplication)
throw new IOException("Cannot complete block: " + throw new IOException("Cannot complete block: " +
@ -697,7 +697,7 @@ private BlockInfo completeBlock(final BlockCollection bc,
* when tailing edit logs as a Standby. * when tailing edit logs as a Standby.
*/ */
public BlockInfo forceCompleteBlock(final BlockCollection bc, public BlockInfo forceCompleteBlock(final BlockCollection bc,
final BlockInfoContiguousUnderConstruction block) throws IOException { final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block); block.commitBlock(block);
return completeBlock(bc, block, true); return completeBlock(bc, block, true);
} }
@ -728,7 +728,7 @@ public LocatedBlock convertLastBlockToUnderConstruction(
DatanodeStorageInfo[] targets = getStorages(oldBlock); DatanodeStorageInfo[] targets = getStorages(oldBlock);
BlockInfoContiguousUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
bc.setLastBlock(oldBlock, targets); bc.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock); blocksMap.replaceBlock(ucBlock);
@ -830,14 +830,14 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos,
/** @return a LocatedBlock for the given block */ /** @return a LocatedBlock for the given block */
private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos
) throws IOException { ) throws IOException {
if (blk instanceof BlockInfoContiguousUnderConstruction) { if (blk instanceof BlockInfoUnderConstruction) {
if (blk.isComplete()) { if (blk.isComplete()) {
throw new IOException( throw new IOException(
"blk instanceof BlockInfoUnderConstruction && blk.isComplete()" "blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
+ ", blk=" + blk); + ", blk=" + blk);
} }
final BlockInfoContiguousUnderConstruction uc = final BlockInfoUnderConstruction uc =
(BlockInfoContiguousUnderConstruction) blk; (BlockInfoUnderConstruction) blk;
final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
return newLocatedBlock(eb, storages, pos, false); return newLocatedBlock(eb, storages, pos, false);
@ -1744,11 +1744,11 @@ public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
* reported by the datanode in the block report. * reported by the datanode in the block report.
*/ */
static class StatefulBlockInfo { static class StatefulBlockInfo {
final BlockInfoContiguousUnderConstruction storedBlock; final BlockInfoUnderConstruction storedBlock;
final Block reportedBlock; final Block reportedBlock;
final ReplicaState reportedState; final ReplicaState reportedState;
StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock, StatefulBlockInfo(BlockInfoUnderConstruction storedBlock,
Block reportedBlock, ReplicaState reportedState) { Block reportedBlock, ReplicaState reportedState) {
this.storedBlock = storedBlock; this.storedBlock = storedBlock;
this.reportedBlock = reportedBlock; this.reportedBlock = reportedBlock;
@ -1789,7 +1789,7 @@ private static class BlockToMarkCorrupt {
BlockToMarkCorrupt(BlockInfo stored, long gs, String reason, BlockToMarkCorrupt(BlockInfo stored, long gs, String reason,
Reason reasonCode) { Reason reasonCode) {
this(new BlockInfoContiguous((BlockInfoContiguous)stored), stored, this(new BlockInfoContiguous(stored), stored,
reason, reasonCode); reason, reasonCode);
//the corrupted block in datanode has a different generation stamp //the corrupted block in datanode has a different generation stamp
corrupted.setGenerationStamp(gs); corrupted.setGenerationStamp(gs);
@ -2148,13 +2148,13 @@ private void processFirstBlockReport(
// If block is under construction, add this replica to its list // If block is under construction, add this replica to its list
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
((BlockInfoContiguousUnderConstruction)storedBlock) ((BlockInfoUnderConstruction)storedBlock)
.addReplicaIfNotPresent(storageInfo, iblk, reportedState); .addReplicaIfNotPresent(storageInfo, iblk, reportedState);
// OpenFileBlocks only inside snapshots also will be added to safemode // OpenFileBlocks only inside snapshots also will be added to safemode
// threshold. So we need to update such blocks to safemode // threshold. So we need to update such blocks to safemode
// refer HDFS-5283 // refer HDFS-5283
BlockInfoContiguousUnderConstruction blockUC = BlockInfoUnderConstruction blockUC =
(BlockInfoContiguousUnderConstruction) storedBlock; (BlockInfoUnderConstruction) storedBlock;
if (namesystem.isInSnapshot(blockUC)) { if (namesystem.isInSnapshot(blockUC)) {
int numOfReplicas = blockUC.getNumExpectedLocations(); int numOfReplicas = blockUC.getNumExpectedLocations();
namesystem.incrementSafeBlockCount(numOfReplicas); namesystem.incrementSafeBlockCount(numOfReplicas);
@ -2309,7 +2309,7 @@ private BlockInfo processReportedBlock(
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
toUC.add(new StatefulBlockInfo( toUC.add(new StatefulBlockInfo(
(BlockInfoContiguousUnderConstruction) storedBlock, (BlockInfoUnderConstruction) storedBlock,
new Block(block), reportedState)); new Block(block), reportedState));
return storedBlock; return storedBlock;
} }
@ -2500,7 +2500,7 @@ private boolean isBlockUnderConstruction(BlockInfo storedBlock,
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
DatanodeStorageInfo storageInfo) throws IOException { DatanodeStorageInfo storageInfo) throws IOException {
BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock; BlockInfoUnderConstruction block = ucBlock.storedBlock;
block.addReplicaIfNotPresent( block.addReplicaIfNotPresent(
storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
@ -2561,7 +2561,7 @@ private Block addStoredBlock(final BlockInfo block,
assert block != null && namesystem.hasWriteLock(); assert block != null && namesystem.hasWriteLock();
BlockInfo storedBlock; BlockInfo storedBlock;
DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
if (block instanceof BlockInfoContiguousUnderConstruction) { if (block instanceof BlockInfoUnderConstruction) {
//refresh our copy in case the block got completed in another thread //refresh our copy in case the block got completed in another thread
storedBlock = blocksMap.getStoredBlock(block); storedBlock = blocksMap.getStoredBlock(block);
} else { } else {
@ -3499,8 +3499,8 @@ public boolean checkBlocksProperlyReplicated(
String src, BlockInfo[] blocks) { String src, BlockInfo[] blocks) {
for (BlockInfo b: blocks) { for (BlockInfo b: blocks) {
if (!b.isComplete()) { if (!b.isComplete()) {
final BlockInfoContiguousUnderConstruction uc = final BlockInfoUnderConstruction uc =
(BlockInfoContiguousUnderConstruction)b; (BlockInfoUnderConstruction)b;
final int numNodes = b.numNodes(); final int numNodes = b.numNodes();
LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
+ uc.getBlockUCState() + ", replication# = " + numNodes + uc.getBlockUCState() + ", replication# = " + numNodes

View File

@ -0,0 +1,106 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.base.Preconditions;
/**
 * Utility class with logic on managing storage locations shared between
 * complete and under-construction blocks under the contiguous format --
 * {@link BlockInfoContiguous} and
 * {@link BlockInfoUnderConstructionContiguous}.
 */
class ContiguousBlockStorageOp {

  /**
   * Utility class with static members only; should never be instantiated.
   */
  private ContiguousBlockStorageOp() {
  }

  /**
   * Ensure that there is enough space to include num more triplets.
   * @return first free triplet index.
   */
  private static int ensureCapacity(BlockInfo b, int num) {
    Preconditions.checkArgument(b.triplets != null,
        "BlockInfo is not initialized");
    int last = b.numNodes();
    if (b.triplets.length >= (last+num)*3) {
      return last;
    }
    /* Not enough space left. Create a new array. Should normally
     * happen only when replication is manually increased by the user. */
    Object[] old = b.triplets;
    b.triplets = new Object[(last+num)*3];
    System.arraycopy(old, 0, b.triplets, 0, last * 3);
    return last;
  }

  /**
   * Add a storage location to the given block, growing the triplets array
   * if necessary. The new triplet is appended after the last non-null one
   * with null prev/next linked-list pointers.
   * @return true always, since a contiguous block can hold any number of
   *         replicas.
   */
  static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) {
    // find the last null node
    int lastNode = ensureCapacity(b, 1);
    b.setStorageInfo(lastNode, storage);
    b.setNext(lastNode, null);
    b.setPrevious(lastNode, null);
    return true;
  }

  /**
   * Remove the triplet for the given storage location, compacting the
   * triplets array by moving the last triplet into the vacated slot.
   * The block must already be unlinked from the storage's block list
   * (its prev/next pointers for this storage must be null).
   * @return true if the storage was found and removed, false otherwise.
   */
  static boolean removeStorage(BlockInfo b,
      DatanodeStorageInfo storage) {
    int dnIndex = b.findStorageInfo(storage);
    if (dnIndex < 0) { // the node is not found
      return false;
    }
    Preconditions.checkArgument(b.getPrevious(dnIndex) == null &&
            b.getNext(dnIndex) == null,
        "Block is still in the list and must be removed first.");
    // find the last not null node
    int lastNode = b.numNodes()-1;
    // replace current node triplet by the lastNode one
    b.setStorageInfo(dnIndex, b.getStorageInfo(lastNode));
    b.setNext(dnIndex, b.getNext(lastNode));
    b.setPrevious(dnIndex, b.getPrevious(lastNode));
    // set the last triplet to null
    b.setStorageInfo(lastNode, null);
    b.setNext(lastNode, null);
    b.setPrevious(lastNode, null);
    return true;
  }

  /**
   * Count the number of data-nodes the block currently belongs to, by
   * scanning backwards for the last non-null datanode entry in the
   * triplets array.
   * @return the number of replicas recorded for this block.
   */
  static int numNodes(BlockInfo b) {
    Preconditions.checkArgument(b.triplets != null,
        "BlockInfo is not initialized");
    Preconditions.checkArgument(b.triplets.length % 3 == 0,
        "Malformed BlockInfo");

    for (int idx = b.getCapacity()-1; idx >= 0; idx--) {
      if (b.getDatanode(idx) != null) {
        return idx + 1;
      }
    }
    return 0;
  }

  /**
   * Replace the block referenced by every storage that holds it with
   * newBlock. Fails fast (via Preconditions) if the old block is missing
   * from a storage or the new block is already present there.
   */
  static void replaceBlock(BlockInfo b, BlockInfo newBlock) {
    for (int i = b.numNodes() - 1; i >= 0; i--) {
      final DatanodeStorageInfo storage = b.getStorageInfo(i);
      final boolean removed = storage.removeBlock(b);
      Preconditions.checkState(removed, "currentBlock not found.");

      final DatanodeStorageInfo.AddBlockResult result = storage.addBlock(
          newBlock);
      Preconditions.checkState(
          result == DatanodeStorageInfo.AddBlockResult.ADDED,
          "newBlock already exists.");
    }
  }
}

View File

@ -219,8 +219,8 @@ public CachedBlocksList getPendingUncached() {
/** A queue of blocks to be replicated by this datanode */ /** A queue of blocks to be replicated by this datanode */
private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>(); private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
/** A queue of blocks to be recovered by this datanode */ /** A queue of blocks to be recovered by this datanode */
private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks = private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
new BlockQueue<BlockInfoContiguousUnderConstruction>(); new BlockQueue<BlockInfoUnderConstruction>();
/** A set of blocks to be invalidated by this datanode */ /** A set of blocks to be invalidated by this datanode */
private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>(); private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
@ -599,7 +599,7 @@ void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) {
/** /**
* Store block recovery work. * Store block recovery work.
*/ */
void addBlockToBeRecovered(BlockInfoContiguousUnderConstruction block) { void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
if(recoverBlocks.contains(block)) { if(recoverBlocks.contains(block)) {
// this prevents adding the same block twice to the recovery queue // this prevents adding the same block twice to the recovery queue
BlockManager.LOG.info(block + " is already in the recovery queue"); BlockManager.LOG.info(block + " is already in the recovery queue");
@ -641,11 +641,12 @@ public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
return replicateBlocks.poll(maxTransfers); return replicateBlocks.poll(maxTransfers);
} }
public BlockInfoContiguousUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(
List<BlockInfoContiguousUnderConstruction> blocks = recoverBlocks.poll(maxTransfers); int maxTransfers) {
List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
if(blocks == null) if(blocks == null)
return null; return null;
return blocks.toArray(new BlockInfoContiguousUnderConstruction[blocks.size()]); return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]);
} }
/** /**

View File

@ -1380,12 +1380,12 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
} }
//check lease recovery //check lease recovery
BlockInfoContiguousUnderConstruction[] blocks = nodeinfo BlockInfoUnderConstruction[] blocks = nodeinfo
.getLeaseRecoveryCommand(Integer.MAX_VALUE); .getLeaseRecoveryCommand(Integer.MAX_VALUE);
if (blocks != null) { if (blocks != null) {
BlockRecoveryCommand brCommand = new BlockRecoveryCommand( BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
blocks.length); blocks.length);
for (BlockInfoContiguousUnderConstruction b : blocks) { for (BlockInfoUnderConstruction b : blocks) {
final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations(); final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
// Skip stale nodes during recovery - not heart beated for some time (30s by default). // Skip stale nodes during recovery - not heart beated for some time (30s by default).
final List<DatanodeStorageInfo> recoveryLocations = final List<DatanodeStorageInfo> recoveryLocations =

View File

@ -43,7 +43,8 @@
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@ -73,7 +74,7 @@ static boolean unprotectedRemoveBlock(
Block block) throws IOException { Block block) throws IOException {
// modify file-> block and blocksMap // modify file-> block and blocksMap
// fileNode should be under construction // fileNode should be under construction
BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block); BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block);
if (uc == null) { if (uc == null) {
return false; return false;
} }
@ -236,7 +237,7 @@ static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src,
} else { } else {
// add new chosen targets to already allocated block and return // add new chosen targets to already allocated block and return
BlockInfo lastBlockInFile = pendingFile.getLastBlock(); BlockInfo lastBlockInFile = pendingFile.getLastBlock();
((BlockInfoContiguousUnderConstruction) lastBlockInFile) ((BlockInfoUnderConstruction) lastBlockInFile)
.setExpectedLocations(targets); .setExpectedLocations(targets);
offset = pendingFile.computeFileSize(); offset = pendingFile.computeFileSize();
return makeLocatedBlock(fsn, lastBlockInFile, targets, offset); return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
@ -520,8 +521,8 @@ private static BlockInfo addBlock(
fileINode.getPreferredBlockReplication(), true); fileINode.getPreferredBlockReplication(), true);
// associate new last block for the file // associate new last block for the file
BlockInfoContiguousUnderConstruction blockInfo = BlockInfoUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction( new BlockInfoUnderConstructionContiguous(
block, block,
fileINode.getFileReplication(), fileINode.getFileReplication(),
HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
@ -662,8 +663,8 @@ private static FileState analyzeFileState(
"allocation of a new block in " + src + ". Returning previously" + "allocation of a new block in " + src + ". Returning previously" +
" allocated block " + lastBlockInFile); " allocated block " + lastBlockInFile);
long offset = file.computeFileSize(); long offset = file.computeFileSize();
BlockInfoContiguousUnderConstruction lastBlockUC = BlockInfoUnderConstruction lastBlockUC =
(BlockInfoContiguousUnderConstruction) lastBlockInFile; (BlockInfoUnderConstruction) lastBlockInFile;
onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile, onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile,
lastBlockUC.getExpectedStorageLocations(), offset); lastBlockUC.getExpectedStorageLocations(), offset);
return new FileState(file, src, iip); return new FileState(file, src, iip);

View File

@ -45,7 +45,8 @@
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@ -960,16 +961,16 @@ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
} }
oldLastBlock.setNumBytes(pBlock.getNumBytes()); oldLastBlock.setNumBytes(pBlock.getNumBytes());
if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) { if (oldLastBlock instanceof BlockInfoUnderConstruction) {
fsNamesys.getBlockManager().forceCompleteBlock(file, fsNamesys.getBlockManager().forceCompleteBlock(file,
(BlockInfoContiguousUnderConstruction) oldLastBlock); (BlockInfoUnderConstruction) oldLastBlock);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock); fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
} }
} else { // the penultimate block is null } else { // the penultimate block is null
Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0); Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
} }
// add the new block // add the new block
BlockInfo newBI = new BlockInfoContiguousUnderConstruction( BlockInfo newBI = new BlockInfoUnderConstructionContiguous(
newBlock, file.getPreferredBlockReplication()); newBlock, file.getPreferredBlockReplication());
fsNamesys.getBlockManager().addBlockCollection(newBI, file); fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI); file.addBlock(newBI);
@ -1010,11 +1011,11 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
if (oldBlock instanceof BlockInfoContiguousUnderConstruction && if (oldBlock instanceof BlockInfoUnderConstruction &&
(!isLastBlock || op.shouldCompleteLastBlock())) { (!isLastBlock || op.shouldCompleteLastBlock())) {
changeMade = true; changeMade = true;
fsNamesys.getBlockManager().forceCompleteBlock(file, fsNamesys.getBlockManager().forceCompleteBlock(file,
(BlockInfoContiguousUnderConstruction) oldBlock); (BlockInfoUnderConstruction) oldBlock);
} }
if (changeMade) { if (changeMade) {
// The state or gen-stamp of the block has changed. So, we may be // The state or gen-stamp of the block has changed. So, we may be
@ -1049,7 +1050,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
// TODO: shouldn't this only be true for the last block? // TODO: shouldn't this only be true for the last block?
// what about an old-version fsync() where fsync isn't called // what about an old-version fsync() where fsync isn't called
// until several blocks in? // until several blocks in?
newBI = new BlockInfoContiguousUnderConstruction( newBI = new BlockInfoUnderConstructionContiguous(
newBlock, file.getPreferredBlockReplication()); newBlock, file.getPreferredBlockReplication());
} else { } else {
// OP_CLOSE should add finalized blocks. This code path // OP_CLOSE should add finalized blocks. This code path

View File

@ -54,7 +54,7 @@
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@ -777,8 +777,9 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
// convert the last block to BlockUC // convert the last block to BlockUC
if (blocks.length > 0) { if (blocks.length > 0) {
BlockInfo lastBlk = blocks[blocks.length - 1]; BlockInfo lastBlk = blocks[blocks.length - 1];
blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction( blocks[blocks.length - 1] =
lastBlk, replication); new BlockInfoUnderConstructionContiguous(
lastBlk, replication);
} }
} }
} }

View File

@ -44,7 +44,7 @@
import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
@ -364,8 +364,8 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
if (blocks.length > 0) { if (blocks.length > 0) {
BlockInfo lastBlk = file.getLastBlock(); BlockInfo lastBlk = file.getLastBlock();
// replace the last block of file // replace the last block of file
file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction( file.setBlock(file.numBlocks() - 1,
lastBlk, replication)); new BlockInfoUnderConstructionContiguous(lastBlk, replication));
} }
} }
return file; return file;

View File

@ -35,7 +35,7 @@
import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@ -137,7 +137,7 @@ static INodeFile readINodeUnderConstruction(
// last block is UNDER_CONSTRUCTION // last block is UNDER_CONSTRUCTION
if(numBlocks > 0) { if(numBlocks > 0) {
blk.readFields(in); blk.readFields(in);
blocks[i] = new BlockInfoContiguousUnderConstruction( blocks[i] = new BlockInfoUnderConstructionContiguous(
blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null); blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
} }
PermissionStatus perm = PermissionStatus.read(in); PermissionStatus perm = PermissionStatus.read(in);

View File

@ -204,7 +204,8 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@ -2003,7 +2004,7 @@ boolean truncateInternal(String src, long newLength,
final BlockInfo last = file.getLastBlock(); final BlockInfo last = file.getLastBlock();
if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
final Block truncateBlock final Block truncateBlock
= ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock(); = ((BlockInfoUnderConstruction)last).getTruncateBlock();
if (truncateBlock != null) { if (truncateBlock != null) {
final long truncateLength = file.computeFileSize(false, false) final long truncateLength = file.computeFileSize(false, false)
+ truncateBlock.getNumBytes(); + truncateBlock.getNumBytes();
@ -2082,11 +2083,11 @@ Block prepareFileForTruncate(INodesInPath iip,
nextGenerationStamp(blockIdManager.isLegacyBlock(oldBlock))); nextGenerationStamp(blockIdManager.isLegacyBlock(oldBlock)));
} }
BlockInfoContiguousUnderConstruction truncatedBlockUC; BlockInfoUnderConstruction truncatedBlockUC;
if(shouldCopyOnTruncate) { if(shouldCopyOnTruncate) {
// Add new truncateBlock into blocksMap and // Add new truncateBlock into blocksMap and
// use oldBlock as a source for copy-on-truncate recovery // use oldBlock as a source for copy-on-truncate recovery
truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock, truncatedBlockUC = new BlockInfoUnderConstructionContiguous(newBlock,
file.getPreferredBlockReplication()); file.getPreferredBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock); truncatedBlockUC.setTruncateBlock(oldBlock);
@ -2102,7 +2103,7 @@ Block prepareFileForTruncate(INodesInPath iip,
blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta); blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
oldBlock = file.getLastBlock(); oldBlock = file.getLastBlock();
assert !oldBlock.isComplete() : "oldBlock should be under construction"; assert !oldBlock.isComplete() : "oldBlock should be under construction";
truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock; truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock;
truncatedBlockUC.setTruncateBlock(new Block(oldBlock)); truncatedBlockUC.setTruncateBlock(new Block(oldBlock));
truncatedBlockUC.getTruncateBlock().setNumBytes( truncatedBlockUC.getTruncateBlock().setNumBytes(
oldBlock.getNumBytes() - lastBlockDelta); oldBlock.getNumBytes() - lastBlockDelta);
@ -3519,7 +3520,8 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip,
throw new AlreadyBeingCreatedException(message); throw new AlreadyBeingCreatedException(message);
case UNDER_CONSTRUCTION: case UNDER_CONSTRUCTION:
case UNDER_RECOVERY: case UNDER_RECOVERY:
final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock; final BlockInfoUnderConstruction uc =
(BlockInfoUnderConstruction)lastBlock;
// determine if last block was intended to be truncated // determine if last block was intended to be truncated
Block recoveryBlock = uc.getTruncateBlock(); Block recoveryBlock = uc.getTruncateBlock();
boolean truncateRecovery = recoveryBlock != null; boolean truncateRecovery = recoveryBlock != null;
@ -3635,7 +3637,7 @@ BlockInfo getStoredBlock(Block block) {
} }
@Override @Override
public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
assert hasReadLock(); assert hasReadLock();
final BlockCollection bc = blockUC.getBlockCollection(); final BlockCollection bc = blockUC.getBlockCollection();
if (bc == null || !(bc instanceof INodeFile) if (bc == null || !(bc instanceof INodeFile)
@ -3682,7 +3684,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
waitForLoadingFSImage(); waitForLoadingFSImage();
writeLock(); writeLock();
boolean copyTruncate = false; boolean copyTruncate = false;
BlockInfoContiguousUnderConstruction truncatedBlock = null; BlockInfoUnderConstruction truncatedBlock = null;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
// If a DN tries to commit to the standby, the recovery will // If a DN tries to commit to the standby, the recovery will
@ -3739,7 +3741,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
return; return;
} }
truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile truncatedBlock = (BlockInfoUnderConstruction) iFile
.getLastBlock(); .getLastBlock();
long recoveryId = truncatedBlock.getBlockRecoveryId(); long recoveryId = truncatedBlock.getBlockRecoveryId();
copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId(); copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
@ -5774,8 +5776,8 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
assert hasWriteLock(); assert hasWriteLock();
// check the vadility of the block and lease holder name // check the vadility of the block and lease holder name
final INodeFile pendingFile = checkUCBlock(oldBlock, clientName); final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
final BlockInfoContiguousUnderConstruction blockinfo final BlockInfoUnderConstruction blockinfo
= (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock(); = (BlockInfoUnderConstruction)pendingFile.getLastBlock();
// check new GS & length: this is not expected // check new GS & length: this is not expected
if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() || if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||

View File

@ -21,7 +21,7 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
/** /**
@ -61,7 +61,7 @@ void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
BlockInfo lastBlock = f.getLastBlock(); BlockInfo lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path " assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length"; + f.getFullPathName() + " is null when updating its length";
assert (lastBlock instanceof BlockInfoContiguousUnderConstruction) assert (lastBlock instanceof BlockInfoUnderConstruction)
: "The last block for path " + f.getFullPathName() : "The last block for path " + f.getFullPathName()
+ " is not a BlockInfoUnderConstruction when updating its length"; + " is not a BlockInfoUnderConstruction when updating its length";
lastBlock.setNumBytes(lastBlockLength); lastBlock.setNumBytes(lastBlockLength);
@ -76,9 +76,9 @@ void cleanZeroSizeBlock(final INodeFile f,
final BlocksMapUpdateInfo collectedBlocks) { final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] blocks = f.getBlocks(); final BlockInfo[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0 if (blocks != null && blocks.length > 0
&& blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) { && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
BlockInfoContiguousUnderConstruction lastUC = BlockInfoUnderConstruction lastUC =
(BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1]; (BlockInfoUnderConstruction) blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) { if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block. do not need check its UC state here // this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC); collectedBlocks.addDeleteBlock(lastUC);

View File

@ -37,7 +37,7 @@
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@ -231,7 +231,7 @@ public void setBlock(int index, BlockInfo blk) {
} }
@Override // BlockCollection, the file should be under construction @Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock( public BlockInfoUnderConstruction setLastBlock(
BlockInfo lastBlock, DatanodeStorageInfo[] locations) BlockInfo lastBlock, DatanodeStorageInfo[] locations)
throws IOException { throws IOException {
Preconditions.checkState(isUnderConstruction(), Preconditions.checkState(isUnderConstruction(),
@ -240,7 +240,7 @@ public BlockInfoContiguousUnderConstruction setLastBlock(
if (numBlocks() == 0) { if (numBlocks() == 0) {
throw new IOException("Failed to set last block: File is empty."); throw new IOException("Failed to set last block: File is empty.");
} }
BlockInfoContiguousUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction( lastBlock.convertToBlockUnderConstruction(
BlockUCState.UNDER_CONSTRUCTION, locations); BlockUCState.UNDER_CONSTRUCTION, locations);
setBlock(numBlocks() - 1, ucBlock); setBlock(numBlocks() - 1, ucBlock);
@ -251,7 +251,7 @@ public BlockInfoContiguousUnderConstruction setLastBlock(
* Remove a block from the block list. This block should be * Remove a block from the block list. This block should be
* the last one on the list. * the last one on the list.
*/ */
BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) { BlockInfoUnderConstruction removeLastBlock(Block oldblock) {
Preconditions.checkState(isUnderConstruction(), Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction"); "file is no longer under construction");
if (blocks == null || blocks.length == 0) { if (blocks == null || blocks.length == 0) {
@ -262,8 +262,8 @@ BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
return null; return null;
} }
BlockInfoContiguousUnderConstruction uc = BlockInfoUnderConstruction uc =
(BlockInfoContiguousUnderConstruction)blocks[size_1]; (BlockInfoUnderConstruction)blocks[size_1];
//copy to a new list //copy to a new list
BlockInfo[] newlist = new BlockInfo[size_1]; BlockInfo[] newlist = new BlockInfo[size_1];
System.arraycopy(blocks, 0, newlist, 0, size_1); System.arraycopy(blocks, 0, newlist, 0, size_1);
@ -689,7 +689,7 @@ public final long computeFileSize(boolean includesLastUcBlock,
final int last = blocks.length - 1; final int last = blocks.length - 1;
//check if the last block is BlockInfoUnderConstruction //check if the last block is BlockInfoUnderConstruction
long size = blocks[last].getNumBytes(); long size = blocks[last].getNumBytes();
if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) { if (blocks[last] instanceof BlockInfoUnderConstruction) {
if (!includesLastUcBlock) { if (!includesLastUcBlock) {
size = 0; size = 0;
} else if (usePreferredBlockSize4LastUcBlock) { } else if (usePreferredBlockSize4LastUcBlock) {

View File

@ -19,7 +19,7 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.hdfs.util.RwLock;
import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.ipc.StandbyException;
@ -45,5 +45,5 @@ public interface Namesystem extends RwLock, SafeMode {
public void checkOperation(OperationCategory read) throws StandbyException; public void checkOperation(OperationCategory read) throws StandbyException;
public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC); public boolean isInSnapshot(BlockInfoUnderConstruction blockUC);
} }

View File

@ -22,7 +22,7 @@
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@ -133,7 +133,7 @@ void combineAndCollectSnapshotBlocks(
Block dontRemoveBlock = null; Block dontRemoveBlock = null;
if (lastBlock != null && lastBlock.getBlockUCState().equals( if (lastBlock != null && lastBlock.getBlockUCState().equals(
HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) { HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock) dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
.getTruncateBlock(); .getTruncateBlock();
} }
// Collect the remaining blocks of the file, ignoring truncate block // Collect the remaining blocks of the file, ignoring truncate block

View File

@ -109,7 +109,7 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@ -1612,9 +1612,9 @@ public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock()); BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue("Block " + blk + " should be under construction, " + assertTrue("Block " + blk + " should be under construction, " +
"got: " + storedBlock, "got: " + storedBlock,
storedBlock instanceof BlockInfoContiguousUnderConstruction); storedBlock instanceof BlockInfoUnderConstruction);
BlockInfoContiguousUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
(BlockInfoContiguousUnderConstruction)storedBlock; (BlockInfoUnderConstruction)storedBlock;
// We expect that the replica with the most recent heart beat will be // We expect that the replica with the most recent heart beat will be
// the one to be in charge of the synchronization / recovery protocol. // the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations(); final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();

View File

@ -23,7 +23,6 @@
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.util.Time;
import org.junit.Test; import org.junit.Test;
/** /**
@ -40,7 +39,8 @@ public void testInitializeBlockRecovery() throws Exception {
DatanodeDescriptor dd3 = s3.getDatanodeDescriptor(); DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
dd1.isAlive = dd2.isAlive = dd3.isAlive = true; dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( BlockInfoUnderConstruction blockInfo =
new BlockInfoUnderConstructionContiguous(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
(short) 3, (short) 3,
BlockUCState.UNDER_CONSTRUCTION, BlockUCState.UNDER_CONSTRUCTION,
@ -51,7 +51,7 @@ public void testInitializeBlockRecovery() throws Exception {
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
blockInfo.initializeBlockRecovery(1); blockInfo.initializeBlockRecovery(1);
BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1); BlockInfoUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo); assertEquals(blockInfoRecovery[0], blockInfo);
// Recovery attempt #2. // Recovery attempt #2.

View File

@ -726,7 +726,7 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
// verify the storage info is correct // verify the storage info is correct
assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
(ds) >= 0); (ds) >= 0);
assertTrue(((BlockInfoContiguousUnderConstruction) bm. assertTrue(((BlockInfoUnderConstruction) bm.
getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0); getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId)) assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
.findStorageInfo(ds) >= 0); .findStorageInfo(ds) >= 0);
@ -747,8 +747,8 @@ private BlockInfo addBlockToBM(long blkId) {
private BlockInfo addUcBlockToBM(long blkId) { private BlockInfo addUcBlockToBM(long blkId) {
Block block = new Block(blkId); Block block = new Block(blkId);
BlockInfoContiguousUnderConstruction blockInfo = BlockInfoUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction(block, (short) 3); new BlockInfoUnderConstructionContiguous(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class); BlockCollection bc = Mockito.mock(BlockCollection.class);
Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication(); Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc); bm.blocksMap.addBlockCollection(blockInfo, bc);

View File

@ -39,7 +39,6 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
import org.junit.Test; import org.junit.Test;
/** /**
@ -173,7 +172,8 @@ public void testHeartbeatBlockRecovery() throws Exception {
dd1.getStorageInfos()[0], dd1.getStorageInfos()[0],
dd2.getStorageInfos()[0], dd2.getStorageInfos()[0],
dd3.getStorageInfos()[0]}; dd3.getStorageInfos()[0]};
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( BlockInfoUnderConstruction blockInfo =
new BlockInfoUnderConstructionContiguous(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages); BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo); dd1.addBlockToBeRecovered(blockInfo);
@ -195,7 +195,7 @@ public void testHeartbeatBlockRecovery() throws Exception {
// More than the default stale interval of 30 seconds. // More than the default stale interval of 30 seconds.
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0); DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
blockInfo = new BlockInfoContiguousUnderConstruction( blockInfo = new BlockInfoUnderConstructionContiguous(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages); BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo); dd1.addBlockToBeRecovered(blockInfo);
@ -216,7 +216,7 @@ public void testHeartbeatBlockRecovery() throws Exception {
// More than the default stale interval of 30 seconds. // More than the default stale interval of 30 seconds.
DFSTestUtil.resetLastUpdatesWithOffset(dd2, - 40 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd2, - 40 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, - 80 * 1000); DFSTestUtil.resetLastUpdatesWithOffset(dd3, - 80 * 1000);
blockInfo = new BlockInfoContiguousUnderConstruction( blockInfo = new BlockInfoUnderConstructionContiguous(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3, new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages); BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo); dd1.addBlockToBeRecovered(blockInfo);

View File

@ -1176,7 +1176,8 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
// block under construction, the BlockManager will realize the expected // block under construction, the BlockManager will realize the expected
// replication has been achieved and remove it from the under-replicated // replication has been achieved and remove it from the under-replicated
// queue. // queue.
BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1); BlockInfoUnderConstruction info =
new BlockInfoUnderConstructionContiguous(block1, (short) 1);
BlockCollection bc = mock(BlockCollection.class); BlockCollection bc = mock(BlockCollection.class);
when(bc.getPreferredBlockReplication()).thenReturn((short)1); when(bc.getPreferredBlockReplication()).thenReturn((short)1);
bm.addBlockCollection(info, bc); bm.addBlockCollection(info, bc);
@ -1232,7 +1233,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo( DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo(
dataNodes[0], new DatanodeStorage("s1"))}; dataNodes[0], new DatanodeStorage("s1"))};
final BlockInfoContiguousUnderConstruction ucBlock = final BlockInfoUnderConstruction ucBlock =
info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
storageAry); storageAry);
DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class); DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);

View File

@ -36,7 +36,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -170,7 +170,7 @@ public void testGetBlockLocations() throws IOException {
final List<LocatedBlock> blocks = lb.getLocatedBlocks(); final List<LocatedBlock> blocks = lb.getLocatedBlocks();
assertEquals(i, blocks.size()); assertEquals(i, blocks.size());
final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock(); final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
assertTrue(b instanceof BlockInfoContiguousUnderConstruction); assertTrue(b instanceof BlockInfoUnderConstruction);
if (++i < NUM_BLOCKS) { if (++i < NUM_BLOCKS) {
// write one more block // write one more block

View File

@ -24,7 +24,8 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.junit.Test; import org.junit.Test;
@ -68,8 +69,10 @@ private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
namesystem.dir.getINodeMap().put(file); namesystem.dir.getINodeMap().put(file);
FSNamesystem namesystemSpy = spy(namesystem); FSNamesystem namesystemSpy = spy(namesystem);
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction( BlockInfoUnderConstruction blockInfo =
block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets); new BlockInfoUnderConstructionContiguous(
block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
targets);
blockInfo.setBlockCollection(file); blockInfo.setBlockCollection(file);
blockInfo.setGenerationStamp(genStamp); blockInfo.setGenerationStamp(genStamp);
blockInfo.initializeBlockRecovery(genStamp); blockInfo.initializeBlockRecovery(genStamp);

View File

@ -54,7 +54,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -1019,7 +1019,7 @@ public void testTruncateRecovery() throws IOException {
is(fsn.getBlockIdManager().getGenerationStampV2())); is(fsn.getBlockIdManager().getGenerationStampV2()));
assertThat(file.getLastBlock().getBlockUCState(), assertThat(file.getLastBlock().getBlockUCState(),
is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock()) long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
.getBlockRecoveryId(); .getBlockRecoveryId();
assertThat(blockRecoveryId, is(initialGenStamp + 1)); assertThat(blockRecoveryId, is(initialGenStamp + 1));
fsn.getEditLog().logTruncate( fsn.getEditLog().logTruncate(
@ -1052,7 +1052,7 @@ public void testTruncateRecovery() throws IOException {
is(fsn.getBlockIdManager().getGenerationStampV2())); is(fsn.getBlockIdManager().getGenerationStampV2()));
assertThat(file.getLastBlock().getBlockUCState(), assertThat(file.getLastBlock().getBlockUCState(),
is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY)); is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock()) long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
.getBlockRecoveryId(); .getBlockRecoveryId();
assertThat(blockRecoveryId, is(initialGenStamp + 1)); assertThat(blockRecoveryId, is(initialGenStamp + 1));
fsn.getEditLog().logTruncate( fsn.getEditLog().logTruncate(

View File

@ -72,7 +72,7 @@
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
@ -752,8 +752,8 @@ void invoke() throws Exception {
boolean checkNamenodeBeforeReturn() throws Exception { boolean checkNamenodeBeforeReturn() throws Exception {
INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory() INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
.getINode4Write(file).asFile(); .getINode4Write(file).asFile();
BlockInfoContiguousUnderConstruction blkUC = BlockInfoUnderConstruction blkUC =
(BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1]; (BlockInfoUnderConstruction) (fileNode.getBlocks())[1];
int datanodeNum = blkUC.getExpectedStorageLocations().length; int datanodeNum = blkUC.getExpectedStorageLocations().length;
for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) { for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
Thread.sleep(1000); Thread.sleep(1000);

View File

@ -44,7 +44,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage; import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner; import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
@ -177,7 +177,7 @@ public static void checkSnapshotCreation(DistributedFileSystem hdfs,
* Specific information for different types of INode: * Specific information for different types of INode:
* {@link INodeDirectory}:childrenSize * {@link INodeDirectory}:childrenSize
* {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()} * {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
* and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information. * and {@link BlockInfoUnderConstruction#toString()} for detailed information.
* {@link FileWithSnapshot}: next link * {@link FileWithSnapshot}: next link
* </pre> * </pre>
* @see INode#dumpTreeRecursively() * @see INode#dumpTreeRecursively()