HDFS-2106. Move block management code from o.a.h.h.s.namenode to a new package o.a.h.h.s.blockmanagement.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140909 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2011-06-28 23:43:03 +00:00
parent 1834fb99f5
commit 1bcfe45e47
52 changed files with 437 additions and 355 deletions

View File

@ -534,6 +534,9 @@ Trunk (unreleased changes)
HDFS-2110. StreamFile and ByteRangeInputStream cleanup. (eli)
HDFS-2106. Move block management code from o.a.h.h.s.namenode to a new
package o.a.h.h.s.blockmanagement. (szetszwo)
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

View File

@ -61,10 +61,10 @@
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.io.IOUtils;

View File

@ -15,16 +15,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
/**
* Internal class for block metadata.
*/
class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
private INodeFile inode;
/** For implementing {@link LightWeightGSet.LinkedElement} interface */
@ -44,12 +45,12 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
* Construct an entry for blocksmap
* @param replication the block's replication factor
*/
protected BlockInfo(int replication) {
public BlockInfo(int replication) {
this.triplets = new Object[3*replication];
this.inode = null;
}
protected BlockInfo(Block blk, int replication) {
public BlockInfo(Block blk, int replication) {
super(blk);
this.triplets = new Object[3*replication];
this.inode = null;
@ -65,11 +66,11 @@ protected BlockInfo(BlockInfo from) {
this.inode = from.inode;
}
INodeFile getINode() {
public INodeFile getINode() {
return inode;
}
void setINode(INodeFile inode) {
public void setINode(INodeFile inode) {
this.inode = inode;
}
@ -162,7 +163,7 @@ int numNodes() {
/**
* Add data-node this block belongs to.
*/
boolean addNode(DatanodeDescriptor node) {
public boolean addNode(DatanodeDescriptor node) {
if(findDatanode(node) >= 0) // the node is already there
return false;
// find the last null node
@ -176,7 +177,7 @@ boolean addNode(DatanodeDescriptor node) {
/**
* Remove data-node from the block.
*/
boolean removeNode(DatanodeDescriptor node) {
public boolean removeNode(DatanodeDescriptor node) {
int dnIndex = findDatanode(node);
if(dnIndex < 0) // the node is not found
return false;
@ -218,7 +219,7 @@ int findDatanode(DatanodeDescriptor dn) {
* If the head is null then form a new list.
* @return current block as the new head of the list.
*/
BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
public BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
int dnIndex = this.findDatanode(dn);
assert dnIndex >= 0 : "Data node is not found: current";
assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
@ -238,7 +239,7 @@ assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
* @return the new head of the list or null if the list becomes
* empty after deletion.
*/
BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
public BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
if(head == null)
return null;
int dnIndex = this.findDatanode(dn);
@ -284,7 +285,7 @@ boolean listIsConsistent(DatanodeDescriptor dn) {
* to {@link BlockInfoUnderConstruction}.
* @return {@link BlockUCState#COMPLETE}
*/
BlockUCState getBlockUCState() {
public BlockUCState getBlockUCState() {
return BlockUCState.COMPLETE;
}
@ -293,7 +294,7 @@ BlockUCState getBlockUCState() {
*
* @return true if the state of the block is {@link BlockUCState#COMPLETE}
*/
boolean isComplete() {
public boolean isComplete() {
return getBlockUCState().equals(BlockUCState.COMPLETE);
}
@ -302,7 +303,7 @@ boolean isComplete() {
*
* @return BlockInfoUnderConstruction - an under construction block.
*/
BlockInfoUnderConstruction convertToBlockUnderConstruction(
public BlockInfoUnderConstruction convertToBlockUnderConstruction(
BlockUCState s, DatanodeDescriptor[] targets) {
if(isComplete()) {
return new BlockInfoUnderConstruction(
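For orientation, the triplets array used throughout BlockInfo packs three slots per replica: the owning datanode plus the previous/next links of that datanode's block list. A minimal standalone sketch of the layout, inferred from the findDatanode/getPrevious/getNext accessors in this file (not part of the commit):

class TripletsLayoutSketch {
  // For replica i of a block with replication r, triplets has length 3*r:
  //   triplets[3*i]     = the DatanodeDescriptor storing replica i
  //   triplets[3*i + 1] = previous BlockInfo in that datanode's block list
  //   triplets[3*i + 2] = next BlockInfo in that datanode's block list
  private final Object[] triplets;

  TripletsLayoutSketch(int replication) {
    this.triplets = new Object[3 * replication];
  }

  Object getDatanode(int index) { return triplets[index * 3]; }
  Object getPrevious(int index) { return triplets[index * 3 + 1]; }
  Object getNext(int index)     { return triplets[index * 3 + 2]; }
}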

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.ArrayList;
@ -24,12 +24,13 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/**
* Represents a block that is currently being constructed.<br>
* This is usually the last block of a file opened for write or append.
*/
class BlockInfoUnderConstruction extends BlockInfo {
public class BlockInfoUnderConstruction extends BlockInfo {
/** Block state. See {@link BlockUCState} */
private BlockUCState blockUCState;
@ -128,11 +129,14 @@ public String toString() {
* Create block and set its state to
* {@link BlockUCState#UNDER_CONSTRUCTION}.
*/
BlockInfoUnderConstruction(Block blk, int replication) {
public BlockInfoUnderConstruction(Block blk, int replication) {
this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
}
BlockInfoUnderConstruction(Block blk, int replication,
/**
* Create a block that is currently being constructed.
*/
public BlockInfoUnderConstruction(Block blk, int replication,
BlockUCState state,
DatanodeDescriptor[] targets) {
super(blk, replication);
@ -160,7 +164,8 @@ assert getBlockUCState() != BlockUCState.COMPLETE :
return new BlockInfo(this);
}
void setExpectedLocations(DatanodeDescriptor[] targets) {
/** Set expected locations */
public void setExpectedLocations(DatanodeDescriptor[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
for(int i = 0; i < numLocations; i++)
@ -172,7 +177,7 @@ void setExpectedLocations(DatanodeDescriptor[] targets) {
* Create array of expected replica locations
* (as has been assigned by chooseTargets()).
*/
DatanodeDescriptor[] getExpectedLocations() {
public DatanodeDescriptor[] getExpectedLocations() {
int numLocations = replicas == null ? 0 : replicas.size();
DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocations];
for(int i = 0; i < numLocations; i++)
@ -180,7 +185,8 @@ DatanodeDescriptor[] getExpectedLocations() {
return locations;
}
int getNumExpectedLocations() {
/** Get the number of expected locations */
public int getNumExpectedLocations() {
return replicas == null ? 0 : replicas.size();
}
@ -189,7 +195,7 @@ int getNumExpectedLocations() {
* @see BlockUCState
*/
@Override // BlockInfo
BlockUCState getBlockUCState() {
public BlockUCState getBlockUCState() {
return blockUCState;
}
@ -197,7 +203,8 @@ void setBlockUCState(BlockUCState s) {
blockUCState = s;
}
long getBlockRecoveryId() {
/** Get block recovery ID */
public long getBlockRecoveryId() {
return blockRecoveryId;
}
@ -220,7 +227,7 @@ void commitBlock(Block block) throws IOException {
* Find the first alive data-node starting from the previous primary and
* make it primary.
*/
void initializeBlockRecovery(long recoveryId) {
public void initializeBlockRecovery(long recoveryId) {
setBlockUCState(BlockUCState.UNDER_RECOVERY);
blockRecoveryId = recoveryId;
if (replicas.size() == 0) {
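The methods above drive the lifecycle of a file's last block. A sketch of the transitions (state names mirror HdfsConstants.BlockUCState, imported in this file; COMMITTED is assumed from that enum rather than shown in this hunk):

// UNDER_CONSTRUCTION --commitBlock()--> COMMITTED --completeBlock()--> COMPLETE
// UNDER_CONSTRUCTION --initializeBlockRecovery()--> UNDER_RECOVERY
enum BlockUCStateSketch {
  UNDER_CONSTRUCTION, UNDER_RECOVERY, COMMITTED, COMPLETE
}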

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.io.PrintWriter;
@ -39,10 +39,14 @@
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/**
* Keeps information related to the blocks stored in the Hadoop cluster.
@ -57,18 +61,43 @@ public class BlockManager {
private final FSNamesystem namesystem;
volatile long pendingReplicationBlocksCount = 0L;
volatile long corruptReplicaBlocksCount = 0L;
volatile long underReplicatedBlocksCount = 0L;
volatile long scheduledReplicationBlocksCount = 0L;
volatile long excessBlocksCount = 0L;
volatile long pendingDeletionBlocksCount = 0L;
private volatile long pendingReplicationBlocksCount = 0L;
private volatile long corruptReplicaBlocksCount = 0L;
private volatile long underReplicatedBlocksCount = 0L;
public volatile long scheduledReplicationBlocksCount = 0L;
private volatile long excessBlocksCount = 0L;
private volatile long pendingDeletionBlocksCount = 0L;
//
// Mapping: Block -> { INode, datanodes, self ref }
// Updated only in response to client-sent information.
//
final BlocksMap blocksMap;
/** Used by metrics */
public long getPendingReplicationBlocksCount() {
return pendingReplicationBlocksCount;
}
/** Used by metrics */
public long getUnderReplicatedBlocksCount() {
return underReplicatedBlocksCount;
}
/** Used by metrics */
public long getCorruptReplicaBlocksCount() {
return corruptReplicaBlocksCount;
}
/** Used by metrics */
public long getScheduledReplicationBlocksCount() {
return scheduledReplicationBlocksCount;
}
/** Used by metrics */
public long getPendingDeletionBlocksCount() {
return pendingDeletionBlocksCount;
}
/** Used by metrics */
public long getExcessBlocksCount() {
return excessBlocksCount;
}
/**
* Mapping: Block -> { INode, datanodes, self ref }
* Updated only in response to client-sent information.
*/
public final BlocksMap blocksMap;
//
// Store blocks-->datanodedescriptor(s) map of corrupt replicas
@ -90,24 +119,24 @@ public class BlockManager {
// eventually remove these extras.
// Mapping: StorageID -> TreeSet<Block>
//
Map<String, Collection<Block>> excessReplicateMap =
public final Map<String, Collection<Block>> excessReplicateMap =
new TreeMap<String, Collection<Block>>();
//
// Store set of Blocks that need to be replicated 1 or more times.
// We also store pending replication-orders.
//
UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
public UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
private PendingReplicationBlocks pendingReplications;
// The maximum number of replicas allowed for a block
int maxReplication;
public int maxReplication;
// How many outgoing replication streams a given node should have at one time
int maxReplicationStreams;
public int maxReplicationStreams;
// Minimum copies needed or else write is disallowed
int minReplication;
public int minReplication;
// Default number of replicas
int defaultReplication;
public int defaultReplication;
// How many entries are returned by getCorruptInodes()
int maxCorruptFilesReturned;
@ -121,9 +150,9 @@ public class BlockManager {
Random r = new Random();
// for block replicas placement
BlockPlacementPolicy replicator;
public BlockPlacementPolicy replicator;
BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY);
}
@ -178,16 +207,16 @@ void setConfigurationParameters(Configuration conf) throws IOException {
FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
}
void activate() {
public void activate() {
pendingReplications.start();
}
void close() {
public void close() {
if (pendingReplications != null) pendingReplications.stop();
blocksMap.close();
}
void metaSave(PrintWriter out) {
public void metaSave(PrintWriter out) {
//
// Dump contents of neededReplication
//
@ -249,7 +278,7 @@ void metaSave(PrintWriter out) {
* @param block
* @return true if the block has minimum replicas
*/
boolean checkMinReplication(Block block) {
public boolean checkMinReplication(Block block) {
return (countNodes(block).liveReplicas() >= minReplication);
}
@ -297,7 +326,7 @@ private void commitBlock(INodeFileUnderConstruction fileINode,
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
public void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
Block commitBlock) throws IOException {
if(commitBlock == null)
@ -362,7 +391,7 @@ BlockInfo completeBlock(INodeFile fileINode, BlockInfo block)
* @param fileINode file
* @return the last block locations if the block is partial or null otherwise
*/
LocatedBlock convertLastBlockToUnderConstruction(
public LocatedBlock convertLastBlockToUnderConstruction(
INodeFileUnderConstruction fileINode) throws IOException {
BlockInfo oldBlock = fileINode.getLastBlock();
if(oldBlock == null ||
@ -393,7 +422,7 @@ LocatedBlock convertLastBlockToUnderConstruction(
/**
* Get all valid locations of the block
*/
ArrayList<String> getValidLocations(Block block) {
public ArrayList<String> getValidLocations(Block block) {
ArrayList<String> machineSet =
new ArrayList<String>(blocksMap.numNodes(block));
for(Iterator<DatanodeDescriptor> it =
@ -407,7 +436,7 @@ ArrayList<String> getValidLocations(Block block) {
return machineSet;
}
List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
public List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
long length, int nrBlocksToReturn) throws IOException {
int curBlk = 0;
long curPos = 0, blkSize = 0;
@ -436,11 +465,15 @@ List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
return results;
}
/** @param needBlockToken
* @return a LocatedBlock for the given block */
LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
/** @return a LocatedBlock for the given block */
public LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
) throws IOException {
if (!blk.isComplete()) {
if (blk instanceof BlockInfoUnderConstruction) {
if (blk.isComplete()) {
throw new IOException(
"blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
+ ", blk=" + blk);
}
final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk;
final DatanodeDescriptor[] locations = uc.getExpectedLocations();
return namesystem.createLocatedBlock(uc, locations, pos, false);
@ -476,7 +509,7 @@ LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
* Check whether the replication parameter is within the range
* determined by system configuration.
*/
void verifyReplication(String src,
public void verifyReplication(String src,
short replication,
String clientName) throws IOException {
@ -544,7 +577,7 @@ void addToInvalidates(Block b, DatanodeInfo dn, boolean log) {
* @param b block
* @param dn datanode
*/
void addToInvalidates(Block b, DatanodeInfo dn) {
public void addToInvalidates(Block b, DatanodeInfo dn) {
addToInvalidates(b, dn, true);
}
@ -585,7 +618,7 @@ private void dumpRecentInvalidateSets(PrintWriter out) {
}
}
void findAndMarkBlockAsCorrupt(Block blk,
public void findAndMarkBlockAsCorrupt(Block blk,
DatanodeInfo dn) throws IOException {
BlockInfo storedBlock = getStoredBlock(blk);
if (storedBlock == null) {
@ -668,14 +701,14 @@ private void invalidateBlock(Block blk, DatanodeInfo dn)
}
}
void updateState() {
public void updateState() {
pendingReplicationBlocksCount = pendingReplications.size();
underReplicatedBlocksCount = neededReplications.size();
corruptReplicaBlocksCount = corruptReplicas.size();
}
/** Return number of under-replicated but not missing blocks */
int getUnderReplicatedNotMissingBlocks() {
public int getUnderReplicatedNotMissingBlocks() {
return neededReplications.getUnderReplicatedBlockCount();
}
@ -684,7 +717,7 @@ int getUnderReplicatedNotMissingBlocks() {
* @param nodesToProcess number of datanodes to schedule deletion work
* @return total number of block for deletion
*/
int computeInvalidateWork(int nodesToProcess) {
public int computeInvalidateWork(int nodesToProcess) {
int numOfNodes = recentInvalidateSets.size();
nodesToProcess = Math.min(numOfNodes, nodesToProcess);
@ -724,7 +757,7 @@ int computeInvalidateWork(int nodesToProcess) {
*
* @return number of blocks scheduled for replication during this iteration.
*/
int computeReplicationWork(int blocksToProcess) throws IOException {
public int computeReplicationWork(int blocksToProcess) throws IOException {
// Choose the blocks to be replicated
List<List<Block>> blocksToReplicate =
chooseUnderReplicatedBlocks(blocksToProcess);
@ -1031,7 +1064,7 @@ else if (excessBlocks != null && excessBlocks.contains(block)) {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
void processPendingReplications() {
public void processPendingReplications() {
Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
if (timedOutItems != null) {
namesystem.writeLock();
@ -1464,7 +1497,7 @@ private Block addStoredBlock(final BlockInfo block,
short fileReplication = fileINode.getReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas, fileReplication);
num.decommissionedReplicas(), fileReplication);
} else {
updateNeededReplications(storedBlock, curReplicaDelta, 0);
}
@ -1525,7 +1558,7 @@ private void invalidateCorruptReplicas(Block blk) {
* For each block in the name-node verify whether it belongs to any file,
* over or under replicated. Place it into the respective queue.
*/
void processMisReplicatedBlocks() {
public void processMisReplicatedBlocks() {
long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0;
namesystem.writeLock();
try {
@ -1570,7 +1603,7 @@ void processMisReplicatedBlocks() {
* If there are any extras, call chooseExcessReplicates() to
* mark them in the excessReplicateMap.
*/
void processOverReplicatedBlock(Block block, short replication,
public void processOverReplicatedBlock(Block block, short replication,
DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
assert namesystem.hasWriteLock();
if (addedNode == delNodeHint) {
@ -1597,7 +1630,7 @@ void processOverReplicatedBlock(Block block, short replication,
addedNode, delNodeHint, replicator);
}
void addToExcessReplicate(DatanodeInfo dn, Block block) {
public void addToExcessReplicate(DatanodeInfo dn, Block block) {
assert namesystem.hasWriteLock();
Collection<Block> excessBlocks = excessReplicateMap.get(dn.getStorageID());
if (excessBlocks == null) {
@ -1618,7 +1651,7 @@ void addToExcessReplicate(DatanodeInfo dn, Block block) {
* Modify (block-->datanode) map. Possibly generate replication tasks, if the
* removed block is still valid.
*/
void removeStoredBlock(Block block, DatanodeDescriptor node) {
public void removeStoredBlock(Block block, DatanodeDescriptor node) {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
+ block + " from " + node.getName());
@ -1673,7 +1706,7 @@ void removeStoredBlock(Block block, DatanodeDescriptor node) {
/**
* The given node is reporting that it received a certain block.
*/
void addBlock(DatanodeDescriptor node, Block block, String delHint)
public void addBlock(DatanodeDescriptor node, Block block, String delHint)
throws IOException {
// decrement number of blocks scheduled to this datanode.
node.decBlocksScheduled();
@ -1726,7 +1759,7 @@ void addBlock(DatanodeDescriptor node, Block block, String delHint)
/**
* Return the number of nodes that are live and decommissioned.
*/
NumberReplicas countNodes(Block b) {
public NumberReplicas countNodes(Block b) {
int count = 0;
int live = 0;
int corrupt = 0;
@ -1805,7 +1838,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
* Return true if there are any blocks on this node that have not
* yet reached their replication factor. Otherwise returns false.
*/
boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
public boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
boolean status = false;
int underReplicatedBlocks = 0;
int decommissionOnlyReplicas = 0;
@ -1855,11 +1888,11 @@ boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
return status;
}
int getActiveBlockCount() {
public int getActiveBlockCount() {
return blocksMap.size() - (int)pendingDeletionBlocksCount;
}
DatanodeDescriptor[] getNodes(BlockInfo block) {
public DatanodeDescriptor[] getNodes(BlockInfo block) {
DatanodeDescriptor[] nodes =
new DatanodeDescriptor[block.numNodes()];
Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
@ -1869,22 +1902,22 @@ DatanodeDescriptor[] getNodes(BlockInfo block) {
return nodes;
}
int getTotalBlocks() {
public int getTotalBlocks() {
return blocksMap.size();
}
void removeBlock(Block block) {
public void removeBlock(Block block) {
addToInvalidates(block);
corruptReplicas.removeFromCorruptReplicasMap(block);
blocksMap.removeBlock(block);
}
BlockInfo getStoredBlock(Block block) {
public BlockInfo getStoredBlock(Block block) {
return blocksMap.getStoredBlock(block);
}
/* Updates a block in the under-replication queue */
void updateNeededReplications(Block block, int curReplicasDelta,
public void updateNeededReplications(Block block, int curReplicasDelta,
int expectedReplicasDelta) {
namesystem.writeLock();
try {
@ -1905,13 +1938,13 @@ void updateNeededReplications(Block block, int curReplicasDelta,
}
}
void checkReplication(Block block, int numExpectedReplicas) {
public void checkReplication(Block block, int numExpectedReplicas) {
// filter out containingNodes that are marked for decommission.
NumberReplicas number = countNodes(block);
if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
neededReplications.add(block,
number.liveReplicas(),
number.decommissionedReplicas,
number.decommissionedReplicas(),
numExpectedReplicas);
}
}
@ -1926,11 +1959,8 @@ private int getReplication(Block block) {
return fileINode.getReplication();
}
/**
* Remove a datanode from the invalidatesSet
* @param n datanode
*/
void removeFromInvalidates(String storageID) {
/** Remove a datanode from the invalidatesSet */
public void removeFromInvalidates(String storageID) {
Collection<Block> blocks = recentInvalidateSets.remove(storageID);
if (blocks != null) {
pendingDeletionBlocksCount -= blocks.size();
@ -1998,7 +2028,7 @@ private int invalidateWorkForOneNode(String nodeId) {
//Returns the number of racks over which a given block is replicated
//decommissioning/decommissioned nodes are not counted. corrupt replicas
//are also ignored
int getNumberOfRacks(Block b) {
public int getNumberOfRacks(Block b) {
HashSet<String> rackSet = new HashSet<String>(0);
Collection<DatanodeDescriptor> corruptNodes =
corruptReplicas.getNodes(b);
@ -2056,32 +2086,32 @@ boolean isNeededReplication(Block b, int expectedReplication, int curReplicas) {
}
}
long getMissingBlocksCount() {
public long getMissingBlocksCount() {
// not locking
return this.neededReplications.getCorruptBlockSize();
}
BlockInfo addINode(BlockInfo block, INodeFile iNode) {
public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
return blocksMap.addINode(block, iNode);
}
INodeFile getINode(Block b) {
public INodeFile getINode(Block b) {
return blocksMap.getINode(b);
}
void removeFromCorruptReplicasMap(Block block) {
public void removeFromCorruptReplicasMap(Block block) {
corruptReplicas.removeFromCorruptReplicasMap(block);
}
int numCorruptReplicas(Block block) {
public int numCorruptReplicas(Block block) {
return corruptReplicas.numCorruptReplicas(block);
}
void removeBlockFromMap(Block block) {
public void removeBlockFromMap(Block block) {
blocksMap.removeBlock(block);
}
int getCapacity() {
public int getCapacity() {
namesystem.readLock();
try {
return blocksMap.getCapacity();
@ -2104,7 +2134,7 @@ int getCapacity() {
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
public long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
Long startingBlockId) {
return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
startingBlockId);
@ -2113,7 +2143,7 @@ long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
/**
* Return an iterator over the set of blocks for which there are no replicas.
*/
UnderReplicatedBlocks.BlockIterator getCorruptReplicaBlockIterator() {
public BlockIterator getCorruptReplicaBlockIterator() {
return neededReplications
.iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
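The field-to-getter changes at the top of this file follow a plain encapsulation pattern: the volatile counters become private, and metrics code reads them through getters while updateState() refreshes the snapshot. A self-contained sketch (names illustrative):

class BlockCountersSketch {
  private volatile long pendingReplicationBlocksCount;
  private volatile long underReplicatedBlocksCount;
  private volatile long corruptReplicaBlocksCount;

  /** Used by metrics */
  public long getPendingReplicationBlocksCount() { return pendingReplicationBlocksCount; }
  /** Used by metrics */
  public long getUnderReplicatedBlocksCount() { return underReplicatedBlocksCount; }
  /** Used by metrics */
  public long getCorruptReplicaBlocksCount() { return corruptReplicaBlocksCount; }

  /** The real updateState() derives these from the queue sizes. */
  void updateState(long pending, long under, long corrupt) {
    pendingReplicationBlocksCount = pending;
    underReplicatedBlocksCount = under;
    corruptReplicaBlocksCount = corrupt;
  }
}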

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;
import java.util.Collection;
@ -26,6 +26,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;
@ -94,7 +96,7 @@ final DatanodeDescriptor[] chooseTarget(String srcPath,
* @return array of DatanodeDescriptor instances chosen as target
* and sorted as a pipeline.
*/
abstract DatanodeDescriptor[] chooseTarget(String srcPath,
public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
@ -222,11 +224,11 @@ DatanodeDescriptor[] chooseTarget(String srcPath,
* @param numOfReplicas number of replicas wanted.
* @param writer the writer's machine, null if not in the cluster.
* @param blocksize size of the data to be written.
* @param excludedNodes: datanodes that should not be considered as targets.
* @param excludedNodes datanodes that should not be considered as targets.
* @return array of DatanodeDescriptor instances chosen as targets
* and sorted as a pipeline.
*/
DatanodeDescriptor[] chooseTarget(String srcPath,
public DatanodeDescriptor[] chooseTarget(String srcPath,
int numOfReplicas,
DatanodeDescriptor writer,
HashMap<Node, Node> excludedNodes,
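With chooseTarget now public and ReflectionUtils already imported here, a pluggable policy can be loaded along these lines; this is a sketch, and the configuration key name is an assumption for illustration, not taken from this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.util.ReflectionUtils;

class PolicyLoaderSketch {
  static BlockPlacementPolicy getInstance(Configuration conf,
      Class<? extends BlockPlacementPolicy> defaultPolicy) {
    Class<? extends BlockPlacementPolicy> clazz = conf.getClass(
        "dfs.block.replicator.classname",   // assumed key name
        defaultPolicy, BlockPlacementPolicy.class);
    return ReflectionUtils.newInstance(clazz, conf);  // injects conf if Configurable
  }
}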

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;
import java.util.Collection;
@ -32,6 +32,9 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
@ -89,7 +92,7 @@ public DatanodeDescriptor[] chooseTarget(String srcPath,
/** {@inheritDoc} */
@Override
DatanodeDescriptor[] chooseTarget(String srcPath,
public DatanodeDescriptor[] chooseTarget(String srcPath,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,

View File

@ -15,11 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.Iterator;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.GSet;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
@ -28,7 +29,7 @@
* block's metadata currently includes INode it belongs to and
* the datanodes that store the block.
*/
class BlocksMap {
public class BlocksMap {
private static class NodeIterator implements Iterator<DatanodeDescriptor> {
private BlockInfo blockInfo;
private int nextIdx = 0;
@ -100,7 +101,7 @@ INodeFile getINode(Block b) {
/**
* Add block b belonging to the specified file inode to the map.
*/
BlockInfo addINode(BlockInfo b, INodeFile iNode) {
public BlockInfo addINode(BlockInfo b, INodeFile iNode) {
BlockInfo info = blocks.get(b);
if (info != b) {
info = b;
@ -136,7 +137,7 @@ BlockInfo getStoredBlock(Block b) {
* Searches for the block in the BlocksMap and
* returns Iterator that iterates through the nodes the block belongs to.
*/
Iterator<DatanodeDescriptor> nodeIterator(Block b) {
public Iterator<DatanodeDescriptor> nodeIterator(Block b) {
return nodeIterator(blocks.get(b));
}
@ -185,7 +186,7 @@ Iterable<BlockInfo> getBlocks() {
/**
* Check if the block exists in map
*/
boolean contains(Block block) {
public boolean contains(Block block) {
return blocks.contains(block);
}
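The addINode shown above hinges on an identity check: the map's stored BlockInfo is the canonical instance, and a caller's differing copy replaces any stale entry. A minimal sketch of that pattern with a HashMap standing in for the LightWeightGSet:

import java.util.HashMap;
import java.util.Map;

class CanonicalBlockMapSketch<B> {
  private final Map<B, B> blocks = new HashMap<B, B>();

  /** Store b as the canonical instance, replacing any stale copy. */
  B addINode(B b) {
    B info = blocks.get(b);
    if (info != b) {   // identity, not equals(): missing or stale entry
      info = b;
      blocks.put(info, info);
    }
    return info;
  }
}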

View File

@ -15,10 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.Server;
import java.util.*;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.DataInput;
import java.io.IOException;
@ -26,6 +26,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.WritableUtils;
@ -44,7 +45,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
// Stores status of decommissioning.
// If node is not decommissioning, do not use this object for anything.
DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
public DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
/** Block and targets pair */
@InterfaceAudience.Private
@ -96,8 +97,8 @@ boolean contains(E e) {
private int numBlocks = 0;
// isAlive == heartbeats.contains(this)
// This is an optimization, because contains takes O(n) time on ArrayList
protected boolean isAlive = false;
protected boolean needKeyUpdate = false;
public boolean isAlive = false;
public boolean needKeyUpdate = false;
/** A queue of blocks to be replicated by this datanode */
private BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
@ -204,7 +205,7 @@ public DatanodeDescriptor(DatanodeID nodeID,
* Add datanode to the block.
* Add block to the head of the list of blocks belonging to the data-node.
*/
boolean addBlock(BlockInfo b) {
public boolean addBlock(BlockInfo b) {
if(!b.addNode(this))
return false;
// add to the head of the data-node list
@ -217,7 +218,7 @@ boolean addBlock(BlockInfo b) {
* Remove block from the list of blocks belonging to the data-node.
* Remove datanode from the block.
*/
boolean removeBlock(BlockInfo b) {
public boolean removeBlock(BlockInfo b) {
blockList = b.listRemove(blockList, this);
if ( b.removeNode(this) ) {
numBlocks--;
@ -242,7 +243,7 @@ void moveBlockToHead(BlockInfo b) {
* @param newBlock - a replacement block
* @return the new block
*/
BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
boolean done = removeBlock(oldBlock);
assert done : "Old block should belong to the data-node when replacing";
done = addBlock(newBlock);
@ -250,7 +251,7 @@ BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
return newBlock;
}
void resetBlocks() {
public void resetBlocks() {
this.capacity = 0;
this.remaining = 0;
this.blockPoolUsed = 0;
@ -268,7 +269,7 @@ public int numBlocks() {
/**
* Updates stats from datanode heartbeat.
*/
void updateHeartbeat(long capacity, long dfsUsed, long remaining,
public void updateHeartbeat(long capacity, long dfsUsed, long remaining,
long blockPoolUsed, int xceiverCount, int volFailures) {
this.capacity = capacity;
this.dfsUsed = dfsUsed;
@ -283,7 +284,7 @@ void updateHeartbeat(long capacity, long dfsUsed, long remaining,
/**
* Iterates over the list of blocks belonging to the datanode.
*/
static class BlockIterator implements Iterator<BlockInfo> {
public static class BlockIterator implements Iterator<BlockInfo> {
private BlockInfo current;
private DatanodeDescriptor node;
@ -307,7 +308,7 @@ public void remove() {
}
}
Iterator<BlockInfo> getBlockIterator() {
public Iterator<BlockInfo> getBlockIterator() {
return new BlockIterator(this.blockList, this);
}
@ -361,11 +362,11 @@ int getNumberOfBlocksToBeInvalidated() {
}
}
List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
return replicateBlocks.poll(maxTransfers);
}
BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
if(blocks == null)
return null;
@ -375,7 +376,7 @@ BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
/**
* Remove the specified number of blocks to be invalidated
*/
Block[] getInvalidateBlocks(int maxblocks) {
public Block[] getInvalidateBlocks(int maxblocks) {
return getBlockArray(invalidateBlocks, maxblocks);
}
@ -418,7 +419,7 @@ static private Block[] getBlockArray(Collection<Block> blocks, int max) {
}
/** Serialization for FSEditLog */
void readFieldsFromFSEditLog(DataInput in) throws IOException {
public void readFieldsFromFSEditLog(DataInput in) throws IOException {
this.name = DeprecatedUTF8.readString(in);
this.storageID = DeprecatedUTF8.readString(in);
this.infoPort = in.readShort() & 0x0000ffff;
@ -445,7 +446,7 @@ public int getBlocksScheduled() {
/**
* Increments counter for number of blocks scheduled.
*/
void incBlocksScheduled() {
public void incBlocksScheduled() {
currApproxBlocksScheduled++;
}
@ -485,12 +486,13 @@ public boolean equals(Object obj) {
// by DatanodeID
return (this == obj) || super.equals(obj);
}
class DecommissioningStatus {
int underReplicatedBlocks;
int decommissionOnlyReplicas;
int underReplicatedInOpenFiles;
long startTime;
/** Decommissioning status */
public class DecommissioningStatus {
private int underReplicatedBlocks;
private int decommissionOnlyReplicas;
private int underReplicatedInOpenFiles;
private long startTime;
synchronized void set(int underRep,
int onlyRep, int underConstruction) {
@ -501,32 +503,34 @@ synchronized void set(int underRep,
decommissionOnlyReplicas = onlyRep;
underReplicatedInOpenFiles = underConstruction;
}
synchronized int getUnderReplicatedBlocks() {
/** @return the number of under-replicated blocks */
public synchronized int getUnderReplicatedBlocks() {
if (isDecommissionInProgress() == false) {
return 0;
}
return underReplicatedBlocks;
}
synchronized int getDecommissionOnlyReplicas() {
/** @return the number of decommission-only replicas */
public synchronized int getDecommissionOnlyReplicas() {
if (isDecommissionInProgress() == false) {
return 0;
}
return decommissionOnlyReplicas;
}
synchronized int getUnderReplicatedInOpenFiles() {
/** @return the number of under-replicated blocks in open files */
public synchronized int getUnderReplicatedInOpenFiles() {
if (isDecommissionInProgress() == false) {
return 0;
}
return underReplicatedInOpenFiles;
}
synchronized void setStartTime(long time) {
/** Set start time */
public synchronized void setStartTime(long time) {
startTime = time;
}
synchronized long getStartTime() {
/** @return start time */
public synchronized long getStartTime() {
if (isDecommissionInProgress() == false) {
return 0;
}
@ -538,11 +542,11 @@ synchronized long getStartTime() {
* Set the flag to indicate if this datanode is disallowed from communicating
* with the namenode.
*/
void setDisallowed(boolean flag) {
public void setDisallowed(boolean flag) {
disallowed = flag;
}
boolean isDisallowed() {
/** Is the datanode disallowed from communicating with the namenode? */
public boolean isDisallowed() {
return disallowed;
}
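getReplicationCommand and getLeaseRecoveryCommand above drain the datanode's BlockQueue with a bounded poll. The queue class itself is outside this hunk; a sketch of the pattern under that assumption:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

class BlockQueueSketch<E> {
  private final Queue<E> blockq = new LinkedList<E>();

  synchronized boolean offer(E e) { return blockq.offer(e); }

  /** Remove up to numBlocks items; null when nothing is queued. */
  synchronized List<E> poll(int numBlocks) {
    if (numBlocks <= 0 || blockq.isEmpty()) return null;
    List<E> results = new ArrayList<E>();
    for (; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
      results.add(blockq.poll());
    }
    return results;
  }
}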

View File

@ -15,9 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.util.*;
import java.io.*;

View File

@ -15,17 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/* Class for keeping track of under-replicated blocks.
 * Blocks have replication priority, with priority 0 indicating the highest.
 * Blocks with only one replica have the highest priority.
 */
class UnderReplicatedBlocks implements Iterable<Block> {
public class UnderReplicatedBlocks implements Iterable<Block> {
static final int LEVEL = 5;
static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
@ -47,7 +48,7 @@ void clear() {
}
/* Return the total number of under-replicated blocks */
synchronized int size() {
public synchronized int size() {
int size = 0;
for (int i=0; i<LEVEL; i++) {
size += priorityQueues.get(i).size();
@ -70,7 +71,7 @@ synchronized int getCorruptBlockSize() {
}
/* Check if a block is in the neededReplication queue */
synchronized boolean contains(Block block) {
public synchronized boolean contains(Block block) {
for(TreeSet<Block> set:priorityQueues) {
if(set.contains(block)) { return true; }
}
@ -218,7 +219,7 @@ public synchronized BlockIterator iterator() {
return new BlockIterator();
}
class BlockIterator implements Iterator<Block> {
public class BlockIterator implements Iterator<Block> {
private int level;
private boolean isIteratorForLevel = false;
private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
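A self-contained sketch of the structure this class describes: one ordered set per priority level, with membership and size checks scanning every level (the meaning of the intermediate levels is not spelled out in this hunk):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

class UnderReplicatedSketch<B extends Comparable<B>> {
  static final int LEVEL = 5;                     // priority 0 is the highest
  static final int QUEUE_WITH_CORRUPT_BLOCKS = 4; // blocks with no usable replica

  private final List<TreeSet<B>> priorityQueues = new ArrayList<TreeSet<B>>();

  UnderReplicatedSketch() {
    for (int i = 0; i < LEVEL; i++) {
      priorityQueues.add(new TreeSet<B>());
    }
  }

  public synchronized boolean contains(B block) {
    for (TreeSet<B> set : priorityQueues) {
      if (set.contains(block)) return true;
    }
    return false;
  }

  public synchronized int size() {
    int size = 0;
    for (int i = 0; i < LEVEL; i++) {
      size += priorityQueues.get(i).size();
    }
    return size;
  }
}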

View File

@ -26,8 +26,8 @@
import java.net.Socket;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
@ -45,14 +45,13 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.io.Text;

View File

@ -21,6 +21,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.util.CyclicIteration;
/**

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -24,35 +26,40 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.*;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.util.ByteArray;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.DFSConfigKeys;
/*************************************************
* FSDirectory stores the filesystem directory state.
@ -63,7 +70,7 @@
* and logged to disk.
*
*************************************************/
class FSDirectory implements Closeable {
public class FSDirectory implements Closeable {
INodeDirectoryWithQuota rootDir;
FSImage fsImage;
@ -1332,7 +1339,7 @@ boolean isDir(String src) throws UnresolvedLinkException {
* @throws QuotaExceededException if the new count violates any quota limit
* @throws FileNotFound if path does not exist.
*/
void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
public void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
throws QuotaExceededException,
FileNotFoundException,
UnresolvedLinkException {

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
@ -32,10 +34,30 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
public class FSEditLogLoader {
private final FSNamesystem fsNamesys;

View File

@ -32,6 +32,8 @@
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.io.BytesWritable;

View File

@ -42,6 +42,7 @@
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;

View File

@ -22,7 +22,6 @@
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -33,6 +32,9 @@
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

View File

@ -97,15 +97,20 @@
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@ -235,7 +240,7 @@ private static final void logAuditEvent(UserGroupInformation ugi,
// Stores the correct file name hierarchy
//
public FSDirectory dir;
BlockManager blockManager;
public BlockManager blockManager;
// Block pool ID used by this namenode
String blockPoolId;
@ -270,10 +275,10 @@ private static final void logAuditEvent(UserGroupInformation ugi,
* Stores a set of DatanodeDescriptor objects.
* This is a subset of {@link #datanodeMap}, containing nodes that are
* considered alive.
* The {@link HeartbeatMonitor} periodically checks for outdated entries,
* The HeartbeatMonitor periodically checks for outdated entries,
* and removes them from the list.
*/
ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>();
public ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>();
public LeaseManager leaseManager = new LeaseManager(this);
@ -314,8 +319,8 @@ private static final void logAuditEvent(UserGroupInformation ugi,
private volatile SafeModeInfo safeMode; // safe mode information
private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
// datanode networktoplogy
NetworkTopology clusterMap = new NetworkTopology();
/** datanode network topology */
public NetworkTopology clusterMap = new NetworkTopology();
private DNSToSwitchMapping dnsToSwitchMapping;
private HostsFileReader hostsReader;
@ -329,7 +334,7 @@ private static final void logAuditEvent(UserGroupInformation ugi,
private final GenerationStamp generationStamp = new GenerationStamp();
// Ask Datanode only up to this many blocks to delete.
int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
// precision of access times.
private long accessTimePrecision = 0;
@ -472,23 +477,23 @@ public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
}
// utility methods to acquire and release read lock and write lock
void readLock() {
public void readLock() {
this.fsLock.readLock().lock();
}
void readUnlock() {
public void readUnlock() {
this.fsLock.readLock().unlock();
}
void writeLock() {
public void writeLock() {
this.fsLock.writeLock().lock();
}
void writeUnlock() {
public void writeUnlock() {
this.fsLock.writeLock().unlock();
}
boolean hasWriteLock() {
public boolean hasWriteLock() {
return this.fsLock.isWriteLockedByCurrentThread();
}
@ -1014,7 +1019,7 @@ LocatedBlocks getBlockLocationsInternal(INodeFile inode,
}
/** Create a LocatedBlock. */
LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
public LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
final long offset, final boolean corrupt) throws IOException {
return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt);
}
@ -3013,7 +3018,7 @@ private void setDatanodeDead(DatanodeDescriptor node) throws IOException {
* @return an array of datanode commands
* @throws IOException
*/
DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
int xceiverCount, int xmitsInProgress, int failedVolumes)
throws IOException {
@ -3521,7 +3526,7 @@ public void processReport(DatanodeID nodeID, String poolId,
* If no such node is available,
* then pick the node with the least free space.
*/
void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
public void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
Block b, short replication,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint,
@ -3979,45 +3984,6 @@ short adjustReplication(short replication) {
return replication;
}
/**
* An immutable object that stores the number of live replicas and
* the number of decommissioned replicas.
*/
static class NumberReplicas {
private int liveReplicas;
int decommissionedReplicas;
private int corruptReplicas;
private int excessReplicas;
NumberReplicas() {
initialize(0, 0, 0, 0);
}
NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
initialize(live, decommissioned, corrupt, excess);
}
void initialize(int live, int decommissioned, int corrupt, int excess) {
liveReplicas = live;
decommissionedReplicas = decommissioned;
corruptReplicas = corrupt;
excessReplicas = excess;
}
int liveReplicas() {
return liveReplicas;
}
int decommissionedReplicas() {
return decommissionedReplicas;
}
int corruptReplicas() {
return corruptReplicas;
}
int excessReplicas() {
return excessReplicas;
}
}
/**
* Change, if appropriate, the admin state of a datanode to
* decommission completed. Return true if decommission is complete.
@ -4675,7 +4641,7 @@ private void checkSafeMode() {
* Check whether the name node is in safe mode.
* @return true if safe mode is ON, false otherwise
*/
boolean isInSafeMode() {
public boolean isInSafeMode() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
@ -4686,7 +4652,7 @@ boolean isInSafeMode() {
/**
* Check whether the name node is in startup mode.
*/
boolean isInStartupSafeMode() {
public boolean isInStartupSafeMode() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
@ -4697,7 +4663,7 @@ boolean isInStartupSafeMode() {
/**
* Check whether replication queues are populated.
*/
boolean isPopulatingReplQueues() {
public boolean isPopulatingReplQueues() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
@ -4709,7 +4675,7 @@ boolean isPopulatingReplQueues() {
* Increment number of blocks that reached minimal replication.
* @param replication current replication
*/
void incrementSafeBlockCount(int replication) {
public void incrementSafeBlockCount(int replication) {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
@ -4720,7 +4686,7 @@ void incrementSafeBlockCount(int replication) {
/**
* Decrement number of blocks that reached minimal replication.
*/
void decrementSafeBlockCount(Block b) {
public void decrementSafeBlockCount(Block b) {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null) // mostly true
@ -5042,13 +5008,13 @@ public long getFilesTotal() {
@Override // FSNamesystemMBean
@Metric
public long getPendingReplicationBlocks() {
return blockManager.pendingReplicationBlocksCount;
return blockManager.getPendingReplicationBlocksCount();
}
@Override // FSNamesystemMBean
@Metric
public long getUnderReplicatedBlocks() {
return blockManager.underReplicatedBlocksCount;
return blockManager.getUnderReplicatedBlocksCount();
}
/** Return number of under-replicated but not missing blocks */
@ -5059,23 +5025,23 @@ public long getUnderReplicatedNotMissingBlocks() {
/** Returns number of blocks with corrupt replicas */
@Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
public long getCorruptReplicaBlocks() {
return blockManager.corruptReplicaBlocksCount;
return blockManager.getCorruptReplicaBlocksCount();
}
@Override // FSNamesystemMBean
@Metric
public long getScheduledReplicationBlocks() {
return blockManager.scheduledReplicationBlocksCount;
return blockManager.getScheduledReplicationBlocksCount();
}
@Metric
public long getPendingDeletionBlocks() {
return blockManager.pendingDeletionBlocksCount;
return blockManager.getPendingDeletionBlocksCount();
}
@Metric
public long getExcessBlocks() {
return blockManager.excessBlocksCount;
return blockManager.getExcessBlocksCount();
}
@Metric
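All of these metrics switch from reaching into BlockManager's fields to calling getters, the encapsulation the package split requires. A hedged sketch of the accessor side; the patch shows only the call sites, so the field declaration here is assumed:

// In BlockManager (sketch): counters stay hidden in the class's new home,
// with read-only accessors for FSNamesystem and the metrics system.
private volatile long pendingReplicationBlocksCount;

public long getPendingReplicationBlocksCount() {
  return pendingReplicationBlocksCount;
}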
@ -5444,7 +5410,7 @@ public int numCorruptReplicas(Block blk) {
}
/** Get a datanode descriptor given the corresponding storageID */
DatanodeDescriptor getDatanode(String nodeID) {
public DatanodeDescriptor getDatanode(String nodeID) {
assert hasReadOrWriteLock();
return datanodeMap.get(nodeID);
}
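Note that getDatanode asserts its locking contract instead of taking the lock itself: the caller must already hold the namesystem read or write lock. A sketch of a conforming caller, assuming the readLock()/readUnlock() pair that the hasReadOrWriteLock() assertion implies:

ns.readLock();
try {
  DatanodeDescriptor dn = ns.getDatanode(storageID);
  // Use "dn" only while the lock is held; the map may change afterwards.
} finally {
  ns.readUnlock();
}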
@ -5508,7 +5474,7 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
if (startBlockAfter != null) {
startBlockId = Block.filename2id(startBlockAfter);
}
BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
UnderReplicatedBlocks.BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
while (blkIterator.hasNext()) {
Block blk = blkIterator.next();
INode inode = blockManager.getINode(blk);
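Only the iterator's declared type changes here; it is now named through its outer class in the new package. The surrounding traversal, condensed as a sketch (the startBlockId filter is paraphrased, not quoted from the patch):

// Sketch of the traversal listCorruptFileBlocks performs.
UnderReplicatedBlocks.BlockIterator it =
    blockManager.getCorruptReplicaBlockIterator();
while (it.hasNext()) {
  Block blk = it.next();
  if (blk.getBlockId() <= startBlockId) {
    continue; // resume after the requested starting block (sketch)
  }
  INode inode = blockManager.getINode(blk);
  // A null inode means the block no longer belongs to any file.
}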


@ -17,10 +17,13 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.*;
import java.util.HashMap;
import java.util.Random;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
class Host2NodesMap {
private HashMap<String, DatanodeDescriptor[]> map
= new HashMap<String, DatanodeDescriptor[]>();


@ -20,11 +20,13 @@
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils;
/**
@ -32,7 +34,7 @@
* This is a base INode class containing common fields for file and
* directory inodes.
*/
abstract class INode implements Comparable<byte[]>, FSInodeInfo {
public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
/*
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
@ -324,7 +326,7 @@ void setAccessTime(long atime) {
/**
* Is this inode being constructed?
*/
boolean isUnderConstruction() {
public boolean isUnderConstruction() {
return false;
}


@ -24,8 +24,11 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
class INodeFile extends INode {
/** I-node for closed file. */
public class INodeFile extends INode {
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
//Number of bits for Block size
@ -106,7 +109,7 @@ public void setPreferredBlockSize(long preferredBlkSize)
* Get file blocks
* @return file blocks
*/
BlockInfo[] getBlocks() {
public BlockInfo[] getBlocks() {
return this.blocks;
}
@ -149,7 +152,7 @@ void addBlock(BlockInfo newblock) {
/**
* Set file block
*/
void setBlock(int idx, BlockInfo blk) {
public void setBlock(int idx, BlockInfo blk) {
this.blocks[idx] = blk;
}
@ -237,7 +240,7 @@ BlockInfo getPenultimateBlock() {
* Get the last block of the file.
* Make sure it has the right type.
*/
<T extends BlockInfo> T getLastBlock() throws IOException {
public <T extends BlockInfo> T getLastBlock() throws IOException {
if (blocks == null || blocks.length == 0)
return null;
T returnBlock = null;
@ -252,7 +255,8 @@ <T extends BlockInfo> T getLastBlock() throws IOException {
return returnBlock;
}
int numBlocks() {
/** @return the number of blocks */
public int numBlocks() {
return blocks == null ? 0 : blocks.length;
}
}
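The generic signature of getLastBlock lets a caller that expects an under-construction last block receive it without a cast; the runtime type check lives inside the method, which is why it can throw IOException. A hedged usage sketch, where "pendingFile" is assumed to be a file being written:

// Hedged sketch: the target type is inferred from the assignment.
try {
  BlockInfoUnderConstruction last = pendingFile.getLastBlock();
  // "last" can be used as an under-construction block here.
} catch (IOException e) {
  // Thrown when the last block is not of the requested type.
}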


@ -21,10 +21,15 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
class INodeFileUnderConstruction extends INodeFile {
/**
* I-node for file being written.
*/
public class INodeFileUnderConstruction extends INodeFile {
private String clientName; // lease holder
private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@ -43,7 +48,7 @@ class INodeFileUnderConstruction extends INodeFile {
this.clientNode = clientNode;
}
public INodeFileUnderConstruction(byte[] name,
INodeFileUnderConstruction(byte[] name,
short blockReplication,
long modificationTime,
long preferredBlockSize,
@ -80,7 +85,7 @@ DatanodeDescriptor getClientNode() {
* Is this inode being constructed?
*/
@Override
boolean isUnderConstruction() {
public boolean isUnderConstruction() {
return true;
}
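Paired with the base-class default shown earlier (INode answers false), this override gives a clean polymorphic test. In sketch form, with "pendingFile" illustrative:

INode node = pendingFile;             // static type: the base class
if (node.isUnderConstruction()) {     // true only for the UC subclass
  INodeFileUnderConstruction uc = (INodeFileUnderConstruction) node;
  // The downcast is safe exactly when the test above passed.
}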
@ -122,7 +127,7 @@ void removeLastBlock(Block oldblock) throws IOException {
* Convert the last block of the file to an under-construction block.
* Set its locations.
*/
BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeDescriptor[] targets)
throws IOException {
if (blocks == null || blocks.length == 0) {
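The javadoc describes the conversion this method performs: the last, previously completed block is replaced by an under-construction variant that also records the write pipeline. A hedged sketch of the call (inside a method that throws IOException; the file and targets names are illustrative):

// Hedged sketch: reopen the tail of a file for further writing.
BlockInfo tail = pendingFile.getLastBlock();
BlockInfoUnderConstruction uc =
    pendingFile.setLastBlock(tail, pipelineTargets);
// "uc" now carries both the block and its DatanodeDescriptor[] pipeline.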


@ -163,7 +163,7 @@ synchronized Lease reassignLease(Lease lease, String src, String newHolder) {
/**
* Finds the pathname for the specified pendingFile
*/
synchronized String findPath(INodeFileUnderConstruction pendingFile)
public synchronized String findPath(INodeFileUnderConstruction pendingFile)
throws IOException {
Lease lease = getLease(pendingFile.getClientName());
if (lease != null) {
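findPath goes public so that lease recovery driven from outside the package can map an open file's inode back to its path through the holder's lease. A small hedged sketch:

// Hedged sketch: recover the path of a file still under construction.
String src = leaseManager.findPath(pendingFile); // IOException if no lease matches
// "src" can then be used to finalize or log the file by name.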


@ -247,7 +247,7 @@ public static void format(Configuration conf) throws IOException {
/** Return the {@link FSNamesystem} object.
* @return {@link FSNamesystem} object.
*/
FSNamesystem getNamesystem() {
public FSNamesystem getNamesystem() {
return namesystem;
}


@ -36,20 +36,20 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**


@ -36,24 +36,24 @@
import javax.servlet.jsp.JspWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
import org.znerd.xmlenc.XMLOutputter;
class NamenodeJspHelper {
static String getSafeModeText(FSNamesystem fsn) {


@ -17,15 +17,20 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.*;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.io.*;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
/****************************************************


@ -61,10 +61,10 @@
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;


@ -25,7 +25,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the


@ -15,17 +15,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
/**
@ -79,7 +82,6 @@ public void testCorruptReplicaInfo() throws IOException,
DatanodeDescriptor dn1 = new DatanodeDescriptor();
DatanodeDescriptor dn2 = new DatanodeDescriptor();
DatanodeDescriptor dn3 = new DatanodeDescriptor();
crm.addToCorruptReplicasMap(getBlock(0), dn1);
assertEquals("Number of corrupt blocks not returning correctly",


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.ArrayList;
@ -29,6 +29,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import junit.framework.TestCase;
import java.lang.System;


@ -15,8 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.ArrayList;
@ -24,18 +23,19 @@
import java.util.HashMap;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
public class TestReplicationPolicy extends TestCase {
private static final int BLOCK_SIZE = 1024;


@ -15,7 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@ -25,8 +27,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
public class TestUnderReplicatedBlocks extends TestCase {
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {


@ -17,30 +17,31 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
/**
* Test reporting of DN volume failure counts and metrics.


@ -23,6 +23,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage;


@ -19,10 +19,11 @@
import java.io.IOException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.ipc.Server;
/**
* This is a utility class to expose NameNode functionality for unit tests.


@ -34,6 +34,8 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.junit.AfterClass;
import org.junit.BeforeClass;


@ -17,14 +17,15 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import junit.framework.TestCase;
/**
* Test if FSNamesystem handles heartbeats correctly
*/


@ -29,9 +29,9 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;


@ -17,19 +17,15 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@ -40,8 +36,10 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class tests the decommissioning of nodes.


@ -18,10 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
public class TestHost2NodesMap extends TestCase {
static private Host2NodesMap map = new Host2NodesMap();
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {


@ -21,17 +21,17 @@
import java.io.File;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;


@ -21,6 +21,8 @@
import java.util.Iterator;
import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -30,9 +32,8 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
/**
* Test if live nodes count per node is correct


@ -20,20 +20,21 @@
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import junit.framework.TestCase;
public class TestOverReplicatedBlocks extends TestCase {
/** Test processOverReplicatedBlock can handle corrupt replicas correctly.


@ -17,34 +17,35 @@
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Level;
import org.apache.commons.logging.LogFactory;
import static org.apache.hadoop.test.MetricsAsserts.*;
/**
* Test for metrics published by the Namenode
*/


@ -24,8 +24,8 @@
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
public class TestNetworkTopology extends TestCase {
private final static NetworkTopology cluster = new NetworkTopology();


@ -19,6 +19,16 @@
package org.apache.hadoop.hdfs.server.namenode;
import static junit.framework.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@ -34,20 +44,16 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
public class TestNNLeaseRecovery {
private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);


@ -59,19 +59,9 @@
%>
<%@ page
contentType="application/xml"
import="java.io.IOException"
import="java.util.Iterator"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hdfs.protocol.Block"
import="org.apache.hadoop.hdfs.server.namenode.INode"
import="org.apache.hadoop.hdfs.server.namenode.BlocksMap"
import="org.apache.hadoop.hdfs.server.namenode.BlockInfo"
import="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo"
import="org.apache.hadoop.hdfs.server.common.JspHelper"
import="org.apache.hadoop.util.ServletUtil"
import="org.znerd.xmlenc.*"
%>
<%!
//for java.io.Serializable