HDFS-5210. Fix some failing unit tests on HDFS-4949 branch. (Contributed by Andrew Wang)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523754 13f79535-47bb-0310-9956-ffa450edef68
parent 68ec07cade
commit 85c2036029
@@ -53,3 +53,5 @@ HDFS-4949 (Unreleased)
     HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit
     (Contributed by Colin Patrick McCabe)
 
+    HDFS-5210. Fix some failing unit tests on HDFS-4949 branch.
+    (Contributed by Andrew Wang)
@@ -167,12 +167,14 @@ public void close() {
   }
 
   public void clearQueues() {
+    if (isCachingEnabled) {
       blocksToUncache.clear();
       synchronized (neededCacheBlocks) {
         neededCacheBlocks.clear();
       }
       pendingCacheBlocks.clear();
+    }
   }
 
   public boolean isCachingEnabled() {
     return isCachingEnabled;
@@ -571,7 +573,8 @@ private void updateNeededCaching(final Block block,
   }
 
   /**
-   * Return the safely cached replicas of a block in a BlocksMap
+   * Return the safe replicas (not corrupt or decomissioning/decommissioned) of
+   * a block in a BlocksMap
    */
   List<DatanodeDescriptor> getSafeReplicas(BlocksMap map, Block block) {
     List<DatanodeDescriptor> nodes = new ArrayList<DatanodeDescriptor>(3);
@@ -156,7 +156,7 @@ private void computeCachingWorkForBlocks(List<Block> blocksToCache) {
       }
       // Choose some replicas to cache if needed
       additionalRepl = requiredRepl - effectiveRepl;
-      targets = new ArrayList<DatanodeDescriptor>(storedNodes);
+      targets = new ArrayList<DatanodeDescriptor>(storedNodes.size());
       // Only target replicas that aren't already cached.
       for (DatanodeDescriptor dn: storedNodes) {
         if (!cachedNodes.contains(dn)) {
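The fix above is easy to misread because only the constructor argument changed: in Java, new ArrayList<DatanodeDescriptor>(storedNodes) copies every element of storedNodes into the new list, while new ArrayList<DatanodeDescriptor>(storedNodes.size()) creates an empty list with a capacity hint, so the old code started with every stored replica already present in targets before the cached-node filter ran. A standalone sketch of the difference (plain JDK, with strings standing in for datanodes):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ArrayListCtorDemo {
      public static void main(String[] args) {
        List<String> storedNodes = Arrays.asList("dn1", "dn2", "dn3");

        // Buggy form: the Collection constructor copies all elements,
        // so every node is a "target" before any filtering happens.
        List<String> copied = new ArrayList<String>(storedNodes);
        System.out.println(copied.size()); // 3

        // Fixed form: empty list, 3 is only an initial capacity hint.
        List<String> sized = new ArrayList<String>(storedNodes.size());
        System.out.println(sized.size()); // 0
      }
    }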
@@ -35,6 +35,9 @@
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 public class CacheReplicationPolicy {
 
+  // Not thread-safe, but only accessed by the CacheReplicationMonitor
+  private static RandomData random = new RandomDataImpl();
+
   /**
    * @return List of datanodes with sufficient capacity to cache the block
    */
@@ -53,8 +56,7 @@ private static List<DatanodeDescriptor> selectSufficientCapacity(Block block,
 
   /**
    * Returns a random datanode from targets, weighted by the amount of free
-   * cache capacity on the datanode. Prunes unsuitable datanodes from the
-   * targets list.
+   * cache capacity on the datanode.
    *
    * @param block Block to be cached
    * @param targets List of potential cache targets
@@ -75,8 +77,7 @@ private static DatanodeDescriptor randomDatanodeByRemainingCache(Block block,
       lottery.put(totalCacheAvailable, dn);
     }
     // Pick our lottery winner
-    RandomData r = new RandomDataImpl();
-    long winningTicket = r.nextLong(0, totalCacheAvailable - 1);
+    long winningTicket = random.nextLong(0, totalCacheAvailable - 1);
     Entry<Long, DatanodeDescriptor> winner = lottery.higherEntry(winningTicket);
     return winner.getValue();
   }
@@ -94,7 +95,10 @@ static List<DatanodeDescriptor> chooseTargetsToCache(Block block,
     List<DatanodeDescriptor> chosen =
         new ArrayList<DatanodeDescriptor>(numTargets);
     for (int i = 0; i < numTargets && !sufficient.isEmpty(); i++) {
-      chosen.add(randomDatanodeByRemainingCache(block, sufficient));
+      DatanodeDescriptor choice =
+          randomDatanodeByRemainingCache(block, sufficient);
+      chosen.add(choice);
+      sufficient.remove(choice);
     }
     return chosen;
   }
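The three CacheReplicationPolicy hunks above hang together: a single shared RandomData instance replaces a fresh RandomDataImpl allocated on every call, and each lottery winner is now removed from sufficient so the same datanode cannot be chosen twice for one block. A minimal, self-contained sketch of the cumulative-sum lottery (plain JDK types and hypothetical node names in place of DatanodeDescriptor and commons-math; not the HDFS code itself):

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ThreadLocalRandom;

    public class WeightedLotteryDemo {
      static String drawWeighted(Map<String, Long> freeCache) {
        TreeMap<Long, String> lottery = new TreeMap<Long, String>();
        long total = 0;
        for (Map.Entry<String, Long> e : freeCache.entrySet()) {
          total += e.getValue();          // running sum of free capacity
          lottery.put(total, e.getKey()); // node owns tickets (prev, total]
        }
        long ticket = ThreadLocalRandom.current().nextLong(total); // [0, total)
        // First cumulative sum strictly greater than the ticket wins.
        return lottery.higherEntry(ticket).getValue();
      }

      public static void main(String[] args) {
        Map<String, Long> freeCache = new LinkedHashMap<String, Long>();
        freeCache.put("dn1", 100L);
        freeCache.put("dn2", 300L);
        freeCache.put("dn3", 600L);

        // Selection without replacement, mirroring chooseTargetsToCache:
        List<String> chosen = new ArrayList<String>();
        for (int i = 0; i < 2 && !freeCache.isEmpty(); i++) {
          String winner = drawWeighted(freeCache);
          chosen.add(winner);
          freeCache.remove(winner); // without this, one node could win twice
        }
        System.out.println(chosen);
      }
    }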
@@ -368,12 +368,6 @@ void scheduleBlockReport(long delay) {
     }
   }
 
-  void scheduleCacheReport(long delay) {
-    for (BPServiceActor actor: bpServices) {
-      actor.scheduleCacheReport(delay);
-    }
-  }
-
   /**
    * Ask each of the actors to report a bad block hosted on another DN.
    */
@@ -242,17 +242,6 @@ void scheduleBlockReport(long delay) {
     resetBlockReportTime = true; // reset future BRs for randomness
   }
 
-  void scheduleCacheReport(long delay) {
-    if (delay > 0) {
-      // Uniform random jitter by the delay
-      lastCacheReport = Time.monotonicNow()
-        - dnConf.cacheReportInterval
-        + DFSUtil.getRandom().nextInt(((int)delay));
-    } else { // send at next heartbeat
-      lastCacheReport = lastCacheReport - dnConf.cacheReportInterval;
-    }
-  }
-
   void reportBadBlocks(ExtendedBlock block) {
     if (bpRegistration == null) {
       return;
@@ -445,6 +434,10 @@ DatanodeCommand blockReport() throws IOException {
   }
 
   DatanodeCommand cacheReport() throws IOException {
+    // If caching is disabled, do not send a cache report
+    if (dn.getFSDataset().getCacheCapacity() == 0) {
+      return null;
+    }
     // send cache report if timer has expired.
     DatanodeCommand cmd = null;
     long startTime = Time.monotonicNow();
@@ -1916,7 +1916,6 @@ static StartupOption getStartupOption(Configuration conf) {
   public void scheduleAllBlockReport(long delay) {
     for(BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
       bpos.scheduleBlockReport(delay);
-      bpos.scheduleCacheReport(delay);
     }
   }
 
@@ -225,7 +225,7 @@ public void verifyChecksum() throws IOException, ChecksumException {
       blockBuf.flip();
       // Number of read chunks, including partial chunk at end
       int chunks = (bytesRead+bytesPerChecksum-1) / bytesPerChecksum;
-      checksumBuf.limit(chunks*bytesPerChecksum);
+      checksumBuf.limit(chunks*checksumSize);
       fillBuffer(metaChannel, checksumBuf);
       checksumBuf.flip();
       checksum.verifyChunkedSums(blockBuf, checksumBuf, block.getBlockName(),
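The one-token change above fixes a unit mismatch: checksumBuf holds one checksum per chunk, each checksumSize bytes (4 bytes for a CRC32), not bytesPerChecksum data bytes per chunk. Rough arithmetic with assumed figures (512-byte chunks, 4-byte checksums, a 1 MB read):

    public class ChecksumLimitDemo {
      public static void main(String[] args) {
        int bytesRead = 1024 * 1024;  // data read into blockBuf
        int bytesPerChecksum = 512;   // data bytes covered by each checksum
        int checksumSize = 4;         // bytes per stored checksum (CRC32)

        // Same rounding-up division as the code above.
        int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum; // 2048
        System.out.println("buggy limit:   " + chunks * bytesPerChecksum); // 1048576
        System.out.println("correct limit: " + chunks * checksumSize);     // 8192
      }
    }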
@@ -186,6 +186,8 @@ private synchronized Fallible<PathBasedCacheEntry> addDirective(
           // TODO: adjustable cache replication factor
           namesystem.setCacheReplicationInt(directive.getPath(),
               file.getBlockReplication());
+        } else {
+          LOG.warn("Path " + directive.getPath() + " is not a file");
         }
       } catch (IOException ioe) {
         LOG.info("addDirective " + directive +": failed to cache file: " +
@@ -48,9 +48,11 @@
 
 public class TestCacheReplicationManager {
 
+  private static final long BLOCK_SIZE = 512;
+  private static final int REPL_FACTOR = 3;
+  private static final int NUM_DATANODES = 4;
   // Most Linux installs allow a default of 64KB locked memory
-  private static final long CACHE_CAPACITY = 64 * 1024;
-  private static final long BLOCK_SIZE = 4096;
+  private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES;
 
   private static Configuration conf;
   private static MiniDFSCluster cluster = null;
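A quick reading of the new test constants, assuming the 64 KB mlockable budget the comment mentions: splitting it across the four simulated datanodes leaves 16 KB of cache per node, and shrinking blocks to 512 bytes means each node can still cache 32 of them:

    public class TestCapacityMath {
      public static void main(String[] args) {
        long blockSize = 512;
        int numDatanodes = 4;
        long cacheCapacity = 64 * 1024 / numDatanodes; // 16384 bytes per DN
        System.out.println(cacheCapacity / blockSize); // 32 cacheable blocks per DN
      }
    }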
@@ -75,7 +77,7 @@ public void setUp() throws Exception {
     conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
 
     cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(1).build();
+        .numDataNodes(NUM_DATANODES).build();
     cluster.waitActive();
 
     fs = cluster.getFileSystem();
@@ -106,6 +108,25 @@ private void waitForExpectedNumCachedBlocks(final int expected)
       Thread.sleep(500);
       actual = countNumCachedBlocks();
     }
+    waitForExpectedNumCachedReplicas(expected*REPL_FACTOR);
+  }
+
+  private void waitForExpectedNumCachedReplicas(final int expected)
+      throws Exception {
+    BlocksMap cachedBlocksMap = cacheReplManager.cachedBlocksMap;
+    int actual = 0;
+    while (expected != actual) {
+      Thread.sleep(500);
+      nn.getNamesystem().readLock();
+      try {
+        actual = 0;
+        for (BlockInfo b : cachedBlocksMap.getBlocks()) {
+          actual += cachedBlocksMap.numNodes(b);
+        }
+      } finally {
+        nn.getNamesystem().readUnlock();
+      }
+    }
   }
 
   @Test(timeout=60000)
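The added wait loop follows a common test pattern: poll on a fixed interval, take the namesystem read lock only while counting so the snapshot is consistent, and release it in a finally block. A generic sketch of the same shape (a plain ReentrantReadWriteLock standing in for the namesystem lock; names are illustrative):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class PollUnderReadLockDemo {
      private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private static volatile int cachedReplicas = 0; // mutated elsewhere in real code

      static void waitForExpected(int expected) throws InterruptedException {
        int actual = -1;
        while (expected != actual) {
          Thread.sleep(500);          // poll interval, as in the test
          lock.readLock().lock();     // read state under the lock
          try {
            actual = cachedReplicas;
          } finally {
            lock.readLock().unlock(); // always release, even on failure
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        cachedReplicas = 6; // pretend 2 blocks x 3 replicas got cached
        waitForExpected(6);
        System.out.println("done");
      }
    }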
@@ -114,7 +135,7 @@ public void testCachePaths() throws Exception {
     final String pool = "friendlyPool";
     nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
     // Create some test files
-    final int numFiles = 3;
+    final int numFiles = 2;
     final int numBlocksPerFile = 2;
     final List<String> paths = new ArrayList<String>(numFiles);
     for (int i=0; i<numFiles; i++) {