HDFS-2500. Avoid file system operations in BPOfferService thread while processing deletes. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1190071 13f79535-47bb-0310-9956-ffa450edef68
Author: Todd Lipcon
Date:   2011-10-27 22:47:19 +00:00
parent 78336e717b
commit 221aadbc5b
4 changed files with 15 additions and 7 deletions

@@ -843,6 +843,9 @@ Release 0.23.0 - Unreleased
     HDFS-2118. Couple dfs data dir improvements. (eli)
+    HDFS-2500. Avoid file system operations in BPOfferService thread while
+    processing deletes. (todd)
   BUG FIXES
     HDFS-2347. Fix checkpointTxnCount's comment about editlog size.

@@ -1151,8 +1151,15 @@ private void offerService() throws Exception {
         if (!heartbeatsDisabledForTests) {
           DatanodeCommand[] cmds = sendHeartBeat();
           metrics.addHeartbeat(now() - startTime);
+          long startProcessCommands = now();
           if (!processCommand(cmds))
             continue;
+          long endProcessCommands = now();
+          if (endProcessCommands - startProcessCommands > 2000) {
+            LOG.info("Took " + (endProcessCommands - startProcessCommands) +
+                "ms to process " + cmds.length + " commands from NN");
+          }
         }
       }
       if (pendingReceivedRequests > 0
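
The hunk above adds a simple slow-path check: measure the elapsed time around command processing and log only when it exceeds a threshold (2000 ms here). A minimal standalone sketch of the same pattern follows; the class and method names (SlowCommandLogger, runAndReportIfSlow) are illustrative, not Hadoop APIs.

// Sketch of the "log only when an operation is slow" pattern from the hunk above.
import java.util.concurrent.TimeUnit;

public class SlowCommandLogger {
  private static final long THRESHOLD_MS = 2000;

  // Runs the given work and prints a message only if it took longer than the threshold.
  static void runAndReportIfSlow(String description, Runnable work) {
    long start = System.currentTimeMillis();
    work.run();
    long elapsed = System.currentTimeMillis() - start;
    if (elapsed > THRESHOLD_MS) {
      System.out.println("Took " + elapsed + "ms to " + description);
    }
  }

  public static void main(String[] args) {
    runAndReportIfSlow("process 3 commands from NN", () -> {
      try {
        TimeUnit.MILLISECONDS.sleep(2100); // simulate slow command processing
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
  }
}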

@@ -2087,10 +2087,9 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
         volumeMap.remove(bpid, invalidBlks[i]);
       }
       File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
-      long dfsBytes = f.length() + metaFile.length();
       // Delete the block asynchronously to make sure we can do it fast enough
-      asyncDiskService.deleteAsync(v, f, metaFile, dfsBytes,
+      asyncDiskService.deleteAsync(v, f, metaFile,
           new ExtendedBlock(bpid, invalidBlks[i]));
     }
     if (error) {

@@ -152,11 +152,11 @@ synchronized void shutdown() {
    * dfsUsed statistics accordingly.
    */
   void deleteAsync(FSDataset.FSVolume volume, File blockFile, File metaFile,
-      long dfsBytes, ExtendedBlock block) {
+      ExtendedBlock block) {
     DataNode.LOG.info("Scheduling block " + block.getLocalBlock().toString()
         + " file " + blockFile + " for deletion");
     ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(dataset,
-        volume, blockFile, metaFile, dfsBytes, block);
+        volume, blockFile, metaFile, block);
     execute(volume.getCurrentDir(), deletionTask);
   }
@@ -168,16 +168,14 @@ static class ReplicaFileDeleteTask implements Runnable {
     final FSDataset.FSVolume volume;
     final File blockFile;
     final File metaFile;
-    final long dfsBytes;
     final ExtendedBlock block;

     ReplicaFileDeleteTask(FSDataset dataset, FSDataset.FSVolume volume, File blockFile,
-        File metaFile, long dfsBytes, ExtendedBlock block) {
+        File metaFile, ExtendedBlock block) {
       this.dataset = dataset;
       this.volume = volume;
       this.blockFile = blockFile;
       this.metaFile = metaFile;
-      this.dfsBytes = dfsBytes;
       this.block = block;
     }
@@ -195,6 +193,7 @@ public String toString() {
     @Override
     public void run() {
+      long dfsBytes = blockFile.length() + metaFile.length();
       if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
         DataNode.LOG.warn("Unexpected error trying to delete block "
             + block.getBlockPoolId() + " " + block.getLocalBlock().toString()
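
Taken together, the deleteAsync and ReplicaFileDeleteTask hunks move the blockFile.length() and metaFile.length() calls out of the caller (the heartbeat/BPOfferService path) and into the asynchronous deletion task, so the disk accesses happen on the async-disk-service thread instead. Below is a minimal sketch of that pattern under simplified assumptions: a plain ExecutorService stands in for Hadoop's FSDatasetAsyncDiskService, and the class and method names (AsyncFileDeleter, deleteAsync) are illustrative, not the project's API.

// Sketch: defer file-size computation and deletion to an async task so the
// calling thread never touches the file system. Not the Hadoop implementation.
import java.io.File;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncFileDeleter {
  private final ExecutorService executor = Executors.newSingleThreadExecutor();

  // Called from a latency-sensitive thread: no file system operations happen here.
  void deleteAsync(File blockFile, File metaFile) {
    executor.execute(() -> {
      // length() and delete() both hit the file system, so they run on the
      // executor thread rather than the caller's thread.
      long bytes = blockFile.length() + metaFile.length();
      if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
        System.err.println("Unexpected error trying to delete " + blockFile);
      } else {
        System.out.println("Deleted " + blockFile + " (" + bytes + " bytes freed)");
      }
    });
  }

  void shutdown() {
    executor.shutdown();
  }

  public static void main(String[] args) throws Exception {
    File block = File.createTempFile("blk_", ".data");
    File meta = File.createTempFile("blk_", ".meta");
    AsyncFileDeleter deleter = new AsyncFileDeleter();
    deleter.deleteAsync(block, meta);
    deleter.shutdown();
  }
}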