HDFS-8492. DN should notify NN when client requests a missing block (Contributed by Walter Su)

Vinayakumar B committed 2016-10-27 16:44:00 +05:30
parent 4e403def80
commit 1cf6ec4ad4
3 changed files with 44 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -322,6 +323,12 @@ class BlockSender implements java.io.Closeable {
         } else {
           LOG.warn("Could not find metadata file for " + block);
         }
+      } catch (FileNotFoundException e) {
+        // The replica is on its volume map but not on disk
+        datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+        datanode.data.invalidate(block.getBlockPoolId(),
+            new Block[]{block.getLocalBlock()});
+        throw e;
       } finally {
         if (!keepMetaInOpen) {
           IOUtils.closeStream(metaIn);
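This catch block is the heart of the fix: the replica is still registered in the DataNode's volume map, but its block file has vanished from disk, so the DataNode reports the replica to the NameNode as deleted, invalidates the stale local entry, and rethrows so the client fails over to another replica while the NameNode schedules re-replication. A minimal, self-contained sketch of the same self-healing pattern, using hypothetical BlockStore/LostBlockListener names rather than Hadoop APIs:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative analogue of the BlockSender change: if the in-memory map says
// a block exists but its file is gone, drop the stale entry, notify an
// observer, and rethrow so the caller can fail over to another source.
class BlockStore {
  interface LostBlockListener {
    void blockLost(long blockId); // stands in for notifyNamenodeDeletedBlock
  }

  private final Map<Long, Path> volumeMap = new ConcurrentHashMap<>();
  private final LostBlockListener listener;

  BlockStore(LostBlockListener listener) {
    this.listener = listener;
  }

  void add(long blockId, Path file) {
    volumeMap.put(blockId, file);
  }

  byte[] read(long blockId) throws IOException {
    Path file = volumeMap.get(blockId);
    if (file == null) {
      throw new FileNotFoundException("BlockId " + blockId + " is not valid.");
    }
    try {
      return Files.readAllBytes(file);
    } catch (NoSuchFileException | FileNotFoundException e) {
      volumeMap.remove(blockId);   // analogous to data.invalidate(...)
      listener.blockLost(blockId); // analogous to notifying the NameNode
      throw e;                     // caller retries against another replica
    }
  }
}

Rethrowing rather than swallowing the exception matters: the current read still fails fast, and the repair happens out of band.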

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -738,7 +738,7 @@ private ReplicaInfo getBlockReplica(ExtendedBlock b) throws IOException {
   ReplicaInfo getBlockReplica(String bpid, long blockId) throws IOException {
     ReplicaInfo r = validateBlockFile(bpid, blockId);
     if (r == null) {
-      throw new IOException("BlockId " + blockId + " is not valid.");
+      throw new FileNotFoundException("BlockId " + blockId + " is not valid.");
     }
     return r;
   }
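Narrowing the thrown type from IOException to FileNotFoundException is what makes the BlockSender hunk above possible: FileNotFoundException is a subclass of IOException, so every existing caller that catches or propagates IOException is unaffected, while BlockSender gains a handler specific to the missing-file case. A small sketch of that catch ordering, assuming only standard java.io types:

import java.io.FileNotFoundException;
import java.io.IOException;

public class CatchOrderDemo {
  // Mirrors getBlockReplica(): the narrower exception type carries more
  // information but still satisfies a "throws IOException" contract.
  static void getReplica(boolean missing) throws IOException {
    if (missing) {
      throw new FileNotFoundException("BlockId 123 is not valid.");
    }
  }

  public static void main(String[] args) {
    try {
      getReplica(true);
    } catch (FileNotFoundException e) {
      // Narrow handler: replica is registered but its file is gone.
      System.out.println("missing block file: " + e.getMessage());
    } catch (IOException e) {
      // Broad handler for every other IO failure; pre-existing callers
      // that only catch IOException are unaffected by the change.
      System.out.println("other IO error: " + e.getMessage());
    }
  }
}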

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java

@@ -29,16 +29,20 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.junit.Assert;
 import org.junit.Test;
 /**
@@ -144,7 +148,37 @@ public void testDatanodeReport() throws Exception {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDatanodeReportMissingBlock() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_OF_DATANODES).build();
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testDatanodeReportMissingBlock");
+      DFSTestUtil.writeFile(fs, p, new String("testdata"));
+      LocatedBlock lb = fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+      assertEquals(3, lb.getLocations().length);
+      ExtendedBlock b = lb.getBlock();
+      cluster.corruptBlockOnDataNodesByDeletingBlockFile(b);
+      try {
+        DFSTestUtil.readFile(fs, p);
+        Assert.fail("Must throw exception as the block doesn't exist on disk");
+      } catch (IOException e) {
+        // all bad datanodes
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      lb = fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+      assertEquals(0, lb.getLocations().length);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
     @Override
     public int compare(StorageReport left, StorageReport right) {
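The new test exercises the whole path. The two conf overrides are about speed: a 1-second heartbeat keeps incremental block reports (IBRs) flowing quickly, and a retry window base of 1 shrinks the client's backoff so the doomed read fails fast. Deleting the block file on all three DataNodes means each read attempt hits the new BlockSender catch block; every DataNode invalidates its replica and reports the deletion in its next IBR, so after triggerHeartbeats() the NameNode returns zero locations for the block. If the single triggerHeartbeats() call ever proved racy, a hedged alternative would be to poll until the NameNode has processed the deletion reports; the sketch below is a fragment that assumes the test's fs, p, and cluster variables, a Java 8 build, and Hadoop's GenericTestUtils.waitFor(Supplier<Boolean>, int, int) test helper:

// In place of the single triggerHeartbeats() call:
GenericTestUtils.waitFor(() -> {
  try {
    cluster.triggerHeartbeats();
    return fs.getClient().getLocatedBlocks(p.toString(), 0)
        .get(0).getLocations().length == 0;
  } catch (Exception e) {
    return false; // deletion reports not processed yet; keep polling
  }
}, 100, 10000); // check every 100 ms, time out after 10 s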