diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 69ae147a95..5fe31ce4ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -69,6 +69,7 @@
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -802,6 +803,10 @@ private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
       // read and handle the common header here. For now just a version
       final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
           checksumIn, metaFile);
+      if (Type.NULL.equals(checksum.getChecksumType())) {
+        // in case of NULL checksum type consider full file as valid
+        return blockFileLen;
+      }
       int bytesPerChecksum = checksum.getBytesPerChecksum();
       int checksumSize = checksum.getChecksumSize();
       long numChunks = Math.min(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index eb4f124bc4..0d322daebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -20,10 +20,13 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -176,4 +180,43 @@ public void testOpenInfo() throws IOException {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testNullCheckSumWhenDNRestarted()
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+
+      Path file = new Path("/testfile");
+      try (FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+        fout.hflush();
+        cluster.restartDataNode(0, true, true);
+      }
+
+      // wait for block to load
+      Thread.sleep(1000);
+
+      // fetch live DN
+      final List<DatanodeDescriptor> live = new ArrayList<>();
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().fetchDatanodes(live, null, false);
+      assertTrue("DN start should be success and live dn should be 2",
+          live.size() == 2);
+      assertTrue("File size should be " + chunkSize,
+          fs.getFileStatus(file).getLen() == chunkSize);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }