diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ca203b6df..2773c50a4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2826. Add test case for HDFS-1476 (safemode can initialize
     replication queues before exiting) (todd)
 
+    HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION
+    from FSDataset. (szetszwo)
+
   BUG FIXES
 
     HDFS-2541. For a sufficiently large value of blocks, the DN Scanner
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 6c7f829f56..3090419fb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -31,13 +31,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
@@ -183,7 +182,7 @@ static BlockReaderLocal newBlockReader(Configuration conf, String file,
       BlockMetadataHeader header = BlockMetadataHeader
           .readHeader(new DataInputStream(checksumIn));
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         LOG.warn("Wrong version (" + version + ") for metadata file for "
             + blk + " ignoring ...");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
index fd8aec7bac..e97b668d9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
@@ -42,7 +42,7 @@
 @InterfaceStability.Evolving
 public class BlockMetadataHeader {
 
-  static final short METADATA_VERSION = FSDataset.METADATA_VERSION;
+  public static final short VERSION = 1;
 
   /**
    * Header includes everything except the checksum(s) themselves.
@@ -138,7 +138,7 @@ private static void writeHeader(DataOutputStream out,
    */
  static void writeHeader(DataOutputStream out, DataChecksum checksum)
                      throws IOException {
-    writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum));
+    writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
  }
 
  /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 1f45a7bb56..0a42d0e951 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -425,9 +425,8 @@ private void verifyBlock(ExtendedBlock block) {
       updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
 
       // If the block does not exists anymore, then its not an error
-      if ( dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) == null ) {
-        LOG.info("Verification failed for " + block + ". Its ok since " +
-          "it not in datanode dataset anymore.");
+      if (!dataset.contains(block)) {
+        LOG.info(block + " is no longer in the dataset.");
         deleteBlock(block.getLocalBlock());
         return;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index a59a559636..1aeb7e1f41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -226,7 +226,7 @@ class BlockSender implements java.io.Closeable {
       // read and handle the common header here. For now just a version
       BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         LOG.warn("Wrong version (" + version + ") for metadata file for "
             + block + " ignoring ...");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index f885c8b21c..6e6367c776 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -470,7 +470,7 @@ private long validateIntegrity(File blockFile, long genStamp) {
       // read and handle the common header here. For now just a version
       BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         DataNode.LOG.warn("Wrong version (" + version + ") for metadata file "
             + metaFile + " ignoring ...");
       }
@@ -945,8 +945,7 @@ private void shutdown() {
 
   //////////////////////////////////////////////////////
   //Find better place?
-  public static final String METADATA_EXTENSION = ".meta";
-  public static final short METADATA_VERSION = 1;
+  static final String METADATA_EXTENSION = ".meta";
   static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
 
   private static boolean isUnlinkTmpFile(File f) {
@@ -1031,15 +1030,10 @@ private static long parseGenerationStamp(File blockFile, File metaFile
     }
   }
 
-  /** Return the block file for the given ID */
-  public File findBlockFile(String bpid, long blockId) {
-    return getFile(bpid, blockId);
-  }
-
   @Override // FSDatasetInterface
   public synchronized Block getStoredBlock(String bpid, long blkid)
       throws IOException {
-    File blockfile = findBlockFile(bpid, blkid);
+    File blockfile = getFile(bpid, blkid);
     if (blockfile == null) {
       return null;
     }
@@ -1259,8 +1253,7 @@ public File getBlockFile(ExtendedBlock b) throws IOException {
   /**
    * Get File name for a given block.
    */
-  public File getBlockFile(String bpid, Block b)
-      throws IOException {
+  File getBlockFile(String bpid, Block b) throws IOException {
     File f = validateBlockFile(bpid, b);
     if(f == null) {
       if (DataNode.LOG.isDebugEnabled()) {
@@ -1291,7 +1284,10 @@ public InputStream getBlockInputStream(ExtendedBlock b)
    */
   private File getBlockFileNoExistsCheck(ExtendedBlock b)
       throws IOException {
-    File f = getFile(b.getBlockPoolId(), b.getLocalBlock());
+    final File f;
+    synchronized(this) {
+      f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId());
+    }
     if (f == null) {
       throw new IOException("Block " + b + " is not valid");
     }
@@ -2021,7 +2017,10 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
    */
   File validateBlockFile(String bpid, Block b) {
     //Should we check for metadata file too?
-    File f = getFile(bpid, b);
+    final File f;
+    synchronized(this) {
+      f = getFile(bpid, b.getBlockId());
+    }
     if(f != null ) {
 
       if(f.exists())
@@ -2071,7 +2070,7 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
       File f = null;
       FSVolume v;
       synchronized (this) {
-        f = getFile(bpid, invalidBlks[i]);
+        f = getFile(bpid, invalidBlks[i].getBlockId());
         ReplicaInfo dinfo = volumeMap.get(bpid, invalidBlks[i]);
         if (dinfo == null ||
             dinfo.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
@@ -2130,11 +2129,10 @@ public void notifyNamenodeDeletedBlock(ExtendedBlock block){
     datanode.notifyNamenodeDeletedBlock(block);
   }
 
-  /**
-   * Turn the block identifier into a filename; ignore generation stamp!!!
-   */
-  public synchronized File getFile(String bpid, Block b) {
-    return getFile(bpid, b.getBlockId());
+  @Override // {@link FSDatasetInterface}
+  public synchronized boolean contains(final ExtendedBlock block) {
+    final long blockId = block.getLocalBlock().getBlockId();
+    return getFile(block.getBlockPoolId(), blockId) != null;
   }
 
   /**
@@ -2143,7 +2141,7 @@ public synchronized File getFile(String bpid, Block b) {
    * @param blockId a block's id
    * @return on disk data file path; null if the replica does not exist
    */
-  private File getFile(String bpid, long blockId) {
+  File getFile(final String bpid, final long blockId) {
     ReplicaInfo info = volumeMap.get(bpid, blockId);
     if (info != null) {
       return info.getBlockFile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
index f447116890..627ac27f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
@@ -19,7 +19,6 @@
 
 
 import java.io.Closeable;
-import java.io.File;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -27,13 +26,13 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -303,6 +302,9 @@ public void recoverClose(ExtendedBlock b,
    */
   public BlockListAsLongs getBlockReport(String bpid);
 
+  /** Does the dataset contain the block? */
+  public boolean contains(ExtendedBlock block);
+
   /**
    * Is the block valid?
    * @param b
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index bd4961afc0..4423696611 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
@@ -1165,7 +1166,7 @@ static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
     for(int i = 0; i < blocks.length; i++) {
       FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
       for(Block b : blocks[i]) {
-        files.add(ds.getBlockFile(poolId, b));
+        files.add(DataNodeTestUtils.getBlockFile(ds, poolId, b.getBlockId()));
       }
     }
     return files;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 39ea15bb5f..2f65c1a49d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 
@@ -139,7 +140,8 @@ public void testCopyOnWrite() throws IOException {
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         ExtendedBlock b = blocks.get(i).getBlock();
-        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
+        final File f = DataNodeTestUtils.getBlockFile(dataset,
+            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 3a02ae6e8b..32d38230d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -831,7 +832,8 @@ public void testLeaseExpireHardLimit() throws Exception {
       FSDataset dataset = (FSDataset)datanode.data;
       ExtendedBlock blk = locatedblock.getBlock();
       Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
-      File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
+      final File blockfile = DataNodeTestUtils.getBlockFile(dataset,
+          blk.getBlockPoolId(), b.getBlockId());
       System.out.println("blockfile=" + blockfile);
       if (blockfile != null) {
         BufferedReader in = new BufferedReader(new FileReader(blockfile));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2e3bd92373..b810d1ba7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -37,5 +38,8 @@ public class DataNodeTestUtils {
       getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);
   }
-  
+
+  public static File getBlockFile(FSDataset fsdataset, String bpid, long bid) {
+    return fsdataset.getFile(bpid, bid);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 9dbcc2f736..2aa93ee036 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -81,8 +81,8 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
         CHECKSUM_NULL, 16*1024 );
     byte[] nullCrcHeader = checksum.getHeader();
     nullCrcFileData = new byte[2 + nullCrcHeader.length];
-    nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
-    nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
+    nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
+    nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff);
     for (int i = 0; i < nullCrcHeader.length; i++) {
       nullCrcFileData[i+2] = nullCrcHeader[i];
     }
@@ -390,9 +390,7 @@ public synchronized void injectBlocks(String bpid,
       Iterable<Block> injectBlocks) throws IOException {
     ExtendedBlock blk = new ExtendedBlock();
     if (injectBlocks != null) {
-      int numInjectedBlocks = 0;
       for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
-        numInjectedBlocks++;
         if (b == null) {
           throw new NullPointerException("Null blocks in block list");
         }
@@ -555,31 +553,27 @@ public synchronized void invalidate(String bpid, Block[] invalidBlks)
     }
   }
 
+  private BInfo getBInfo(final ExtendedBlock b) {
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
+    return map == null? null: map.get(b.getLocalBlock());
+  }
+
+  @Override // {@link FSDatasetInterface}
+  public boolean contains(ExtendedBlock block) {
+    return getBInfo(block) != null;
+  }
+
   @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && binfo.isFinalized();
   }
 
   /* check if a block is created but not finalized */
   @Override
   public synchronized boolean isValidRbw(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return !binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && !binfo.isFinalized();
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 7410d42679..208a16ad1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -352,7 +352,7 @@ private void verifyAddition(long blockId, long genStamp, long size) {
 
     // Added block has the same file as the one created by the test
     File file = new File(getBlockFile(blockId));
-    assertEquals(file.getName(), fds.findBlockFile(bpid, blockId).getName());
+    assertEquals(file.getName(), fds.getFile(bpid, blockId).getName());
 
     // Generation stamp is same as that of created file
     assertEquals(genStamp, replicainfo.getGenerationStamp());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 45356d9d19..214b4e71a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -101,7 +101,7 @@ public void testGetMetaData() throws IOException {
     InputStream metaInput = fsdataset.getMetaDataInputStream(b);
     DataInputStream metaDataInput = new DataInputStream(metaInput);
     short version = metaDataInput.readShort();
-    assertEquals(FSDataset.METADATA_VERSION, version);
+    assertEquals(BlockMetadataHeader.VERSION, version);
     DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
     assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
     assertEquals(0, checksum.getChecksumSize());
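
Note (illustrative, not part of the patch): after this change, callers no longer ask FSDataset for a block file and test it for null; they call the new FSDatasetInterface.contains(ExtendedBlock) method, and the file-lookup helpers (getFile, getBlockFile) become package-private, with tests going through DataNodeTestUtils.getBlockFile. The following is a minimal sketch of the caller-side pattern, assuming a FSDatasetInterface instance named dataset and an ExtendedBlock named block are in scope; the class and method names are hypothetical.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;

class ContainsUsageSketch {
  /** Returns true if the replica is still in the dataset and may be verified. */
  static boolean replicaPresent(FSDatasetInterface dataset, ExtendedBlock block) {
    // Old pattern (removed by this patch, see BlockPoolSliceScanner above):
    //   dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) != null
    // New pattern, via the method added to FSDatasetInterface:
    return dataset.contains(block);
  }
}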