HDFS-15574. Remove unnecessary sort of block list in DirectoryScanner. Contributed by Stephen O'Donnell.

hemanthboyina 2020-09-17 09:40:36 +05:30
parent 74c0764343
commit 94e5c5257f
8 changed files with 49 additions and 12 deletions
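The heart of the change: DirectoryScanner used to sort the finalized block list on every scan even though the dataset already returns it in blockId order. The snippet below is a back-of-the-envelope sketch (not part of the commit) of what that redundant sort costs; even on an already-sorted list, Collections.sort() still has to walk and compare every element.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Stand-alone sketch: sorting a list that is already in ascending order, the
// situation the DirectoryScanner was in before this change. TimSort verifies
// the order in O(n) here and O(n log n) in general; either way it is work the
// scanner no longer performs per block pool.
public class RedundantSortCost {
  public static void main(String[] args) {
    List<Long> blockIds = new ArrayList<>();
    for (long i = 0; i < 1_000_000; i++) {
      blockIds.add(i); // already sorted, like the blocks in the replica map
    }
    long start = System.nanoTime();
    Collections.sort(blockIds); // the kind of call the commit removes
    System.out.println("Sorting an already-sorted list took "
        + (System.nanoTime() - start) / 1_000_000 + " ms");
  }
}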

View File

@@ -482,8 +482,7 @@ private void scan() {
Collection<ScanInfo> diffRecord = new ArrayList<>();
statsRecord.totalBlocks = blockpoolReport.size();
final List<ReplicaInfo> bl = dataset.getFinalizedBlocks(bpid);
Collections.sort(bl); // Sort based on blockId
final List<ReplicaInfo> bl = dataset.getSortedFinalizedBlocks(bpid);
int d = 0; // index for blockpoolReport
int m = 0; // index for memReport
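The indexes d and m above drive a merge-style walk: scan() steps through the on-disk block pool report and the in-memory replica list in lock step, which only yields correct diffs if both sides are ordered by blockId. That ordering is now a documented property of getSortedFinalizedBlocks(), so the explicit Collections.sort() could go. A minimal, self-contained sketch of such a two-cursor walk (plain block IDs instead of the real ScanInfo/ReplicaInfo types, and not the actual scanner code):

import java.util.Arrays;
import java.util.List;

// Two-cursor comparison of two lists that are both sorted ascending;
// "d" mirrors the on-disk report index and "m" the in-memory index above.
public class TwoCursorWalkSketch {
  static void walk(List<Long> diskIds, List<Long> memIds) {
    int d = 0; // index for the on-disk report
    int m = 0; // index for the in-memory list
    while (d < diskIds.size() && m < memIds.size()) {
      int cmp = diskIds.get(d).compareTo(memIds.get(m));
      if (cmp < 0) {
        System.out.println("on disk only: " + diskIds.get(d++));
      } else if (cmp > 0) {
        System.out.println("in memory only: " + memIds.get(m++));
      } else {
        d++; // present in both; the real scanner would compare metadata here
        m++;
      }
    }
  }

  public static void main(String[] args) {
    walk(Arrays.asList(1L, 2L, 4L), Arrays.asList(2L, 3L, 4L));
  }
}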

View File

@@ -237,16 +237,17 @@ StorageReport[] getStorageReports(String bpid)
VolumeFailureSummary getVolumeFailureSummary();
/**
* Gets a list of references to the finalized blocks for the given block pool.
* Gets a sorted list of references to the finalized blocks for the given
* block pool. The list is sorted by blockID.
* <p>
* Callers of this function should call
* {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being
* changed during list iteration.
* </p>
* @return a list of references to the finalized blocks for the given block
* pool.
* pool. The list is sorted by blockID.
*/
List<ReplicaInfo> getFinalizedBlocks(String bpid);
List<ReplicaInfo> getSortedFinalizedBlocks(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,

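The interface javadoc above keeps the existing requirement that callers hold the dataset lock while iterating the returned list. A usage sketch under that contract follows; the SortedBlockWalker class and totalBytes() helper are invented for illustration, and it assumes acquireDatasetLock() hands back an AutoCloseableLock, as the FsDatasetImpl code further down suggests.

import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.util.AutoCloseableLock;

// Usage sketch only, not part of the commit: take the dataset lock, then walk
// the finalized replicas, which now arrive ordered by blockId.
class SortedBlockWalker {
  static long totalBytes(FsDatasetSpi<?> dataset, String bpid) {
    long bytes = 0;
    try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
      List<ReplicaInfo> finalized = dataset.getSortedFinalizedBlocks(bpid);
      for (ReplicaInfo replica : finalized) {
        // replica status cannot change underneath us while the lock is held
        bytes += replica.getNumBytes();
      }
    }
    return bytes;
  }
}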
View File

@@ -1936,17 +1936,18 @@ public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
}
/**
* Gets a list of references to the finalized blocks for the given block pool.
* Gets a list of references to the finalized blocks for the given block pool,
* sorted by blockID.
* <p>
* Callers of this function should call
* {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being
* changed during list iteration.
* </p>
* @return a list of references to the finalized blocks for the given block
* pool.
* pool. The list is sorted by blockID.
*/
@Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) {
public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
final List<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(
volumeMap.size(bpid));
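The hunk is cut off here; the rest of getSortedFinalizedBlocks() is not shown. As a rough approximation (an assumption based on the pre-existing getFinalizedBlocks() body, not a quote of the commit), the method goes on to copy the finalized replicas out of the replica map, whose iteration order is ascending blockId, which is what lets it promise a sorted result without sorting:

// Approximate continuation, for orientation only; the real body may differ.
for (ReplicaInfo b : volumeMap.replicas(bpid)) {   // iterates in blockId order
  if (b.getState() == ReplicaState.FINALIZED) {
    finalized.add(b);                              // collected under the lock
  }
}
return finalized;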

View File

@@ -173,7 +173,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
final DataNode dn = cluster.getDataNodes().get(dnIdx);
final String bpid = cluster.getNamesystem().getBlockPoolId();
List<ReplicaInfo> replicas =
dn.getFSDataset().getFinalizedBlocks(bpid);
dn.getFSDataset().getSortedFinalizedBlocks(bpid);
assertTrue("Replicas do not exist", !replicas.isEmpty());
for (int idx = 0; idx < replicas.size(); idx++) {

View File

@@ -540,7 +540,7 @@ private void testErasureCodingWorkerXmitsWeight(
writeFile(fs, "/ec-xmits-weight", fileLen);
DataNode dn = cluster.getDataNodes().get(0);
int corruptBlocks = dn.getFSDataset().getFinalizedBlocks(
int corruptBlocks = dn.getFSDataset().getSortedFinalizedBlocks(
cluster.getNameNode().getNamesystem().getBlockPoolId()).size();
int expectedXmits = corruptBlocks * expectedWeight;

View File

@@ -1507,7 +1507,7 @@ public StorageReport[] getStorageReports(String bpid) {
}
@Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) {
public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException();
}

View File

@@ -90,7 +90,7 @@ public Map<String, Object> getVolumeInfoMap() {
}
@Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) {
public List<ReplicaInfo> getSortedFinalizedBlocks(String bpid) {
return null;
}

View File

@@ -79,6 +79,7 @@
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.HashSet;
import java.util.List;
@@ -472,6 +473,41 @@ public void testAddVolumeFailureReleasesInUseLock() throws IOException {
FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
@Test
/**
* This test is here primarily to catch any case where the datanode replica
* map structure is changed to a new structure which is not sorted and hence
* reading the blocks from it directly would not be sorted.
*/
public void testSortedFinalizedBlocksAreSorted() throws IOException {
this.conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
ds.addBlockPool(BLOCKPOOL, conf);
// Load 1000 blocks with random blockIDs
for (int i=0; i<=1000; i++) {
ExtendedBlock eb = new ExtendedBlock(
BLOCKPOOL, new Random().nextInt(), 1000, 1000 + i);
cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
}
// Get the sorted blocks and validate the arrayList is sorted
List<ReplicaInfo> replicaList = ds.getSortedFinalizedBlocks(BLOCKPOOL);
for (int i=0; i<replicaList.size() - 1; i++) {
if (replicaList.get(i).compareTo(replicaList.get(i+1)) > 0) {
// Not sorted so fail the test
fail("ArrayList is not sorted, and it should be");
}
}
} finally {
cluster.shutdown();
}
}
@Test
public void testDeletingBlocks() throws IOException {