HDFS-16075. Use empty array constants present in StorageType and DatanodeInfo to avoid creating redundant objects (#3115)
Reviewed-by: Hui Fei <ferhui@apache.org>
(cherry picked from commit c488abbc79)
parent 7d202e8365
commit b8a98e4f82
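The pattern behind this change, as a minimal sketch (the Resource class below is a hypothetical stand-in for StorageType and DatanodeInfo, not actual Hadoop code): a zero-length array is effectively immutable, so a single shared constant can be handed to every caller instead of allocating a fresh empty array each time, and Collection.toArray only allocates a new array when the collection is non-empty.

    // Minimal sketch of the empty-array-constant pattern; "Resource" is a
    // hypothetical class standing in for StorageType/DatanodeInfo.
    import java.util.ArrayList;
    import java.util.List;

    class Resource {
      // One shared instance; safe to reuse because an empty array cannot be
      // mutated in any observable way.
      static final Resource[] EMPTY_ARRAY = new Resource[0];
    }

    class ResourceHolder {
      private final List<Resource> resources = new ArrayList<>();

      Resource[] snapshot() {
        // Before: resources.toArray(new Resource[0]) allocated a throwaway
        // array on every call. After: the shared constant serves as the type
        // witness, and toArray allocates only when the list has elements.
        return resources.toArray(Resource.EMPTY_ARRAY);
      }
    }

The diff below applies that substitution throughout HDFS: new StorageType[0] becomes StorageType.EMPTY_ARRAY and new DatanodeInfo[0] becomes DatanodeInfo.EMPTY_ARRAY.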
@@ -74,7 +74,7 @@ public class BlockLocation implements Serializable {
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   /**
    * Default Constructor.
@@ -27,7 +27,7 @@ public class TestBlockLocation {
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   private static void checkBlockLocation(final BlockLocation loc)
       throws Exception {
@@ -1675,7 +1675,7 @@ public void updatePipeline(long newGS) throws IOException {
 
   DatanodeInfo[] getExcludedNodes() {
     return excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-        .keySet().toArray(new DatanodeInfo[0]);
+        .keySet().toArray(DatanodeInfo.EMPTY_ARRAY);
   }
 
   /**
@@ -131,7 +131,7 @@ public ErasureCodingPolicy getErasureCodingPolicy() {
   public int findBlock(long offset) {
     // create fake block of size 0 as a key
     LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
+        new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
@@ -535,7 +535,7 @@ private static HdfsFileStatus getMockHdfsFileStatus(
    */
  private static LocatedBlock getMockLocatedBlock(final String nsId) {
    LocatedBlock lb = mock(LocatedBlock.class);
-   when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+   when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
    DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0",
        1111, 1112, 1113, 1114);
    DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -1210,7 +1210,7 @@ public void testProxyGetAdditionalDatanode()
         newRouterFile, clientName, null, null,
         status.getFileId(), null, null);
 
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
         newRouterFile, status.getFileId(), block.getBlock(),
         block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);
@@ -312,7 +312,7 @@ testPath, new FsPermission("777"), clientName,
     assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock);
 
     // Get additionalDatanode via router and block is not null.
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = clientProtocol.getAdditionalDatanode(
         testPath, status.getFileId(), blockTwo.getBlock(),
         blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions,
@@ -209,7 +209,7 @@ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn,
       BlockInfo lastBlock = file.getLastBlock();
       if (lastBlock != null) {
         ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
-        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+        ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
       }
     }
 
@@ -883,7 +883,7 @@ public void testClientDNProtocolTimeout() throws IOException {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
 
     ClientDatanodeProtocol proxy = null;
 
@@ -242,7 +242,7 @@ public void testReadWithPreferredCachingReplica() throws IOException {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -271,7 +271,7 @@ public void testReadWithoutPreferredCachingReplica() throws IOException {
     DFSInputStream dfsInputStream =
         (DFSInputStream) fs.open(filePath).getWrappedStream();
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
         1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -461,8 +461,8 @@ public void testAbortedRecovery() throws Exception {
 
       // Add a block to the file
       ExtendedBlock block = client.getNamenode().addBlock(
-          file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(),
-          new String[0], null).getBlock();
+          file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY,
+          stat.getFileId(), new String[0], null).getBlock();
 
       // update the pipeline to get a new genstamp.
       ExtendedBlock updatedBlock = client.getNamenode()
@@ -578,7 +578,7 @@ private void createCommittedNotCompleteFile(DFSClient client, String file,
     // Add a block to the file
     LocatedBlock blk = client.getNamenode()
         .addBlock(file, client.clientName, null,
-            new DatanodeInfo[0], stat.getFileId(), new String[0], null);
+            DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null);
     ExtendedBlock finalBlock = blk.getBlock();
     if (bytesToWrite != null) {
       // Here we create a output stream and then abort it so the block gets
@@ -65,7 +65,7 @@ public void testDefaultPolicy() throws Exception {
 
     final DatanodeInfo[] infos = new DatanodeInfo[5];
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
-    datanodes[0] = new DatanodeInfo[0];
+    datanodes[0] = DatanodeInfo.EMPTY_ARRAY;
     for(int i = 0; i < infos.length; ) {
       infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
       i++;
@@ -31,7 +31,7 @@ public class TestLocatedBlock {
 
   @Test(timeout = 10000)
   public void testAddCachedLocWhenEmpty() {
-    DatanodeInfo[] ds = new DatanodeInfo[0];
+    DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
     ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
     LocatedBlock l1 = new LocatedBlock(b1, ds);
     DatanodeDescriptor dn = new DatanodeDescriptor(
@@ -391,7 +391,7 @@ private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
     fakeBlock.setBlockToken(token);
 
     // Create another RPC proxy with the same configuration - this will never
@@ -391,7 +391,8 @@ public void HelperFunction(String scriptFileName, int providedStorages)
       storageTypesList.add(StorageType.PROVIDED);
     }
 
-    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // register new datanode
@@ -693,7 +694,8 @@ public void testGetBlockLocationConsiderStorageType()
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK,
             StorageType.SSD, StorageType.DEFAULT, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
@@ -778,7 +780,8 @@ public void testGetBlockLocationConsiderStorageTypeAndLoad()
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK,
             StorageType.DEFAULT, StorageType.SSD, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
@@ -127,8 +127,8 @@ private void issueWriteBlockCall(DataXceiver xceiver, boolean lazyPersist)
         StorageType.RAM_DISK,
         null,
         "Dummy-Client",
-        new DatanodeInfo[0],
-        new StorageType[0],
+        DatanodeInfo.EMPTY_ARRAY,
+        StorageType.EMPTY_ARRAY,
         mock(DatanodeInfo.class),
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
         0, 0, 0, 0,
@@ -165,7 +165,7 @@ public void testReplicationError() throws Exception {
         DataChecksum.Type.CRC32, 512);
     new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], new StorageType[0], null,
+        DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
        checksum, CachingStrategy.newDefaultStrategy(), false, false,
        null, null, new String[0]);
@@ -119,7 +119,7 @@ private void doTestChooseTargetNormalCase() throws Exception {
       LocatedBlock additionalLocatedBlock =
           nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
               locatedBlock.getBlock(), locatedBlock.getLocations(),
-              locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+              locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY,
               additionalReplication, clientMachine);
       doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
     }
@@ -159,7 +159,7 @@ private void doTestChooseTargetSpecialCase() throws Exception {
         LocatedBlock additionalLocatedBlock =
             nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
                 locatedBlock.getBlock(), partLocs,
-                partStorageIDs, new DatanodeInfo[0],
+                partStorageIDs, DatanodeInfo.EMPTY_ARRAY,
                 j, clientMachine);
         doTestLocatedBlock(i + j, additionalLocatedBlock);
       }
@@ -857,7 +857,7 @@ public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
         pathString,
         client.getClientName(),
         new ExtendedBlock(previousBlock),
-        new DatanodeInfo[0],
+        DatanodeInfo.EMPTY_ARRAY,
         DFSClientAdapter.getFileId((DFSOutputStream) create
             .getWrappedStream()), null, null);
     cluster.restartNameNode(0, true);