HDFS-16616. remove use of org.apache.hadoop.util.Sets (#4400)

Co-authored-by: Samrat Deb
Authored by Samrat on 2022-06-22 10:17:36 +05:30, committed by GitHub
parent cbdabe9ec8
commit e8fd914c58
17 changed files with 53 additions and 52 deletions
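
The change is mechanical across all 17 files: drop the org.apache.hadoop.util.Sets wrapper and build the same sets with plain JDK collections. A minimal, hypothetical sketch of the substitutions this diff applies (class and variable names below are illustrative, not taken from the patch):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

public class SetsReplacementSketch {
  public static void main(String[] args) {
    // Sets.newHashSet()           -> new HashSet<>()
    Set<String> empty = new HashSet<>();

    // Sets.newHashSet("a", "b")   -> new HashSet<>(Arrays.asList("a", "b"))
    Set<String> literals = new HashSet<>(Arrays.asList("a", "b"));

    // Sets.newHashSet(collection) -> new HashSet<>(collection)
    Set<String> copy = new HashSet<>(literals);

    // Sets.newTreeSet(collection) -> new TreeSet<>(collection)
    SortedSet<String> sorted = new TreeSet<>(literals);

    System.out.println(empty + " " + literals + " " + copy + " " + sorted);
  }
}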

View File

@@ -39,7 +39,6 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.Sets;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -57,6 +56,8 @@
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
@@ -120,7 +121,7 @@ public Set<String> getGroupsSet(String user) throws IOException {
LOG.info("Getting groups in MockUnixGroupsMapping");
String g1 = user + (10 * i + 1);
String g2 = user + (10 * i + 2);
Set<String> s = Sets.newHashSet(g1, g2);
Set<String> s = new HashSet<>(Arrays.asList(g1, g2));
i++;
return s;
}

View File

@@ -76,7 +76,6 @@
import org.apache.hadoop.net.DomainNameResolverFactory;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Sets;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -725,8 +724,9 @@ private static Collection<String> getParentNameServices(Configuration conf)
} else {
// Ensure that the internal service is indeed in the list of all available
// nameservices.
Set<String> availableNameServices = Sets.newHashSet(conf
.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
Collection<String> namespaces = conf
.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
Set<String> availableNameServices = new HashSet<>(namespaces);
for (String nsId : parentNameServices) {
if (!availableNameServices.contains(nsId)) {
throw new IOException("Unknown nameservice: " + nsId);
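
The hunk above copies the trimmed nameservice list into a HashSet so the contains() checks in the loop stay constant-time. A self-contained sketch of that validate-against-a-set pattern, with hypothetical method and variable names (the real code reads the list from Configuration and throws IOException):

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class NameserviceCheckSketch {
  // Hypothetical stand-in for the DFSUtil logic above: reject ids that are
  // not present in the configured nameservice list.
  static void validate(Collection<String> configured, Collection<String> requested) {
    Set<String> available = new HashSet<>(configured);  // O(1) membership tests
    for (String nsId : requested) {
      if (!available.contains(nsId)) {
        throw new IllegalArgumentException("Unknown nameservice: " + nsId);
      }
    }
  }

  public static void main(String[] args) {
    validate(Arrays.asList("ns1", "ns2"), Arrays.asList("ns1"));  // passes
  }
}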

View File

@@ -39,7 +39,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -51,6 +50,7 @@
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
@@ -273,7 +273,7 @@ private List<InetSocketAddress> getOtherJournalNodeAddrs() {
}
if (uriStr == null || uriStr.isEmpty()) {
HashSet<String> sharedEditsUri = Sets.newHashSet();
HashSet<String> sharedEditsUri = new HashSet<>();
if (nameServiceId != null) {
Collection<String> nnIds = DFSUtilClient.getNameNodeIds(
conf, nameServiceId);
@@ -315,7 +315,7 @@ private List<InetSocketAddress> getJournalAddrList(String uriStr) throws
IOException {
URI uri = new URI(uriStr);
return Util.getLoggerAddresses(uri,
Sets.newHashSet(jn.getBoundIpcAddress()), conf);
new HashSet<>(Arrays.asList(jn.getBoundIpcAddress())), conf);
}
private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,
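
In getJournalAddrList the excluded-address set has exactly one element, the JournalNode's own bound IPC address, and the patch builds it with new HashSet<>(Arrays.asList(...)). The sketch below shows that construction next to Collections.singleton, an equivalent read-only alternative from the JDK (not what the patch uses); the port is only a placeholder:

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SingletonSetSketch {
  public static void main(String[] args) {
    InetSocketAddress self = new InetSocketAddress("localhost", 8485);

    // What the patch does: a mutable one-element HashSet.
    Set<InetSocketAddress> excluded = new HashSet<>(Arrays.asList(self));

    // Equivalent for read-only use, but immutable (an alternative, not the patch).
    Set<InetSocketAddress> excludedView = Collections.singleton(self);

    System.out.println(excluded.equals(excludedView));  // true
  }
}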

View File

@@ -40,6 +40,7 @@
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -143,11 +144,11 @@ void writeUnlock() {
void refreshNNList(String serviceId, List<String> nnIds,
ArrayList<InetSocketAddress> addrs,
ArrayList<InetSocketAddress> lifelineAddrs) throws IOException {
Set<InetSocketAddress> oldAddrs = Sets.newHashSet();
Set<InetSocketAddress> oldAddrs = new HashSet<>();
for (BPServiceActor actor : bpServices) {
oldAddrs.add(actor.getNNSocketAddress());
}
Set<InetSocketAddress> newAddrs = Sets.newHashSet(addrs);
Set<InetSocketAddress> newAddrs = new HashSet<>(addrs);
// Process added NNs
Set<InetSocketAddress> addedNNs = Sets.difference(newAddrs, oldAddrs);

View File

@@ -196,8 +196,8 @@ private void doRefreshNamenodes(
// Step 2. Any nameservices we currently have but are no longer present
// need to be removed.
toRemove = Sets.newHashSet(Sets.difference(
bpByNameserviceId.keySet(), addrMap.keySet()));
toRemove = Sets.difference(
bpByNameserviceId.keySet(), addrMap.keySet());
assert toRefresh.size() + toAdd.size() ==
addrMap.size() :
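
The old code wrapped Sets.difference in Sets.newHashSet to take an independent copy; the new code uses the difference result directly. Whether that result is a live view or a standalone set depends on the Sets implementation still imported in this file, which the excerpt does not show. For reference, a pure-JDK way to get an unambiguously independent difference is a copy plus removeAll (hypothetical helper, not the patch's code):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SetDifferenceSketch {
  // Hypothetical helper: an independent set containing the elements of a not in b.
  static <T> Set<T> difference(Set<T> a, Set<T> b) {
    Set<T> result = new HashSet<>(a);
    result.removeAll(b);
    return result;
  }

  public static void main(String[] args) {
    Set<String> current = new HashSet<>(Arrays.asList("ns1", "ns2", "ns3"));
    Set<String> refreshed = new HashSet<>(Arrays.asList("ns1", "ns3"));
    System.out.println(difference(current, refreshed));  // [ns2]
  }
}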

View File

@@ -121,7 +121,6 @@
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Sets;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
@@ -404,11 +403,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
*/
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
Collection<StorageLocation> dataLocations, DataStorage storage) {
Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(
dataLocations.size());
for (StorageLocation sl: dataLocations) {
failedLocationSet.add(sl);
}
Set<StorageLocation> failedLocationSet = new HashSet<>(dataLocations);
for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
it.hasNext(); ) {
Storage.StorageDirectory sd = it.next();
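
Here the element-by-element loop that seeded failedLocationSet collapses into the HashSet copy constructor; the rest of the method (not fully shown) appears to prune that set down to the locations that actually failed. A hedged, self-contained sketch of that start-full-then-remove pattern, with made-up path strings standing in for StorageLocation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class FailedLocationSketch {
  public static void main(String[] args) {
    List<String> configured = Arrays.asList("/data1", "/data2", "/data3");
    List<String> successfullyLoaded = Arrays.asList("/data1", "/data3");

    // Start by assuming every configured location failed ...
    Set<String> failed = new HashSet<>(configured);
    // ... then strike out the ones the storage layer actually loaded.
    failed.removeAll(successfullyLoaded);

    System.out.println(failed);  // [/data2]
  }
}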

View File

@@ -31,6 +31,7 @@
import java.util.Map;
import java.util.PriorityQueue;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.CopyOnWriteArrayList;
import org.slf4j.Logger;
@@ -42,7 +43,6 @@
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Sets;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.util.Preconditions;
@@ -677,7 +677,7 @@ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
// storage directory with ancient logs. Clear out any logs we've
// accumulated so far, and then skip to the next segment of logs
// after the gap.
SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
SortedSet<Long> startTxIds = new TreeSet<>(logsByStartTxId.keySet());
startTxIds = startTxIds.tailSet(curStartTxId);
if (startTxIds.isEmpty()) {
break;
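
Sets.newTreeSet(keySet) becomes new TreeSet<>(keySet); the following tailSet(curStartTxId) call should behave the same either way, returning the start txids at or after the current one in ascending order. A small sketch with made-up txids:

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class TailSetSketch {
  public static void main(String[] args) {
    // Hypothetical start-transaction ids of known edit log segments.
    SortedSet<Long> startTxIds = new TreeSet<>(Arrays.asList(1L, 101L, 501L));

    // Everything at or after txid 101, in sorted order.
    SortedSet<Long> fromCurrent = startTxIds.tailSet(101L);
    System.out.println(fromCurrent);  // [101, 501]
  }
}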

View File

@@ -95,6 +95,7 @@ public int getAttemptedItemsCount() {
@VisibleForTesting
public void updateAttemptedItemsCount() {
storagePolicySatisfier.getAttemptedItemsMonitor().getStorageMovementAttemptedItems()
.add(new StoragePolicySatisfier.AttemptedItemInfo(0, 1, 1, new HashSet<>(), 1));
.add(new StoragePolicySatisfier.AttemptedItemInfo(0, 1,
1, new HashSet<>(), 1));
}
}

View File

@@ -71,6 +71,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -87,7 +88,6 @@
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -653,7 +653,7 @@ public DataNode getDatanode() {
private boolean federation;
private boolean checkExitOnShutdown = true;
protected final int storagesPerDatanode;
private Set<FileSystem> fileSystems = Sets.newHashSet();
private Set<FileSystem> fileSystems = new HashSet<>();
private List<long[]> storageCap = Lists.newLinkedList();

View File

@@ -53,6 +53,7 @@
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -80,7 +81,6 @@
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Sets;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -1042,10 +1042,10 @@ public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
{
Collection<String> internal = DFSUtil.getInternalNameServices(conf);
assertEquals(Sets.newHashSet("nn1"), internal);
assertEquals(new HashSet<>(Arrays.asList("nn1")), internal);
Collection<String> all = DFSUtilClient.getNameServiceIds(conf);
assertEquals(Sets.newHashSet("nn1", "nn2"), all);
assertEquals(new HashSet<>(Arrays.asList("nn1", "nn2")), all);
}
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil

View File

@@ -26,7 +26,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.Sets;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -34,6 +33,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
@@ -309,11 +309,11 @@ public void testChooseRandomWithStorageType() throws Exception {
// test the choose random can return desired storage type nodes without
// exclude
Set<String> diskUnderL1 =
Sets.newHashSet("host2", "host4", "host5", "host6");
Set<String> archiveUnderL1 = Sets.newHashSet("host1", "host3");
Set<String> ramdiskUnderL1 = Sets.newHashSet("host7");
Set<String> ssdUnderL1 = Sets.newHashSet("host8");
Set<String> nvdimmUnderL1 = Sets.newHashSet("host9");
new HashSet<>(Arrays.asList("host2", "host4", "host5", "host6"));
Set<String> archiveUnderL1 = new HashSet<>(Arrays.asList("host1", "host3"));
Set<String> ramdiskUnderL1 = new HashSet<>(Arrays.asList("host7"));
Set<String> ssdUnderL1 = new HashSet<>(Arrays.asList("host8"));
Set<String> nvdimmUnderL1 = new HashSet<>(Arrays.asList("host9"));
for (int i = 0; i < 10; i++) {
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.DISK);
@@ -396,7 +396,7 @@ public void testChooseRandomWithStorageTypeWithExcluded() throws Exception {
assertEquals("host6", dd.getHostName());
// exclude the host on r4 (since there is only one host, no randomness here)
excluded.add(n);
Set<String> expectedSet = Sets.newHashSet("host4", "host5");
Set<String> expectedSet = new HashSet<>(Arrays.asList("host4", "host5"));
for (int i = 0; i < 10; i++) {
// under l1, there are four hosts with DISK:
// /l1/d1/r1/host2, /l1/d1/r2/host4, /l1/d1/r2/host5 and /l1/d2/r3/host6

View File

@@ -34,6 +34,7 @@
import java.util.Map;
import java.util.Random;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
@@ -51,7 +52,6 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Sets;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@@ -108,7 +108,7 @@ private static long determineMaxIpcNumber() throws Exception {
qjm.format(FAKE_NSINFO, false);
doWorkload(cluster, qjm);
SortedSet<Integer> ipcCounts = Sets.newTreeSet();
SortedSet<Integer> ipcCounts = new TreeSet<>();
for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
InvocationCountingChannel ch = (InvocationCountingChannel)l;
ch.waitForAllPendingCalls();

View File

@@ -23,6 +23,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
@@ -73,13 +74,13 @@ public void testRefreshNamenodes() throws IOException {
// Ensure a BPOfferService in the datanodes corresponds to
// a namenode in the cluster
Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
Set<InetSocketAddress> nnAddrsFromCluster = new HashSet<>();
for (int i = 0; i < 4; i++) {
assertTrue(nnAddrsFromCluster.add(
cluster.getNameNode(i).getNameNodeAddress()));
}
Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
Set<InetSocketAddress> nnAddrsFromDN = new HashSet<>();
for (BPOfferService bpos : dn.getAllBpOs()) {
for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));

View File

@@ -416,7 +416,7 @@ public static void assertFileContentsSame(File... files) throws Exception {
if (files.length < 2) return;
Map<File, String> md5s = getFileMD5s(files);
if (Sets.newHashSet(md5s.values()).size() > 1) {
if (new HashSet<>(md5s.values()).size() > 1) {
fail("File contents differed:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
@@ -433,7 +433,8 @@ public static void assertFileContentsDifferent(
File... files) throws Exception
{
Map<File, String> md5s = getFileMD5s(files);
if (Sets.newHashSet(md5s.values()).size() != expectedUniqueHashes) {
int uniqueHashes = new HashSet<>(md5s.values()).size();
if (uniqueHashes != expectedUniqueHashes) {
fail("Expected " + expectedUniqueHashes + " different hashes, got:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
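
Both assertions now count distinct MD5 strings by copying the map values into a HashSet. A tiny sketch of that distinct-count idiom with a hypothetical file-to-hash map:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

public class UniqueValuesSketch {
  public static void main(String[] args) {
    Map<String, String> md5s = new HashMap<>();
    md5s.put("fsimage_1", "aaa");
    md5s.put("fsimage_2", "aaa");
    md5s.put("fsimage_3", "bbb");

    // Distinct hashes == distinct file contents.
    int uniqueHashes = new HashSet<>(md5s.values()).size();
    System.out.println(uniqueHashes);  // 2
  }
}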

View File

@@ -50,6 +50,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -118,7 +119,6 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Sets;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -386,15 +386,15 @@ public void testFsckMove() throws Exception {
cluster.getNameNodePort()), conf);
String[] fileNames = util.getFileNames(topDir);
CorruptedTestFile[] ctFiles = new CorruptedTestFile[]{
new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
new CorruptedTestFile(fileNames[0], new HashSet<>(Arrays.asList(0)),
dfsClient, numDatanodes, dfsBlockSize),
new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
new CorruptedTestFile(fileNames[1], new HashSet<>(Arrays.asList(2, 3)),
dfsClient, numDatanodes, dfsBlockSize),
new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
new CorruptedTestFile(fileNames[2], new HashSet<>(Arrays.asList(4)),
dfsClient, numDatanodes, dfsBlockSize),
new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
new CorruptedTestFile(fileNames[3], new HashSet<>(Arrays.asList(0, 1, 2, 3)),
dfsClient, numDatanodes, dfsBlockSize),
new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
new CorruptedTestFile(fileNames[4], new HashSet<>(Arrays.asList(1, 2, 3, 4)),
dfsClient, numDatanodes, dfsBlockSize)
};
int totalMissingBlocks = 0;
@@ -2215,7 +2215,7 @@ public void testFsckMoveAfterCorruption() throws Exception {
new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
final String blockFileToCorrupt = fileNames[0];
final CorruptedTestFile ctf = new CorruptedTestFile(blockFileToCorrupt,
Sets.newHashSet(0), dfsClient, numDatanodes, dfsBlockSize);
new HashSet<>(Arrays.asList(0)), dfsClient, numDatanodes, dfsBlockSize);
ctf.corruptBlocks(cluster);
// Wait for fsck to discover all the missing blocks

View File

@@ -28,6 +28,7 @@
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
@@ -49,7 +50,6 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Sets;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -299,7 +299,7 @@ public long getLastValidTxId() {
@Override
public Set<Long> getValidTxIds() {
return Sets.newHashSet(0L);
return new HashSet<>(Arrays.asList(0L));
}
public int getMaxOpSize() {
@@ -341,7 +341,7 @@ public long getLastValidTxId() {
@Override
public Set<Long> getValidTxIds() {
return Sets.newHashSet(0L);
return new HashSet<>(Arrays.asList(0L));
}
}
@@ -387,7 +387,7 @@ public long getLastValidTxId() {
@Override
public Set<Long> getValidTxIds() {
return Sets.newHashSet(1L , 2L, 3L, 5L, 6L, 7L, 8L, 9L, 10L);
return new HashSet<>(Arrays.asList(1L, 2L, 3L, 5L, 6L, 7L, 8L, 9L, 10L));
}
}

View File

@@ -34,6 +34,8 @@
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
@@ -47,7 +49,6 @@
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -93,7 +94,7 @@ public Set<String> getGroupsSet(String user) {
LOG.info("Getting groups in MockUnixGroupsMapping");
String g1 = user + (10 * i + 1);
String g2 = user + (10 * i + 2);
Set<String> s = Sets.newHashSet(g1, g2);
Set<String> s = new HashSet<>(Arrays.asList(g1, g2));
i++;
return s;
}