diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e55f3404b9..0c56f2b08f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery
     blocks. (Zhe Zhang via jing9)
 
+    HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+    DatanodeStorageInfo. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f35039e4..2f81ddfad7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -452,8 +452,8 @@ private void rescanFile(CacheDirective directive, INodeFile file) {
         file.getFullPathName(), cachedTotal, neededTotal);
   }
 
-  private String findReasonForNotCaching(CachedBlock cblock, 
-          BlockInfo blockInfo) {
+  private String findReasonForNotCaching(CachedBlock cblock,
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index dd7b3011df..99def6b1f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -64,7 +64,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public final DecommissioningStatus decommissioningStatus =
+      new DecommissioningStatus();
 
   private long curBlockReportId = 0;
 
@@ -115,7 +116,7 @@ synchronized List<E> poll(int numBlocks) {
         return null;
       }
 
-      List<E> results = new ArrayList<E>();
+      List<E> results = new ArrayList<>();
       for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
         results.add(blockq.poll());
       }
@@ -135,7 +136,7 @@ synchronized void clear() {
   }
 
   private final Map<String, DatanodeStorageInfo> storageMap =
-      new HashMap<String, DatanodeStorageInfo>();
+      new HashMap<>();
 
   /**
    * A list of CachedBlock objects on this datanode.
@@ -217,12 +218,14 @@ public CachedBlocksList getPendingUncached() {
   private long bandwidth;
 
   /** A queue of blocks to be replicated by this datanode */
-  private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+  private final BlockQueue<BlockTargetPair> replicateBlocks =
+      new BlockQueue<>();
   /** A queue of blocks to be recovered by this datanode */
   private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks =
-      new BlockQueue<BlockInfoContiguousUnderConstruction>();
+      new BlockQueue<>();
   /** A set of blocks to be invalidated by this datanode */
-  private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+  private final LightWeightHashSet<Block> invalidateBlocks =
+      new LightWeightHashSet<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
@@ -230,9 +233,9 @@ public CachedBlocksList getPendingUncached() {
    * while writing the block).
    */
   private EnumCounters<StorageType> currApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private EnumCounters<StorageType> prevApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   private int volumeFailures = 0;
@@ -276,6 +279,7 @@ public DatanodeStorageInfo getStorageInfo(String storageID) {
       return storageMap.get(storageID);
     }
   }
+
   DatanodeStorageInfo[] getStorageInfos() {
     synchronized (storageMap) {
       final Collection<DatanodeStorageInfo> storages = storageMap.values();
@@ -321,7 +325,7 @@ List<DatanodeStorageInfo> removeZombieStorages() {
             Long.toHexString(curBlockReportId));
         iter.remove();
         if (zombies == null) {
-          zombies = new LinkedList<DatanodeStorageInfo>();
+          zombies = new LinkedList<>();
         }
         zombies.add(storageInfo);
       }
@@ -350,10 +354,7 @@ boolean removeBlock(BlockInfo b) {
    */
   boolean removeBlock(String storageID, BlockInfo b) {
     DatanodeStorageInfo s = getStorageInfo(storageID);
-    if (s != null) {
-      return s.removeBlock(b);
-    }
-    return false;
+    return s != null && s.removeBlock(b);
   }
 
   public void resetBlocks() {
@@ -449,7 +450,7 @@ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
           + this.volumeFailures + " to " + volFailures);
       synchronized (storageMap) {
         failedStorageInfos =
-            new HashSet<DatanodeStorageInfo>(storageMap.values());
+            new HashSet<>(storageMap.values());
       }
     }
 
@@ -505,7 +506,7 @@ private void pruneStorageMap(final StorageReport[] reports) {
     HashMap<String, DatanodeStorageInfo> excessStorages;
 
     // Init excessStorages with all known storages.
-    excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+    excessStorages = new HashMap<>(storageMap);
 
     // Remove storages that the DN reported in the heartbeat.
     for (final StorageReport report : reports) {
@@ -542,7 +543,7 @@ private static class BlockIterator implements Iterator<BlockInfo> {
     private final List<Iterator<BlockInfo>> iterators;
 
     private BlockIterator(final DatanodeStorageInfo... storages) {
-      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+      List<Iterator<BlockInfo>> iterators = new ArrayList<>();
       for (DatanodeStorageInfo e : storages) {
         iterators.add(e.getBlockIterator());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 8143fb44a1..4266004f49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -85,7 +85,7 @@ public class DatanodeManager {
    * Mapping: StorageID -> DatanodeDescriptor
    */
   private final Map<String, DatanodeDescriptor> datanodeMap
-    = new HashMap<String, DatanodeDescriptor>();
+    = new HashMap<>();
 
   /** Cluster network topology */
   private final NetworkTopology networktopology;
@@ -162,7 +162,7 @@ public class DatanodeManager {
    * Software version -> Number of datanodes with this version
    */
   private HashMap<String, Integer> datanodesSoftwareVersions =
-    new HashMap<String, Integer>(4, 0.75f);
+    new HashMap<>(4, 0.75f);
 
   /**
    * The minimum time between resending caching directives to Datanodes,
@@ -217,7 +217,7 @@ public class DatanodeManager {
     // locations of those hosts in the include list and store the mapping
     // in the cache; so future calls to resolve will be fast.
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
-      final ArrayList<String> locations = new ArrayList<String>();
+      final ArrayList<String> locations = new ArrayList<>();
       for (InetSocketAddress addr : hostFileManager.getIncludes()) {
         locations.add(addr.getAddress().getHostAddress());
       }
@@ -370,7 +370,7 @@ public void sortLocatedBlocks(final String targethost,
     // here we should get node but not datanode only .
     Node client = getDatanodeByHost(targethost);
     if (client == null) {
-      List<String> hosts = new ArrayList<String> (1);
+      List<String> hosts = new ArrayList<> (1);
       hosts.add(targethost);
       List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
       if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
@@ -522,7 +522,7 @@ public DatanodeStorageInfo[] getDatanodeStorageInfos(
   void datanodeDump(final PrintWriter out) {
     synchronized (datanodeMap) {
       Map<String, DatanodeDescriptor> sortedDatanodeMap =
-          new TreeMap<String, DatanodeDescriptor>(datanodeMap);
+          new TreeMap<>(datanodeMap);
       out.println("Metasave: Number of datanodes: " + datanodeMap.size());
       for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
         out.println(node.dumpDatanode());
@@ -660,7 +660,7 @@ private boolean shouldCountVersion(DatanodeDescriptor node) {
 
   private void countSoftwareVersions() {
     synchronized(datanodeMap) {
-      HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+      HashMap<String, Integer> versionCount = new HashMap<>();
       for(DatanodeDescriptor dn: datanodeMap.values()) {
         // Check isAlive too because right after removeDatanode(),
         // isDatanodeDead() is still true
@@ -677,7 +677,7 @@ private void countSoftwareVersions() {
 
   public HashMap<String, Integer> getDatanodesSoftwareVersions() {
     synchronized(datanodeMap) {
-      return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+      return new HashMap<> (this.datanodesSoftwareVersions);
     }
   }
 
@@ -710,7 +710,7 @@ private String resolveNetworkLocationWithFallBackToDefaultLocation (
    */
   private String resolveNetworkLocation (DatanodeID node)
       throws UnresolvedTopologyException {
-    List<String> names = new ArrayList<String>(1);
+    List<String> names = new ArrayList<>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       names.add(node.getIpAddr());
     } else {
@@ -1000,7 +1000,7 @@ nodes with its data cleared (or user can just remove the StorageID
         // If the network location is invalid, clear the cached mappings
         // so that we have a chance to re-add this DataNode with the
         // correct network location later.
-        List<String> invalidNodeNames = new ArrayList<String>(3);
+        List<String> invalidNodeNames = new ArrayList<>(3);
         // clear cache for nodes in IP or Hostname
         invalidNodeNames.add(nodeReg.getIpAddr());
         invalidNodeNames.add(nodeReg.getHostName());
@@ -1275,7 +1275,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
     final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
 
     synchronized(datanodeMap) {
-      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+      nodes = new ArrayList<>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
@@ -1351,7 +1351,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       VolumeFailureSummary volumeFailureSummary) throws IOException {
     synchronized (heartbeatManager) {
       synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
+        DatanodeDescriptor nodeinfo;
         try {
           nodeinfo = getDatanode(nodeReg);
         } catch(UnregisteredNodeException e) {
@@ -1389,7 +1389,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
           final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
           // Skip stale nodes during recovery - not heart beated for some time (30s by default).
           final List<DatanodeStorageInfo> recoveryLocations =
-              new ArrayList<DatanodeStorageInfo>(storages.length);
+              new ArrayList<>(storages.length);
           for (int i = 0; i < storages.length; i++) {
             if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
               recoveryLocations.add(storages[i]);
@@ -1431,7 +1431,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
           return new DatanodeCommand[] { brCommand };
         }
 
-        final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+        final List<DatanodeCommand> cmds = new ArrayList<>();
         //check pending replication
         List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
             maxTransfers);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 65b83e1a24..92841a634c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -37,8 +37,9 @@ public class DatanodeStorageInfo {
   public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
 
-  public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
-    return toDatanodeInfos(Arrays.asList(storages));
+  public static DatanodeInfo[] toDatanodeInfos(
+      DatanodeStorageInfo[] storages) {
+    return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
   }
 
   static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
     final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
@@ -58,6 +59,9 @@ static DatanodeDescriptor[] toDatanodeDescriptors(
   }
 
   public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     String[] storageIDs = new String[storages.length];
     for(int i = 0; i < storageIDs.length; i++) {
       storageIDs[i] = storages[i].getStorageID();
@@ -66,6 +70,9 @@ public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
   }
 
   public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     StorageType[] storageTypes = new StorageType[storages.length];
     for(int i = 0; i < storageTypes.length; i++) {
       storageTypes[i] = storages[i].getStorageType();
@@ -380,6 +387,6 @@ static DatanodeStorageInfo getDatanodeStorageInfo(
   }
 
   static enum AddBlockResult {
-    ADDED, REPLACED, ALREADY_EXIST;
+    ADDED, REPLACED, ALREADY_EXIST
   }
 }
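
Note (not part of the patch): the DatanodeStorageInfo changes above add null guards so that the static conversion helpers return null for a null input array instead of throwing. Below is a minimal standalone sketch of that pattern; the Storage class and toIds helper are illustrative stand-ins, not HDFS classes.

import java.util.Arrays;

public class NullGuardExample {
  // Hypothetical stand-in for DatanodeStorageInfo.
  static final class Storage {
    private final String id;
    Storage(String id) { this.id = id; }
    String getId() { return id; }
  }

  // Mirrors the guard added to toStorageIDs/toStorageTypes:
  // a null input array yields null rather than a NullPointerException.
  static String[] toIds(Storage[] storages) {
    if (storages == null) {
      return null;
    }
    String[] ids = new String[storages.length];
    for (int i = 0; i < ids.length; i++) {
      ids[i] = storages[i].getId();
    }
    return ids;
  }

  public static void main(String[] args) {
    Storage[] storages = { new Storage("DS-1"), new Storage("DS-2") };
    System.out.println(Arrays.toString(toIds(storages))); // [DS-1, DS-2]
    System.out.println(Arrays.toString(toIds(null)));     // prints "null", no exception
  }
}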