diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1c084f9a1b..2320019f5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -52,6 +52,8 @@ Trunk (unreleased changes)
     HDFS-2334. Add Closeable to JournalManager. (Ivan Kelly via jitendra)
 
+    HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
+    (Hari Mankude via eli)
 
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 71ec00e20e..53fa4d2ce6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -543,7 +543,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException {
         if (pos > blockEnd) {
           currentNode = blockSeekTo(pos);
         }
-        int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
+        int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
         int result = readBuffer(buf, off, realLen, corruptedBlockMap);
 
         if (result >= 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 54363172c0..5cb94d1e83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -239,8 +239,7 @@ public class DataNode extends Configured
    * Use {@link NetUtils#createSocketAddr(String)} instead.
    */
   @Deprecated
-  public static InetSocketAddress createSocketAddr(String target
-                                                   ) throws IOException {
+  public static InetSocketAddress createSocketAddr(String target) {
     return NetUtils.createSocketAddr(target);
   }
 
@@ -334,14 +333,14 @@ public Object run() throws Exception {
       }
     }
 
-    void joinAll() throws InterruptedException {
+    void joinAll() {
       for (BPOfferService bpos: this.getAllNamenodeThreads()) {
         bpos.join();
       }
     }
 
     void refreshNamenodes(Configuration conf)
-        throws IOException, InterruptedException {
+        throws IOException {
       LOG.info("Refresh request received for nameservices: "
           + conf.get(DFS_FEDERATION_NAMESERVICES));
       List<InetSocketAddress> newAddresses =
@@ -859,8 +858,7 @@ private void checkNNVersion(NamespaceInfo nsInfo)
 
     private void connectToNNAndHandshake() throws IOException {
       // get NN proxy
-      bpNamenode =
-        (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
+      bpNamenode = (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
           DatanodeProtocol.versionID, nnAddr, dn.getConf());
 
       // First phase of the handshake with NN - get the namespace
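Aside (not part of the patch): the (long) cast removed in the DFSInputStream hunk above is redundant because Java's binary numeric promotion already widens the int argument when the other operand of Math.min is long, so the same Math.min(long, long) overload is selected either way. A minimal standalone sketch; the class name is invented for illustration:

// PromotionDemo.java -- why Math.min((long) len, ...) equals Math.min(len, ...)
public class PromotionDemo {
  public static void main(String[] args) {
    int len = 4096;
    long blockEnd = 10_000L, pos = 8_000L;

    // Explicit widening cast, as in the old code.
    int realLen1 = (int) Math.min((long) len, blockEnd - pos + 1L);
    // Without the cast: len is promoted to long automatically because the
    // second operand is long, so Math.min(long, long) is chosen anyway.
    int realLen2 = (int) Math.min(len, blockEnd - pos + 1L);

    System.out.println(realLen1 + " == " + realLen2); // 2001 == 2001
  }
}

The cleanup is purely cosmetic; the compiled bytecode for both expressions is identical.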
@@ -2120,7 +2118,7 @@ private class DataTransfer implements Runnable {
      * entire target list, the block, and the data.
      */
     DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
-        final String clientname) throws IOException {
+        final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
         DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
             + b + " (numBytes=" + b.getNumBytes() + ")"
@@ -2896,13 +2894,7 @@ public synchronized String getClusterId() {
   }
 
   public void refreshNamenodes(Configuration conf) throws IOException {
-    try {
-      blockPoolManager.refreshNamenodes(conf);
-    } catch (InterruptedException ex) {
-      IOException eio = new IOException();
-      eio.initCause(ex);
-      throw eio;
-    }
+    blockPoolManager.refreshNamenodes(conf);
   }
 
   @Override //ClientDatanodeProtocol
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index e309dc1f47..fee2760490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -459,7 +459,7 @@ private long validateIntegrity(File blockFile, long genStamp) {
       long metaFileLen = metaFile.length();
       int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
       if (!blockFile.exists() || blockFileLen == 0 ||
-          !metaFile.exists() || metaFileLen < (long)crcHeaderLen) {
+          !metaFile.exists() || metaFileLen < crcHeaderLen) {
         return 0;
       }
       checksumIn = new DataInputStream(
@@ -578,7 +578,7 @@ long getBlockPoolUsed(String bpid) throws IOException {
      * reserved capacity.
      * @return the unreserved number of bytes left in this filesystem. May be zero.
      */
-    long getCapacity() throws IOException {
+    long getCapacity() {
       long remaining = usage.getCapacity() - reserved;
       return remaining > 0 ? remaining : 0;
     }
@@ -818,7 +818,7 @@ private long getBlockPoolUsed(String bpid) throws IOException {
       return dfsUsed;
     }
 
-    private long getCapacity() throws IOException {
+    private long getCapacity() {
       long capacity = 0L;
       for (FSVolume vol : volumes) {
         capacity += vol.getCapacity();
@@ -1667,7 +1667,7 @@ private void bumpReplicaGS(ReplicaInfo replicaInfo,
     }
     if (!oldmeta.renameTo(newmeta)) {
       replicaInfo.setGenerationStamp(oldGS); // restore old GS
-      throw new IOException("Block " + (Block)replicaInfo + " reopen failed. " +
+      throw new IOException("Block " + replicaInfo + " reopen failed. " +
                             " Unable to move meta file " + oldmeta +
                             " to " + newmeta);
     }
@@ -2018,7 +2018,7 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
 
   /**
    * Find the file corresponding to the block and return it if it exists.
    */
-  File validateBlockFile(String bpid, Block b) throws IOException {
+  File validateBlockFile(String bpid, Block b) {
     //Should we check for metadata file too?
     File f = getFile(bpid, b);
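Aside (not part of the patch): the (Block)replicaInfo upcast removed in the bumpReplicaGS hunk above never changed the message, because string concatenation dispatches toString() on the object's runtime type; a cast only changes the static type, which concatenation ignores. A standalone sketch with invented stand-in classes, not the real HDFS Block/ReplicaInfo:

// UpcastDemo.java -- an upcast inside string concatenation is a no-op.
class Block {
  @Override public String toString() { return "blk_42"; }
}

class ReplicaInfo extends Block {
  @Override public String toString() { return "blk_42_gs7 (replica)"; }
}

public class UpcastDemo {
  public static void main(String[] args) {
    ReplicaInfo replica = new ReplicaInfo();
    // Both lines print the subclass's toString(): "Block blk_42_gs7 (replica)".
    System.out.println("Block " + (Block) replica);
    System.out.println("Block " + replica);
  }
}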
@@ -2327,7 +2327,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile,
       if (datanode.blockScanner != null) {
         datanode.blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo));
       }
-      DataNode.LOG.warn("Added missing block to memory " + (Block)diskBlockInfo);
+      DataNode.LOG.warn("Added missing block to memory " + diskBlockInfo);
       return;
     }
     /*
@@ -2600,7 +2600,7 @@ public synchronized void shutdownBlockPool(String bpid) {
    * get list of all bpids
    * @return list of bpids
    */
-  public String [] getBPIdlist() throws IOException {
+  public String [] getBPIdlist() {
     return volumeMap.getBlockPoolList();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 207a6787dc..ca1acdfb41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1421,7 +1421,7 @@ LocatedBlock appendFile(String src, String holder, String clientMachine)
       try {
         lb = startFileInternal(src, null, holder, clientMachine,
                         EnumSet.of(CreateFlag.APPEND),
-                        false, blockManager.maxReplication, (long)0);
+                        false, blockManager.maxReplication, 0);
       } finally {
         writeUnlock();
       }
@@ -1504,7 +1504,7 @@ LocatedBlock getAdditionalBlock(String src,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = (int)pendingFile.getReplication();
+      replication = pendingFile.getReplication();
     } finally {
       writeUnlock();
     }
@@ -2214,6 +2214,7 @@ boolean internalReleaseLease(Lease lease, String src,
     // If the penultimate block is not COMPLETE, then it must be COMMITTED.
     if(nrCompleteBlocks < nrBlocks - 2 ||
        nrCompleteBlocks == nrBlocks - 2 &&
+         curBlock != null &&
          curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
       final String message = "DIR* NameSystem.internalReleaseLease: "
         + "attempt to release a create lock on "
@@ -2299,7 +2300,7 @@ private Lease reassignLease(Lease lease, String src, String newHolder,
   }
 
   Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
-      INodeFileUnderConstruction pendingFile) throws IOException {
+      INodeFileUnderConstruction pendingFile) {
     assert hasWriteLock();
     pendingFile.setClientName(newHolder);
     return leaseManager.reassignLease(lease, src, newHolder);
@@ -2402,7 +2403,7 @@ void commitBlockSynchronization(ExtendedBlock lastblock,
                                                      newtargets[i]);
         }
       }
-      if (closeFile) {
+      if ((closeFile) && (descriptors != null)) {
         // the file is getting closed. Insert block locations into blockManager.
         // Otherwise fsck will report these blocks as MISSING, especially if the
         // blocksReceived from Datanodes take a long time to arrive.
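Aside (not part of the patch): the two guards added in the FSNamesystem hunks above (curBlock != null and descriptors != null) both rely on && short-circuiting: the null check on the left runs first, so the dereference on the right is never reached for a null reference. A standalone sketch of the pattern; the Block class and method names are invented stand-ins, not the real BlockInfoUnderConstruction API:

// NullGuardDemo.java -- short-circuit && as a null guard.
public class NullGuardDemo {
  static class Block {
    String getBlockUCState() { return "COMMITTED"; }
  }

  static boolean needsRecovery(Block curBlock) {
    // Without the null check, a null curBlock would throw
    // NullPointerException at the getBlockUCState() call.
    return curBlock != null
        && !"COMMITTED".equals(curBlock.getBlockUCState());
  }

  public static void main(String[] args) {
    System.out.println(needsRecovery(null));        // false, no NPE
    System.out.println(needsRecovery(new Block())); // false (state is COMMITTED)
  }
}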
@@ -3088,7 +3089,7 @@ private synchronized void setBlockTotal(int total) {
       this.blockTotal = total;
       this.blockThreshold = (int) (blockTotal * threshold);
       this.blockReplQueueThreshold =
-        (int) (((double) blockTotal) * replQueueThreshold);
+        (int) (blockTotal * replQueueThreshold);
       checkMode();
     }
 
@@ -3098,7 +3099,7 @@ private synchronized void setBlockTotal(int total) {
      * @param replication current replication
      */
     private synchronized void incrementSafeBlockCount(short replication) {
-      if ((int)replication == safeReplication)
+      if (replication == safeReplication)
         this.blockSafe++;
       checkMode();
     }
@@ -3230,6 +3231,7 @@ public String toString() {
     /**
      * Checks consistency of the class state.
      * This is costly and currently called only in assert.
+     * @throws IOException
      */
     private boolean isConsistent() throws IOException {
       if (blockTotal == -1 && blockSafe == -1) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 740a2b7b11..a5f11fab68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -258,11 +258,12 @@ boolean isRole(NamenodeRole that) {
    * If the service rpc is not configured returns null
    */
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
-    throws IOException {
+      throws IOException {
    return NameNode.getServiceAddress(conf, false);
   }
 
-  protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
+  protected InetSocketAddress getRpcServerAddress(Configuration conf)
+      throws IOException {
     return getAddress(conf);
   }
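Aside (not part of the patch): the two SafeModeInfo cleanups above are also pure no-ops. In a mixed int/double expression the int operand is promoted to double before the multiply, and a short operand of == is promoted to int before the comparison, so neither removed cast affected the result. A standalone sketch; the class name is invented and the variable names merely mirror the fields in the hunks:

// SafeModeCastDemo.java -- numeric promotion makes both removed casts redundant.
public class SafeModeCastDemo {
  public static void main(String[] args) {
    int blockTotal = 1_000_000;
    double replQueueThreshold = 0.95d;

    // blockTotal is promoted to double either way before the multiply.
    int withCast    = (int) (((double) blockTotal) * replQueueThreshold);
    int withoutCast = (int) (blockTotal * replQueueThreshold);
    System.out.println(withCast + " == " + withoutCast); // 950000 == 950000

    short replication = 3;
    int safeReplication = 3;
    // The short is promoted to int before ==, so the explicit cast adds nothing.
    System.out.println(((int) replication == safeReplication)
        == (replication == safeReplication)); // true
  }
}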