From c8182ea76412e49c0c98ee252321c584fabb4c59 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Wed, 26 Feb 2014 17:07:56 +0000
Subject: [PATCH] HDFS-6020. Fix the five findbugs warnings. Contributed by
 Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1572165 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES_HDFS-5535.txt            |  1 +
 .../hdfs/server/datanode/BlockReceiver.java      |  5 +++--
 .../datanode/fsdataset/impl/BlockPoolSlice.java  | 16 ++++++++++++----
 3 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
index 2e3e1c3016..ec8e7f50ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
@@ -104,3 +104,4 @@ HDFS-5535 subtasks:
       TestRollingUpgrade and TestRollingUpgradeRollback. (Haohui Mai via
       Arpit Agarwal)
 
+    HDFS-6020. Fix the five findbugs warnings. (kihwal)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 3e6f5c8a41..03dcb6d9d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -752,8 +752,9 @@ void receiveBlock(
           File blockFile = ((ReplicaInPipeline)replicaInfo).getBlockFile();
           File restartMeta = new File(blockFile.getParent() +
               File.pathSeparator + "." + blockFile.getName() + ".restart");
-          if (restartMeta.exists()) {
-            restartMeta.delete();
+          if (restartMeta.exists() && !restartMeta.delete()) {
+            LOG.warn("Failed to delete restart meta file: " +
+                restartMeta.getPath());
           }
           try {
             FileWriter out = new FileWriter(restartMeta);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index f77f12b886..6eeb23e529 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -201,23 +201,28 @@ long loadDfsUsed() {
    */
   void saveDfsUsed() {
     File outFile = new File(currentDir, DU_CACHE_FILE);
-    if (outFile.exists()) {
-      outFile.delete();
+    if (outFile.exists() && !outFile.delete()) {
+      FsDatasetImpl.LOG.warn("Failed to delete old dfsUsed file in " +
+          outFile.getParent());
     }
 
+    FileWriter out = null;
     try {
       long used = getDfsUsed();
       if (used > 0) {
-        FileWriter out = new FileWriter(outFile);
+        out = new FileWriter(outFile);
         // mtime is written last, so that truncated writes won't be valid.
         out.write(Long.toString(used) + " " + Long.toString(Time.now()));
         out.flush();
         out.close();
+        out = null;
       }
     } catch (IOException ioe) {
       // If write failed, the volume might be bad. Since the cache file is
       // not critical, log the error and continue.
FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe); + } finally { + IOUtils.cleanup(null, out); } } @@ -297,7 +302,10 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized loadRwr = false; } sc.close(); - restartMeta.delete(); + if (restartMeta.delete()) { + FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + + restartMeta.getPath()); + } } catch (FileNotFoundException fnfe) { // nothing to do here } finally {