HDFS-6020. Fix the five findbugs warnings. Contributed by Kihwal Lee.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1572165 13f79535-47bb-0310-9956-ffa450edef68
parent 9df1c533dc
commit c8182ea764

@@ -104,3 +104,4 @@ HDFS-5535 subtasks:
     TestRollingUpgrade and TestRollingUpgradeRollback. (Haohui Mai via
     Arpit Agarwal)
 
+    HDFS-6020. Fix the five findbugs warnings. (kihwal)
@@ -752,8 +752,9 @@ void receiveBlock(
         File blockFile = ((ReplicaInPipeline)replicaInfo).getBlockFile();
         File restartMeta = new File(blockFile.getParent() +
             File.pathSeparator + "." + blockFile.getName() + ".restart");
-        if (restartMeta.exists()) {
-          restartMeta.delete();
+        if (restartMeta.exists() && !restartMeta.delete()) {
+          LOG.warn("Failed to delete restart meta file: " +
+              restartMeta.getPath());
         }
         try {
           FileWriter out = new FileWriter(restartMeta);
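Both sides of this hunk show the fix pattern for the first of the findbugs warnings: a bare restartMeta.delete() discards the boolean result, which findbugs flags (the RV_RETURN_VALUE_IGNORED_BAD_PRACTICE pattern for java.io.File.delete()), so the result is folded into the exists() condition and a failure is logged. A minimal standalone sketch of the same idiom; the class and file names below are illustrative, not part of the patch:

    import java.io.File;

    public class DeleteCheckDemo {
      public static void main(String[] args) {
        File stale = new File("stale.restart");  // hypothetical file
        // A bare stale.delete() would trip the findbugs check; testing the
        // return value and logging on failure is the fix applied above.
        if (stale.exists() && !stale.delete()) {
          System.err.println("Failed to delete: " + stale.getPath());
        }
      }
    }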
@@ -201,23 +201,28 @@ long loadDfsUsed() {
    */
   void saveDfsUsed() {
     File outFile = new File(currentDir, DU_CACHE_FILE);
-    if (outFile.exists()) {
-      outFile.delete();
+    if (outFile.exists() && !outFile.delete()) {
+      FsDatasetImpl.LOG.warn("Failed to delete old dfsUsed file in " +
+          outFile.getParent());
     }
 
+    FileWriter out = null;
     try {
       long used = getDfsUsed();
       if (used > 0) {
-        FileWriter out = new FileWriter(outFile);
+        out = new FileWriter(outFile);
         // mtime is written last, so that truncated writes won't be valid.
         out.write(Long.toString(used) + " " + Long.toString(Time.now()));
         out.flush();
         out.close();
+        out = null;
       }
     } catch (IOException ioe) {
       // If write failed, the volume might be bad. Since the cache file is
       // not critical, log the error and continue.
       FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe);
+    } finally {
+      IOUtils.cleanup(null, out);
     }
   }
 
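Besides another checked delete(), this hunk fixes a warning about a stream left open on exception paths in what appears to be BlockPoolSlice.saveDfsUsed(): the FileWriter is hoisted out of the inner block and released in finally via Hadoop's IOUtils.cleanup(Log, Closeable...), which quietly closes any non-null arguments. Setting out = null immediately after the successful close() is what keeps the finally block from closing the writer a second time. A self-contained sketch of the same shape, assuming org.apache.hadoop.io.IOUtils is on the classpath (class name and file content here are illustrative):

    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;

    import org.apache.hadoop.io.IOUtils;

    public class CacheFileWriter {
      // Mirrors the write-flush-close-null pattern from the patch above.
      public void save(File outFile, long used) {
        FileWriter out = null;
        try {
          out = new FileWriter(outFile);
          // Timestamp written last, so a truncated write is detectably invalid.
          out.write(used + " " + System.currentTimeMillis());
          out.flush();
          out.close();
          out = null;  // closed cleanly; finally has nothing left to do
        } catch (IOException ioe) {
          // The cache file is not critical: log and continue.
          System.err.println("Failed to write " + outFile + ": " + ioe);
        } finally {
          IOUtils.cleanup(null, out);  // no-op when out is null
        }
      }
    }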
@@ -297,7 +302,10 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized
           loadRwr = false;
         }
         sc.close();
-        restartMeta.delete();
+        if (!restartMeta.delete()) {
+          FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " +
+              restartMeta.getPath());
+        }
       } catch (FileNotFoundException fnfe) {
         // nothing to do here
       } finally {
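This last hunk applies the same checked-delete idiom inside addToReplicasMap(): the restart metadata is best-effort recovery state, so a failed delete is only logged through FsDatasetImpl.LOG rather than propagated as an error.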