HDFS-4693. Some test cases in TestCheckpoint do not clean up after themselves. Contributed by Arpit Agarwal and Suresh.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1468457 13f79535-47bb-0310-9956-ffa450edef68
parent 0e01f26821
commit 1822529e88
@@ -461,6 +461,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4679. Namenode operation checks should be done in a consistent
     manner. (suresh)
 
+    HDFS-4693. Some test cases in TestCheckpoint do not clean up after
+    themselves. (Arpit Agarwal, suresh via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -290,6 +290,7 @@ private void join() {
     try {
       infoServer.join();
     } catch (InterruptedException ie) {
+      LOG.debug("Exception ", ie);
     }
   }
 
@@ -309,15 +310,25 @@ public void shutdown() {
       }
     }
     try {
-      if (infoServer != null) infoServer.stop();
+      if (infoServer != null) {
+        infoServer.stop();
+        infoServer = null;
+      }
     } catch (Exception e) {
       LOG.warn("Exception shutting down SecondaryNameNode", e);
     }
     try {
-      if (checkpointImage != null) checkpointImage.close();
+      if (checkpointImage != null) {
+        checkpointImage.close();
+        checkpointImage = null;
+      }
     } catch(IOException e) {
       LOG.warn("Exception while closing CheckpointStorage", e);
     }
+    if (namesystem != null) {
+      namesystem.shutdown();
+      namesystem = null;
+    }
   }
 
   @Override
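The shutdown() hunk above makes the secondary namenode's shutdown safe to call repeatedly by nulling each field once it has been released, and by stopping the namesystem as well. A minimal standalone sketch of that null-after-release pattern follows; the class and field names are placeholders, not the actual SecondaryNameNode members.

// Sketch of the null-after-release shutdown pattern used in the hunk above.
// "server" and "storage" are hypothetical stand-ins for the real fields
// (infoServer, checkpointImage, namesystem).
class ShutdownSketch {
  private AutoCloseable server;
  private AutoCloseable storage;

  void shutdown() {
    try {
      if (server != null) {
        server.close();
        server = null;   // clear so a repeated shutdown() is a no-op
      }
    } catch (Exception e) {
      // log and keep going: one failure should not block the other releases
    }
    try {
      if (storage != null) {
        storage.close();
        storage = null;
      }
    } catch (Exception e) {
      // ignore, same reasoning as above
    }
  }
}

Because each field is cleared as soon as it is released, a test that calls shutdown() both in its body and again in teardown does not double-close anything.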
@@ -109,6 +109,7 @@ public static void initialize() throws Exception {
     config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
     config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
     MiniDFSCluster cluster = null;
+    String bpid = null;
     try {
       // format data-node
       createEmptyDirs(new String[] {datanodeStorage.toString()});
@@ -149,6 +150,7 @@ public static void initialize() throws Exception {
       // write more files
       writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
       writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+      bpid = cluster.getNamesystem(0).getBlockPoolId();
     } finally {
       // shutdown
       if (cluster != null) cluster.shutdown();
@@ -160,7 +162,6 @@ public static void initialize() throws Exception {
     File dnCurDir = new File(datanodeStorage, "current");
     datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
 
-    String bpid = cluster.getNamesystem(0).getBlockPoolId();
     File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
                              "current");
     blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
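The three test-setup hunks above move the block pool id lookup inside the try block, so it is captured while the MiniDFSCluster is still running, and leave the cluster shutdown in the finally block. A rough sketch of that capture-before-shutdown shape, using a hypothetical helper method and omitting the surrounding test-data setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper illustrating the capture-before-shutdown pattern from
// the hunks above: read the block pool id while the cluster is up, and always
// shut the cluster down in the finally block.
class ClusterSetupSketch {
  static String formatAndGetBlockPoolId(Configuration config) throws Exception {
    MiniDFSCluster cluster = null;
    String bpid = null;
    try {
      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
      cluster.waitActive();
      // ... write test files here ...
      bpid = cluster.getNamesystem(0).getBlockPoolId(); // capture while running
    } finally {
      if (cluster != null) cluster.shutdown();          // always clean up
    }
    return bpid; // still usable after the cluster is gone
  }
}

The removed line in the last hunk read the id only after the finally block had already shut the cluster down, which is presumably why the lookup was moved inside the try.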
File diff suppressed because it is too large