HDFS-3709. TestStartup tests still binding to the ephemeral port. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1364865 13f79535-47bb-0310-9956-ffa450edef68
commit 1fd21078d8
parent 97ed48e035
Author: Eli Collins
Date: 2012-07-24 01:53:29 +00:00
2 changed files with 9 additions and 9 deletions
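As a rough illustration of the pattern the patch moves to, below is a minimal JUnit sketch: tests reuse one class-level config field prepared in setUp() instead of constructing a fresh HdfsConfiguration per test, so every MiniDFSCluster in the class starts from the same test-scoped settings. The class name, setUp() contents, and sample test are assumptions for illustration, not the actual TestStartup code.

// Minimal sketch (illustrative only) of the shared-config pattern this
// change adopts; names and setUp contents are assumptions, not TestStartup.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Before;
import org.junit.Test;

public class SharedConfigSketch {
  private Configuration config;

  @Before
  public void setUp() {
    // One configuration for the whole test class; individual tests only
    // layer their own keys on top of it.
    config = new HdfsConfiguration();
  }

  @Test
  public void testClusterStartsFromSharedConfig() throws Exception {
    // Build the mini cluster from the shared config rather than a
    // throw-away HdfsConfiguration created inside the test.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}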

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -525,6 +525,8 @@ Branch-2 ( Unreleased changes )
     HDFS-3608. fuse_dfs: detect changes in UID ticket cache. (Colin Patrick
     McCabe via atm)
 
+    HDFS-3709. TestStartup tests still binding to the ephemeral port. (eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

@@ -443,16 +443,15 @@ public void testImageChecksum() throws Exception {
   private void testImageChecksum(boolean compress) throws Exception {
     MiniDFSCluster cluster = null;
-    Configuration conf = new HdfsConfiguration();
     if (compress) {
-      conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
+      config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
     }
     try {
       LOG.info("\n===========================================\n" +
                "Starting empty cluster");
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(0)
         .format(true)
         .build();
@@ -479,7 +478,7 @@ private void testImageChecksum(boolean compress) throws Exception {
       LOG.info("\n===========================================\n" +
                "Starting same cluster after simulated crash");
       try {
-        cluster = new MiniDFSCluster.Builder(conf)
+        cluster = new MiniDFSCluster.Builder(config)
           .numDataNodes(0)
           .format(false)
           .build();
@@ -507,19 +506,18 @@ public void testNNRestart() throws IOException, InterruptedException {
     FileSystem localFileSys;
     Path hostsFile;
     Path excludeFile;
-    Configuration conf = new HdfsConfiguration();
     int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
     // Set up the hosts/exclude files.
-    localFileSys = FileSystem.getLocal(conf);
+    localFileSys = FileSystem.getLocal(config);
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
     // Setup conf
-    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     writeConfigFile(localFileSys, excludeFile, null);
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     // write into hosts file
     ArrayList<String>list = new ArrayList<String>();
     byte b[] = {127, 0, 0, 1};
@@ -529,7 +527,7 @@ public void testNNRestart() throws IOException, InterruptedException {
     int numDatanodes = 1;
     try {
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(numDatanodes).setupHostsFile(true).build();
       cluster.waitActive();