diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 197315052c..acb720e544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -556,14 +556,6 @@ public class DataNodeProperties {
this.ipcPort = ipcPort;
}
- public Configuration getConf() {
- return conf;
- }
-
- public DataNode getDatanode() {
- return datanode;
- }
-
public void setDnArgs(String ... args) {
dnArgs = args;
}
@@ -1103,6 +1095,9 @@ private void configureNameService(MiniDFSNNTopology.NSConf nameservice, int nsCo
*/
public static void configureNameNodes(MiniDFSNNTopology nnTopology, boolean federation,
Configuration conf) throws IOException {
+ Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
+ "empty NN topology: no namenodes specified!");
+
if (!federation && nnTopology.countNameNodes() == 1) {
NNConf onlyNN = nnTopology.getOnlyNameNode();
// we only had one NN, set DEFAULT_NAME for it. If not explicitly
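A quick illustration of the new guard (a reviewer's sketch, not part of the patch): with the Preconditions.checkArgument above, a topology that contains no namenodes is now rejected up front with an IllegalArgumentException. The snippet assumes the public no-arg MiniDFSNNTopology constructor and plain JUnit, and calls configureNameNodes as the public static method shown in this hunk.

    package org.apache.hadoop.hdfs;

    import static org.junit.Assert.fail;

    import org.apache.hadoop.conf.Configuration;
    import org.junit.Test;

    public class EmptyTopologySketch {
      @Test
      public void emptyTopologyIsRejected() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // a topology with no nameservices/namenodes ever added
        MiniDFSNNTopology empty = new MiniDFSNNTopology();
        try {
          MiniDFSCluster.configureNameNodes(empty, false, conf);
          fail("expected the empty topology to be rejected up front");
        } catch (IllegalArgumentException expected) {
          // raised by the Preconditions.checkArgument added in this hunk
        }
      }
    }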
@@ -1617,7 +1612,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
dnConf.addResource(dnConfOverlays[i]);
}
// Set up datanode address
- setupDatanodeAddress(i, dnConf, setupHostsFile, checkDataNodeAddrConfig);
+ setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ?
null : storageTypes[i - curDatanodesNum]);
@@ -2378,7 +2373,7 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop,
conf.set(DFS_DATANODE_ADDRESS_KEY,
addr.getAddress().getHostAddress() + ":" + addr.getPort());
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
- addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
+ addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
}
final DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
@@ -2919,19 +2914,16 @@ public File getProvidedStorageDir(int dnIndex, int dirIndex) {
/**
* Get a storage directory for a datanode.
- * For examples,
*
- * - /data/dn0_data0
- * - /data/dn0_data1
- * - /data/dn1_data0
- * - /data/dn1_data1
+ * - /data/data<2*dnIndex + 1>
+ * - /data/data<2*dnIndex + 2>
*
*
* @param dnIndex datanode index (starts from 0)
* @param dirIndex directory index.
* @return Storage directory
*/
- public static File getStorageDir(int dnIndex, int dirIndex) {
+ public File getStorageDir(int dnIndex, int dirIndex) {
return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
}
@@ -2942,8 +2934,8 @@ public static File getStorageDir(int dnIndex, int dirIndex) {
* @param dirIndex directory index.
* @return storage directory path
*/
- private static String getStorageDirPath(int dnIndex, int dirIndex) {
- return "data/dn" + dnIndex + "_data" + dirIndex;
+ private String getStorageDirPath(int dnIndex, int dirIndex) {
+ return "data/data" + (storagesPerDatanode * dnIndex + 1 + dirIndex);
}
/**
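To make the renumbering concrete (an illustrative sketch only, not part of the patch): with the default of two storage directories per datanode, the two schemes map (dnIndex, dirIndex) as dn0_data0 -> data1, dn0_data1 -> data2, dn1_data0 -> data3, and so on. The helper below simply re-derives both path formats from the expressions in this hunk.

    public class StorageDirNamingSketch {
      // pre-patch layout: data/dn<dnIndex>_data<dirIndex>
      static String oldStyle(int dnIndex, int dirIndex) {
        return "data/dn" + dnIndex + "_data" + dirIndex;
      }
      // post-patch layout: data/data<storagesPerDatanode * dnIndex + 1 + dirIndex>
      static String newStyle(int storagesPerDatanode, int dnIndex, int dirIndex) {
        return "data/data" + (storagesPerDatanode * dnIndex + 1 + dirIndex);
      }
      public static void main(String[] args) {
        for (int dn = 0; dn < 2; dn++) {
          for (int dir = 0; dir < 2; dir++) {
            // e.g. dn=1, dir=0: data/dn1_data0 -> data/data3
            System.out.println(oldStyle(dn, dir) + " -> " + newStyle(2, dn, dir));
          }
        }
      }
    }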
@@ -3208,36 +3200,35 @@ public void setBlockRecoveryTimeout(long timeout) {
}
}
- protected void setupDatanodeAddress(
- int i, Configuration dnConf, boolean setupHostsFile,
- boolean checkDataNodeAddrConfig) throws IOException {
+ protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
+ boolean checkDataNodeAddrConfig) throws IOException {
if (setupHostsFile) {
- String hostsFile = dnConf.get(DFS_HOSTS, "").trim();
+ String hostsFile = conf.get(DFS_HOSTS, "").trim();
if (hostsFile.length() == 0) {
throw new IOException("Parameter dfs.hosts is not setup in conf");
}
// Setup datanode in the include file, if it is defined in the conf
String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
if (checkDataNodeAddrConfig) {
- dnConf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
+ conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
} else {
- dnConf.set(DFS_DATANODE_ADDRESS_KEY, address);
+ conf.set(DFS_DATANODE_ADDRESS_KEY, address);
}
addToFile(hostsFile, address);
LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
} else {
if (checkDataNodeAddrConfig) {
- dnConf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+ conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
} else {
- dnConf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+ conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
}
}
if (checkDataNodeAddrConfig) {
- dnConf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- dnConf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+ conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
} else {
- dnConf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
- dnConf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+ conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+ conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
}
}
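For readers unfamiliar with the checkDataNodeAddrConfig flag, the behavioural difference it toggles is just Configuration.setIfUnset versus Configuration.set, as the rewritten method shows. A minimal standalone sketch with hypothetical values (not part of the patch), using the real dfs.datanode.address key set above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DnAddrConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // pretend a test pre-configured an explicit datanode address
        conf.set("dfs.datanode.address", "127.0.0.1:9866");

        // checkDataNodeAddrConfig == true -> setIfUnset(): the existing value wins
        conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
        System.out.println(conf.get("dfs.datanode.address")); // 127.0.0.1:9866

        // checkDataNodeAddrConfig == false -> set(): the value is always overwritten
        conf.set("dfs.datanode.address", "127.0.0.1:0");
        System.out.println(conf.get("dfs.datanode.address")); // 127.0.0.1:0
      }
    }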
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
index 898ec68886..5c011e31ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
@@ -117,7 +117,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
// Set up datanode address
- setupDatanodeAddress(i, dnConf, setupHostsFile, checkDataNodeAddrConfig);
+ setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-22-dfs-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-22-dfs-dir.tgz
index 4788046dae..c4959b4509 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-22-dfs-dir.tgz and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-22-dfs-dir.tgz differ