() {
+ @Override
+ public boolean apply(DatanodeStorageInfo storage) {
+ return storage.getState() == state;
+ }
+ });
+ }
+
/**
* For a block that has already been retrieved from the BlocksMap
* returns {@link Iterable} of the storages the block belongs to.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 3106f7f688..c1ed03ceb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -78,7 +78,6 @@ class BlockReceiver implements Closeable {
private boolean needsChecksumTranslation;
private OutputStream out = null; // to block file at local disk
private FileDescriptor outFd;
- private OutputStream cout = null; // output stream for cehcksum file
private DataOutputStream checksumOut = null; // to crc file at local disk
private int bytesPerChecksum;
private int checksumSize;
@@ -223,9 +222,8 @@ class BlockReceiver implements Closeable {
LOG.warn("Could not get file descriptor for outputstream of class " +
out.getClass());
}
- this.cout = streams.getChecksumOut();
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
- cout, HdfsConstants.SMALL_BUFFER_SIZE));
+ streams.getChecksumOut(), HdfsConstants.SMALL_BUFFER_SIZE));
// write data chunk header if creating a new replica
if (isCreate) {
BlockMetadataHeader.writeHeader(checksumOut, diskChecksum);
@@ -280,9 +278,9 @@ public void close() throws IOException {
long flushStartNanos = System.nanoTime();
checksumOut.flush();
long flushEndNanos = System.nanoTime();
- if (syncOnClose && (cout instanceof FileOutputStream)) {
+ if (syncOnClose) {
long fsyncStartNanos = flushEndNanos;
- ((FileOutputStream)cout).getChannel().force(true);
+ streams.syncChecksumOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
@@ -302,9 +300,9 @@ public void close() throws IOException {
long flushStartNanos = System.nanoTime();
out.flush();
long flushEndNanos = System.nanoTime();
- if (syncOnClose && (out instanceof FileOutputStream)) {
+ if (syncOnClose) {
long fsyncStartNanos = flushEndNanos;
- ((FileOutputStream)out).getChannel().force(true);
+ streams.syncDataOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
@@ -338,9 +336,9 @@ void flushOrSync(boolean isSync) throws IOException {
long flushStartNanos = System.nanoTime();
checksumOut.flush();
long flushEndNanos = System.nanoTime();
- if (isSync && (cout instanceof FileOutputStream)) {
+ if (isSync) {
long fsyncStartNanos = flushEndNanos;
- ((FileOutputStream)cout).getChannel().force(true);
+ streams.syncChecksumOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
@@ -349,9 +347,9 @@ void flushOrSync(boolean isSync) throws IOException {
long flushStartNanos = System.nanoTime();
out.flush();
long flushEndNanos = System.nanoTime();
- if (isSync && (out instanceof FileOutputStream)) {
+ if (isSync) {
long fsyncStartNanos = flushEndNanos;
- ((FileOutputStream)out).getChannel().force(true);
+ streams.syncDataOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
index 3866392d93..95044c825d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
+import java.io.FileOutputStream;
import java.io.OutputStream;
+import java.io.IOException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -62,4 +64,23 @@ public void close() {
IOUtils.closeStream(dataOut);
IOUtils.closeStream(checksumOut);
}
-}
\ No newline at end of file
+
+ /**
+ * Sync the data stream if it supports it.
+ */
+ public void syncDataOut() throws IOException {
+ if (dataOut instanceof FileOutputStream) {
+ ((FileOutputStream)dataOut).getChannel().force(true);
+ }
+ }
+
+ /**
+ * Sync the checksum stream if it supports it.
+ */
+ public void syncChecksumOut() throws IOException {
+ if (checksumOut instanceof FileOutputStream) {
+ ((FileOutputStream)checksumOut).getChannel().force(true);
+ }
+ }
+
+}
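(Annotation, not part of the patch.) The syncDataOut()/syncChecksumOut() helpers above move the FileOutputStream check out of BlockReceiver, so callers can request a durable sync without knowing the concrete stream types; streams not backed by a file simply make the sync a no-op. A minimal sketch of the resulting flush-then-sync sequence, assuming a ReplicaOutputStreams obtained for the replica being written (the method name and locals below are illustrative only):

    // Hedged sketch: flush buffered bytes, then let ReplicaOutputStreams decide whether to fsync.
    static void flushAndSync(ReplicaOutputStreams streams,
                             OutputStream dataOut,
                             DataOutputStream checksumOut) throws IOException {
      checksumOut.flush();        // drain buffered CRC bytes to the checksum stream
      streams.syncChecksumOut();  // force(true) only if the checksum stream is a FileOutputStream
      dataOut.flush();            // drain buffered block data
      streams.syncDataOut();      // same guarded force(true) for the data stream
    }

This mirrors the close() and flushOrSync() hunks in BlockReceiver above, where syncOnClose/isSync decide whether the sync calls are made at all.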
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index a865a24a66..0a55c2b98f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -139,6 +139,7 @@ void format(FSNamesystem fsn, String clusterId) throws IOException {
"FSImage.format should be called with an uninitialized namesystem, has " +
fileCount + " files");
NamespaceInfo ns = NNStorage.newNamespaceInfo();
+ LOG.info("Allocated new BlockPoolId: " + ns.getBlockPoolID());
ns.clusterID = clusterId;
storage.format(ns);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 40a03ce971..5018aa03f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -75,7 +75,7 @@ public final class FSImageFormatPBINode {
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
- private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class);
+ private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
public final static class Loader {
public static PermissionStatus loadPermission(long id,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 57a5274886..adc913758b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -267,7 +267,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
}
break;
default:
- LOG.warn("Unregconized section " + n);
+ LOG.warn("Unrecognized section " + n);
break;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 1f811bbeb0..49874b6a73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -58,6 +58,8 @@
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.net.NetUtils;
@@ -378,11 +380,13 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
boolean isCorrupt = lBlk.isCorrupt();
String blkName = block.toString();
DatanodeInfo[] locs = lBlk.getLocations();
- res.totalReplicas += locs.length;
+ NumberReplicas numberReplicas = namenode.getNamesystem().getBlockManager().countNodes(block.getLocalBlock());
+ int liveReplicas = numberReplicas.liveReplicas();
+ res.totalReplicas += liveReplicas;
short targetFileReplication = file.getReplication();
res.numExpectedReplicas += targetFileReplication;
- if (locs.length > targetFileReplication) {
- res.excessiveReplicas += (locs.length - targetFileReplication);
+ if (liveReplicas > targetFileReplication) {
+ res.excessiveReplicas += (liveReplicas - targetFileReplication);
res.numOverReplicatedBlocks += 1;
}
// Check if block is Corrupt
@@ -392,10 +396,10 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId() +
" block " + block.getBlockName()+"\n");
}
- if (locs.length >= minReplication)
+ if (liveReplicas >= minReplication)
res.numMinReplicatedBlocks++;
- if (locs.length < targetFileReplication && locs.length > 0) {
- res.missingReplicas += (targetFileReplication - locs.length);
+ if (liveReplicas < targetFileReplication && liveReplicas > 0) {
+ res.missingReplicas += (targetFileReplication - liveReplicas);
res.numUnderReplicatedBlocks += 1;
underReplicatedPerFile++;
if (!showFiles) {
@@ -404,7 +408,7 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
out.println(" Under replicated " + block +
". Target Replicas is " +
targetFileReplication + " but found " +
- locs.length + " replica(s).");
+ liveReplicas + " replica(s).");
}
// verify block placement policy
BlockPlacementStatus blockPlacementStatus = bpPolicy
@@ -421,13 +425,13 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
block + ". " + blockPlacementStatus.getErrorDescription());
}
report.append(i + ". " + blkName + " len=" + block.getNumBytes());
- if (locs.length == 0) {
+ if (liveReplicas == 0) {
report.append(" MISSING!");
res.addMissing(block.toString(), block.getNumBytes());
missing++;
missize += block.getNumBytes();
} else {
- report.append(" repl=" + locs.length);
+ report.append(" repl=" + liveReplicas);
if (showLocations || showRacks) {
StringBuilder sb = new StringBuilder("[");
for (int j = 0; j < locs.length; j++) {
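(Annotation, not part of the patch.) The fsck change above stops treating every entry of getLocations() as a live replica: with the READ_ONLY_SHARED storage state introduced below, a location may be a read-only path that must not count toward the replication target, so the counts now come from the BlockManager. A hedged sketch of that counting call, using only methods already referenced in this hunk:

    // Hedged sketch: per-state replica counts for a block, as used in check() above.
    NumberReplicas counts =
        namenode.getNamesystem().getBlockManager().countNodes(block.getLocalBlock());
    int live = counts.liveReplicas();   // replicas that satisfy the replication target
    // locs (the reported locations) are still used below only for printing racks/locations.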
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
index 271f71091d..09675cdf3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
@@ -28,7 +28,18 @@ public class DatanodeStorage {
/** The state of the storage. */
public enum State {
NORMAL,
- READ_ONLY
+
+ /**
+ * A storage that represents a read-only path to replicas stored on a shared storage device.
+ * Replicas on {@link #READ_ONLY_SHARED} storage are not counted towards live replicas.
+ *
+ *
+ * In certain implementations, a {@link #READ_ONLY_SHARED} storage may be correlated to
+ * its {@link #NORMAL} counterpart using the {@link DatanodeStorage#storageID}. This
+ * property should be used for debugging purposes only.
+ *
+ */
+ READ_ONLY_SHARED;
}
private final String storageID;
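(Annotation, not part of the patch.) Per the new javadoc, only NORMAL storages contribute to live replica counts; READ_ONLY_SHARED marks a read-only path to a replica on shared storage and is excluded from those counts. A tiny illustrative predicate in the style of the BlocksMap filter at the top of this patch (the helper name is hypothetical):

    // Hedged sketch: a replica counts as live only when its storage is in the NORMAL state.
    static boolean countsAsLive(DatanodeStorageInfo storage) {
      return storage.getState() == DatanodeStorage.State.NORMAL;
    }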
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 160f953b29..5e78dcfe7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -50,7 +50,7 @@ message DatanodeRegistrationProto {
message DatanodeStorageProto {
enum StorageState {
NORMAL = 0;
- READ_ONLY = 1;
+ READ_ONLY_SHARED = 1;
}
required string storageUuid = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
index f3b3ad2af5..01d2c85afe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
@@ -68,9 +68,10 @@ public class DataNodeCluster {
static String dataNodeDirs = DATANODE_DIRS;
static final String USAGE =
"Usage: datanodecluster " +
- " -n " +
+ " -n " +
+ " -bpid " +
" [-racks ] " +
- " [-simulated] " +
+ " [-simulated []] " +
" [-inject startingBlockId numBlocksPerDN]" +
" [-r replicationFactorForInjectedBlocks]" +
" [-d dataNodeDirs]\n" +
@@ -91,7 +92,7 @@ static void printUsageExit(String err) {
printUsageExit();
}
- public static void main(String[] args) {
+ public static void main(String[] args) throws InterruptedException {
int numDataNodes = 0;
int numRacks = 0;
boolean inject = false;
@@ -99,6 +100,8 @@ public static void main(String[] args) {
int numBlocksPerDNtoInject = 0;
int replication = 1;
boolean checkDataNodeAddrConfig = false;
+ long simulatedCapacityPerDn = SimulatedFSDataset.DEFAULT_CAPACITY;
+ String bpid = null;
Configuration conf = new HdfsConfiguration();
@@ -115,7 +118,7 @@ public static void main(String[] args) {
numRacks = Integer.parseInt(args[i]);
} else if (args[i].equals("-r")) {
if (++i >= args.length || args[i].startsWith("-")) {
- printUsageExit("Missing replicaiton factor");
+ printUsageExit("Missing replication factor");
}
replication = Integer.parseInt(args[i]);
} else if (args[i].equals("-d")) {
@@ -125,6 +128,14 @@ public static void main(String[] args) {
dataNodeDirs = args[i];
} else if (args[i].equals("-simulated")) {
SimulatedFSDataset.setFactory(conf);
+ if ((i+1) < args.length && !args[i+1].startsWith("-")) {
+ simulatedCapacityPerDn = Long.parseLong(args[++i]);
+ }
+ } else if (args[i].equals("-bpid")) {
+ if (++i >= args.length || args[i].startsWith("-")) {
+ printUsageExit("Missing blockpoolid parameter");
+ }
+ bpid = args[i];
} else if (args[i].equals("-inject")) {
if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
System.out.print("-inject is valid only for simulated");
@@ -153,6 +164,9 @@ public static void main(String[] args) {
printUsageExit("Replication must be less than or equal to numDataNodes");
}
+ if (bpid == null) {
+ printUsageExit("BlockPoolId must be provided");
+ }
String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
if (nameNodeAdr == null) {
System.out.println("No name node address and port in config");
@@ -162,9 +176,14 @@ public static void main(String[] args) {
System.out.println("Starting " + numDataNodes +
(simulated ? " Simulated " : " ") +
" Data Nodes that will connect to Name Node at " + nameNodeAdr);
-
+
System.setProperty("test.build.data", dataNodeDirs);
+ long simulatedCapacities[] = new long[numDataNodes];
+ for (int i = 0; i < numDataNodes; ++i) {
+ simulatedCapacities[i] = simulatedCapacityPerDn;
+ }
+
MiniDFSCluster mc = new MiniDFSCluster();
try {
mc.formatDataNodeDirs();
@@ -182,13 +201,12 @@ public static void main(String[] args) {
//rack4DataNode[i] = racks[i%numRacks];
rack4DataNode[i] = rackPrefix + "-" + i%numRacks;
System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
-
-
}
}
try {
mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
- rack4DataNode, null, null, false, checkDataNodeAddrConfig);
+ rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
+ Thread.sleep(10*1000); // Give the DN some time to connect to NN and init storage directories.
if (inject) {
long blockSize = 10;
System.out.println("Injecting " + numBlocksPerDNtoInject +
@@ -203,7 +221,7 @@ public static void main(String[] args) {
}
for (int i = 1; i <= replication; ++i) {
// inject blocks for dn_i into dn_i and replica in dn_i's neighbors
- mc.injectBlocks((i_dn + i- 1)% numDataNodes, Arrays.asList(blocks));
+ mc.injectBlocks((i_dn + i- 1)% numDataNodes, Arrays.asList(blocks), bpid);
System.out.println("Injecting blocks of dn " + i_dn + " into dn" +
((i_dn + i- 1)% numDataNodes));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index f163faea63..97c9a456a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -157,6 +157,7 @@ public static class Builder {
private boolean checkExitOnShutdown = true;
private boolean checkDataNodeAddrConfig = false;
private boolean checkDataNodeHostConfig = false;
+ private Configuration[] dnConfOverlays;
public Builder(Configuration conf) {
this.conf = conf;
@@ -333,6 +334,19 @@ public Builder nnTopology(MiniDFSNNTopology topology) {
return this;
}
+ /**
+ * Default: null
+ *
+ * An array of {@link Configuration} objects that will overlay the
+ * global MiniDFSCluster Configuration for the corresponding DataNode.
+ *
+ * Useful for setting specific per-DataNode configuration parameters.
+ */
+ public Builder dataNodeConfOverlays(Configuration[] dnConfOverlays) {
+ this.dnConfOverlays = dnConfOverlays;
+ return this;
+ }
+
/**
* Construct the actual MiniDFSCluster
*/
@@ -375,7 +389,8 @@ protected MiniDFSCluster(Builder builder) throws IOException {
builder.nnTopology,
builder.checkExitOnShutdown,
builder.checkDataNodeAddrConfig,
- builder.checkDataNodeHostConfig);
+ builder.checkDataNodeHostConfig,
+ builder.dnConfOverlays);
}
public class DataNodeProperties {
@@ -625,7 +640,7 @@ public MiniDFSCluster(int nameNodePort,
manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
operation, null, racks, hosts,
simulatedCapacities, null, true, false,
- MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false);
+ MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
}
private void initMiniDFSCluster(
@@ -638,7 +653,8 @@ private void initMiniDFSCluster(
boolean waitSafeMode, boolean setupHostsFile,
MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
boolean checkDataNodeAddrConfig,
- boolean checkDataNodeHostConfig)
+ boolean checkDataNodeHostConfig,
+ Configuration[] dnConfOverlays)
throws IOException {
ExitUtil.disableSystemExit();
@@ -703,7 +719,7 @@ private void initMiniDFSCluster(
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
- checkDataNodeAddrConfig, checkDataNodeHostConfig);
+ checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -1110,7 +1126,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
long[] simulatedCapacities,
boolean setupHostsFile) throws IOException {
startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
- simulatedCapacities, setupHostsFile, false, false);
+ simulatedCapacities, setupHostsFile, false, false, null);
}
/**
@@ -1124,7 +1140,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException {
startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
- simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
+ simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
}
/**
@@ -1151,7 +1167,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
* @param setupHostsFile add new nodes to dfs hosts files
* @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
* @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
- *
+ * @param dnConfOverlays An array of {@link Configuration} objects that will overlay the
+ * global MiniDFSCluster Configuration for the corresponding DataNode.
* @throws IllegalStateException if NameNode has been shutdown
*/
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -1160,7 +1177,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
- boolean checkDataNodeHostConfig) throws IOException {
+ boolean checkDataNodeHostConfig,
+ Configuration[] dnConfOverlays) throws IOException {
if (operation == StartupOption.RECOVER) {
return;
}
@@ -1200,6 +1218,13 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+ simulatedCapacities.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
+
+ if (dnConfOverlays != null
+ && numDataNodes > dnConfOverlays.length) {
+ throw new IllegalArgumentException( "The length of dnConfOverlays ["
+ + dnConfOverlays.length
+ + "] is less than the number of datanodes [" + numDataNodes + "].");
+ }
String [] dnArgs = (operation == null ||
operation != StartupOption.ROLLBACK) ?
@@ -1208,6 +1233,9 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
+ if (dnConfOverlays != null) {
+ dnConf.addResource(dnConfOverlays[i]);
+ }
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
@@ -2057,17 +2085,19 @@ public List