HDFS-2209. Make MiniDFS easier to embed in other apps.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1180077 13f79535-47bb-0310-9956-ffa450edef68
Commit: aace6cb631 (parent: 02a81203bd)
@@ -794,6 +794,8 @@ Release 0.23.0 - Unreleased
     HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
     to BlockManager. (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2209. Make MiniDFS easier to embed in other apps. (stevel)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -36,6 +36,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math.stat.descriptive.rank.Min;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -86,6 +87,10 @@ public class MiniDFSCluster {
 
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
+  /** System property to set the data dir: {@value} */
+  public static final String PROP_TEST_BUILD_DATA = "test.build.data";
+  /** Configuration option to set the data dir: {@value} */
+  public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
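
Usage sketch (editorial, not part of the commit): the two new constants let an embedding application place all of a cluster's state under a directory of its choosing. The path below is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class EmbeddedHdfsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // HDFS_MINIDFS_BASEDIR is scoped to this Configuration rather than
        // the whole JVM, so several clusters can coexist in one process
        // with disjoint storage.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/embedded-hdfs");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          System.out.println("Embedded HDFS at: " + fs.getUri());
        } finally {
          cluster.shutdown();
        }
      }
    }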
@@ -495,7 +500,7 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
                              boolean waitSafeMode, boolean setupHostsFile, boolean federation)
   throws IOException {
     this.conf = conf;
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     this.federation = federation;
     this.waitSafeMode = waitSafeMode;
@@ -504,7 +509,7 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
     String rpcEngineName = System.getProperty("hdfs.rpc.engine");
     if (rpcEngineName != null && !"".equals(rpcEngineName)) {
 
-      System.out.println("HDFS using RPCEngine: "+rpcEngineName);
+      LOG.info("HDFS using RPCEngine: " + rpcEngineName);
       try {
         Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
         setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
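
The block above keeps an existing hook: the hdfs.rpc.engine system property still lets the hosting process swap the RPC engine before the cluster starts. A hedged sketch; WritableRpcEngine is used only as an example of a loadable engine class, and any name resolvable by conf.getClassByName() would be handled the same way:

    // Assumption: must be set before MiniDFSCluster is constructed,
    // since initMiniDFSCluster() reads the property once at startup.
    System.setProperty("hdfs.rpc.engine",
        "org.apache.hadoop.ipc.WritableRpcEngine");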
@@ -858,8 +863,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        File dir1 = getStorageDir(i, 0);
-        File dir2 = getStorageDir(i, 1);
+        File dir1 = getInstanceStorageDir(i, 0);
+        File dir2 = getInstanceStorageDir(i, 1);
         dir1.mkdirs();
         dir2.mkdirs();
         if (!dir1.isDirectory() || !dir2.isDirectory()) {
@@ -875,17 +880,17 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
           simulatedCapacities[i-curDatanodesNum]);
       }
-      System.out.println("Starting DataNode " + i + " with "
+      LOG.info("Starting DataNode " + i + " with "
           + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
           + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
       if (hosts != null) {
         dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
-        System.out.println("Starting DataNode " + i + " with hostname set to: "
+        LOG.info("Starting DataNode " + i + " with hostname set to: "
             + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
-        System.out.println("Adding node with hostname : " + name + " to rack "+
+        LOG.info("Adding node with hostname : " + name + " to rack " +
             racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(name,
             racks[i-curDatanodesNum]);
@@ -903,7 +908,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
       if (racks != null) {
         int port = dn.getSelfAddr().getPort();
-        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
             racks[i-curDatanodesNum]);
@@ -1099,7 +1104,7 @@ public int getNameNodePort(int nnIndex) {
    * Shutdown all the nodes in the cluster.
    */
   public void shutdown() {
-    System.out.println("Shutting down the Mini HDFS Cluster");
+    LOG.info("Shutting down the Mini HDFS Cluster");
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : nameNodes) {
       NameNode nameNode = nnInfo.nameNode;
@@ -1139,7 +1144,7 @@ public synchronized void shutdownNameNodes() {
   public synchronized void shutdownNameNode(int nnIndex) {
     NameNode nn = nameNodes[nnIndex].nameNode;
     if (nn != null) {
-      System.out.println("Shutting down the namenode");
+      LOG.info("Shutting down the namenode");
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
@@ -1183,9 +1188,9 @@ public synchronized void restartNameNode(int nnIndex, boolean waitActive)
     nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
     if (waitActive) {
       waitClusterUp();
-      System.out.println("Restarted the namenode");
+      LOG.info("Restarted the namenode");
       waitActive();
-      System.out.println("Cluster is active");
+      LOG.info("Cluster is active");
     }
   }
 
@@ -1261,7 +1266,7 @@ public synchronized DataNodeProperties stopDataNode(int i) {
     }
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
-    System.out.println("MiniDFSCluster Stopping DataNode " +
+    LOG.info("MiniDFSCluster Stopping DataNode " +
                        dn.getMachineName() +
                        " from a total of " + (dataNodes.size() + 1) +
                        " datanodes.");
@@ -1350,7 +1355,7 @@ public synchronized boolean restartDataNodes(boolean keepPort)
     for (int i = dataNodes.size() - 1; i >= 0; i--) {
       if (!restartDataNode(i, keepPort))
         return false;
-      System.out.println("Restarted DataNode " + i);
+      LOG.info("Restarted DataNode " + i);
     }
     return true;
   }
@@ -1377,8 +1382,8 @@ public boolean isNameNodeUp(int nnIndex) {
     } catch (IOException ioe) {
       // This method above should never throw.
       // It only throws IOE since it is exposed via RPC
-      throw new AssertionError("Unexpected IOE thrown: "
-          + StringUtils.stringifyException(ioe));
+      throw (AssertionError)(new AssertionError("Unexpected IOE thrown: "
+          + StringUtils.stringifyException(ioe)).initCause(ioe));
     }
     boolean isUp = false;
     synchronized (this) {
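
The rewritten throw works around a pre-Java 7 gap: AssertionError has no (String, Throwable) constructor, and Throwable.initCause() returns Throwable, so a cast is needed to keep throwing an AssertionError while preserving the original IOException as the cause. A standalone sketch of the idiom (class and method names are illustrative):

    public class AssertionErrorWithCause {
      static void rethrow(java.io.IOException ioe) {
        // Legal: the String-based constructor leaves the cause unset, so
        // initCause() may be called exactly once; the cast restores the
        // precise AssertionError type for the throw statement.
        throw (AssertionError) new AssertionError(
            "Unexpected IOE thrown: " + ioe.getMessage()).initCause(ioe);
      }

      public static void main(String[] args) {
        try {
          rethrow(new java.io.IOException("demo"));
        } catch (AssertionError e) {
          System.out.println("cause = " + e.getCause()); // the IOException
        }
      }
    }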
@@ -1524,7 +1529,7 @@ public void waitActive() throws IOException {
         failedCount++;
         // Cached RPC connection to namenode, if any, is expected to fail once
         if (failedCount > 1) {
-          System.out.println("Tried waitActive() " + failedCount
+          LOG.warn("Tried waitActive() " + failedCount
               + " time(s) and failed, giving up. "
               + StringUtils.stringifyException(e));
           throw e;
@@ -1576,7 +1581,7 @@ private synchronized boolean shouldWait(DatanodeInfo[] dnInfo,
   }
 
   public void formatDataNodeDirs() throws IOException {
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
       throw new IOException("Cannot remove data directory: " + data_dir);
@@ -1697,8 +1702,49 @@ public String getDataDirectory() {
     return data_dir.getAbsolutePath();
   }
 
+  /**
+   * Get the base directory for this MiniDFS instance.
+   * <p/>
+   * Within the MiniDFSCluster class and any subclasses, this method should be
+   * used instead of {@link #getBaseDirectory()} which doesn't support
+   * configuration-specific base directories.
+   * <p/>
+   * First the Configuration property {@link #HDFS_MINIDFS_BASEDIR} is fetched.
+   * If non-null, this is returned.
+   * If this is null, then {@link #getBaseDirectory()} is called.
+   * @return the base directory for this instance.
+   */
+  protected String determineDfsBaseDir() {
+    String dfsdir = conf.get(HDFS_MINIDFS_BASEDIR, null);
+    if (dfsdir == null) {
+      dfsdir = getBaseDirectory();
+    }
+    return dfsdir;
+  }
+
+  /**
+   * Get the base directory for any DFS cluster whose configuration does
+   * not explicitly set it. This is done by retrieving the system property
+   * {@link #PROP_TEST_BUILD_DATA} (defaulting to "build/test/data"),
+   * and returning that directory with a subdir of /dfs.
+   * @return a directory for use as a miniDFS filesystem.
+   */
   public static String getBaseDirectory() {
-    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+    return System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+  }
+
+  /**
+   * Get a storage directory for a datanode in this specific instance of
+   * a MiniCluster.
+   *
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index (0 or 1). Index 0 provides access to the
+   *          first storage directory. Index 1 provides access to the second
+   *          storage directory.
+   * @return Storage directory
+   */
+  public File getInstanceStorageDir(int dnIndex, int dirIndex) {
+    return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**
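
Editorial note on the lookup order: determineDfsBaseDir() consults the per-instance configuration first and only then the JVM-wide fallbacks, which is what makes multiple embedded clusters per process possible. A small sketch of the fallback chain, with an illustrative path:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class BaseDirFallbackDemo {
      public static void main(String[] args) {
        // Precedence in determineDfsBaseDir():
        //   1. hdfs.minidfs.basedir from the Configuration, when set;
        //   2. else the test.build.data system property;
        //   3. else "build/test/data".
        // getBaseDirectory() then appends "/dfs/".
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "/tmp/build-data");
        // With no configuration override, the static fallback applies:
        System.out.println(MiniDFSCluster.getBaseDirectory()); // /tmp/build-data/dfs/
      }
    }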
@@ -1716,13 +1762,25 @@ public static String getBaseDirectory() {
    * @return Storage directory
    */
   public static File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
+    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**
-   * Get current directory corresponding to the datanode
-   * @param storageDir
-   * @return current directory
+   * Calculate the DN instance-specific path for appending to the base dir
+   * to determine the location of the storage of a DN instance in the mini cluster
+   * @param dnIndex datanode index
+   * @param dirIndex directory index (0 or 1).
+   * @return the storage directory path
+   */
+  private static String getStorageDirPath(int dnIndex, int dirIndex) {
+    return "data/data" + (2 * dnIndex + 1 + dirIndex);
+  }
+
+  /**
+   * Get current directory corresponding to the datanode as defined in
+   * {@link Storage#STORAGE_DIR_CURRENT}
+   * @param storageDir the storage directory of a datanode.
+   * @return the datanode current directory
    */
   public static String getDNCurrentDir(File storageDir) {
     return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
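
getStorageDirPath() maps each (datanode, directory) pair onto the flat data/dataN layout: datanode 0 owns data/data1 and data/data2, datanode 1 owns data/data3 and data/data4, and so on. A tiny self-contained check of that arithmetic (the helper is private in MiniDFSCluster, so it is mirrored here):

    public class StorageDirPathDemo {
      // Mirrors MiniDFSCluster.getStorageDirPath().
      static String storageDirPath(int dnIndex, int dirIndex) {
        return "data/data" + (2 * dnIndex + 1 + dirIndex);
      }

      public static void main(String[] args) {
        for (int dn = 0; dn < 3; dn++) {
          System.out.println("datanode " + dn + ": "
              + storageDirPath(dn, 0) + ", " + storageDirPath(dn, 1));
        }
        // prints:
        // datanode 0: data/data1, data/data2
        // datanode 1: data/data3, data/data4
        // datanode 2: data/data5, data/data6
      }
    }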
@@ -1730,8 +1788,8 @@ public static String getDNCurrentDir(File storageDir) {
 
   /**
    * Get directory corresponding to block pool directory in the datanode
-   * @param storageDir
-   * @return current directory
+   * @param storageDir the storage directory of a datanode.
+   * @return the block pool directory
    */
   public static String getBPDir(File storageDir, String bpid) {
     return getDNCurrentDir(storageDir) + bpid + "/";
@@ -1778,6 +1836,16 @@ public static File getBlockFile(File storageDir, ExtendedBlock blk) {
         blk.getBlockName());
   }
 
+  /**
+   * Shut down a cluster if it is not null
+   * @param cluster cluster reference or null
+   */
+  public static void shutdownCluster(MiniDFSCluster cluster) {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Get all files related to a block from all the datanodes
    * @param block block for which corresponding files are needed
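
The null-safe shutdownCluster() helper keeps teardown terse when a cluster may have failed to start. A sketch of the intended pattern, mirroring how the new testDualClusters below uses it (the body comment is a placeholder):

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
      // ... exercise the embedded cluster ...
    } finally {
      // Safe even if build() threw and cluster was never assigned.
      MiniDFSCluster.shutdownCluster(cluster);
    }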
@@ -83,7 +83,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
     // file disallows this Datanode to send data to another datanode.
     // However, a client is allowed access to this block.
     //
-    File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+    File storageDir = cluster.getInstanceStorageDir(0, 1);
     String bpid = cluster.getNamesystem().getBlockPoolId();
     File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     assertTrue("data directory does not exist", data_dir.exists());
@@ -142,7 +142,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
     // Now deliberately corrupt all meta blocks from the second
     // directory of the first datanode
     //
-    storageDir = MiniDFSCluster.getStorageDir(0, 1);
+    storageDir = cluster.getInstanceStorageDir(0, 1);
     data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     assertTrue("data directory does not exist", data_dir.exists());
     blocks = data_dir.listFiles();
@@ -65,7 +65,7 @@ public void testFileCorruption() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     // Now deliberately remove the blocks
-    File storageDir = MiniDFSCluster.getStorageDir(2, 0);
+    File storageDir = cluster.getInstanceStorageDir(2, 0);
     String bpid = cluster.getNamesystem().getBlockPoolId();
     File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     assertTrue("data directory does not exist", data_dir.exists());
@@ -127,11 +127,11 @@ public void testArrayOutOfBoundsException() throws Exception {
 
     // get the block
     final String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+    File storageDir = cluster.getInstanceStorageDir(0, 0);
     File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     ExtendedBlock blk = getBlock(bpid, dataDir);
     if (blk == null) {
-      storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      storageDir = cluster.getInstanceStorageDir(0, 1);
       dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       blk = getBlock(bpid, dataDir);
     }
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+/**
+ * Tests MiniDFS cluster setup/teardown and isolation.
+ * Every instance is brought up with a new data dir, to ensure that
+ * shutdown work in background threads doesn't interfere with bringing up
+ * the new cluster.
+ */
+public class TestMiniDFSCluster {
+
+  private static final String CLUSTER_1 = "cluster1";
+  private static final String CLUSTER_2 = "cluster2";
+  private static final String CLUSTER_3 = "cluster3";
+  protected String testDataPath;
+  protected File testDataDir;
+
+  @Before
+  public void setUp() {
+    testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+    testDataDir = new File(new File(testDataPath).getParentFile(),
+                           "miniclusters");
+  }
+
+  @After
+  public void tearDown() {
+    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath);
+  }
+
+  /**
+   * Verify that without system properties the cluster still comes up, provided
+   * the configuration is set
+   *
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testClusterWithoutSystemProperties() throws Throwable {
+    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+    Configuration conf = new HdfsConfiguration();
+    File testDataCluster1 = new File(testDataPath, CLUSTER_1);
+    String c1Path = testDataCluster1.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      Assert.assertEquals(c1Path + "/data", cluster.getDataDirectory());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Bring up two clusters and assert that they are in different directories.
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testDualClusters() throws Throwable {
+    File testDataCluster2 = new File(testDataPath, CLUSTER_2);
+    File testDataCluster3 = new File(testDataPath, CLUSTER_3);
+    Configuration conf = new HdfsConfiguration();
+    String c2Path = testDataCluster2.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
+    MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster3 = null;
+    try {
+      String dataDir2 = cluster2.getDataDirectory();
+      Assert.assertEquals(c2Path + "/data", dataDir2);
+      // change the data dir
+      conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+               testDataCluster3.getAbsolutePath());
+      MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+      cluster3 = builder.build();
+      String dataDir3 = cluster3.getDataDirectory();
+      Assert.assertTrue("Clusters are bound to the same directory: " + dataDir2,
+                        !dataDir2.equals(dataDir3));
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster3);
+      MiniDFSCluster.shutdownCluster(cluster2);
+    }
+  }
+
+}
@@ -63,7 +63,7 @@ public void testProcesOverReplicateBlock() throws IOException {
     DataNodeProperties dnProps = cluster.stopDataNode(0);
     // remove block scanner log to trigger block scanning
     File scanLog = new File(MiniDFSCluster.getFinalizedDir(
-        MiniDFSCluster.getStorageDir(0, 0),
+        cluster.getInstanceStorageDir(0, 0),
         cluster.getNamesystem().getBlockPoolId()).getParent().toString()
         + "/../dncp_block_verification.log.prev");
     //wait for one minute for deletion to succeed;
@@ -324,7 +324,7 @@ private int countRealBlocks(Map<String, BlockLocs> map) {
     final String bpid = cluster.getNamesystem().getBlockPoolId();
     for(int i=0; i<dn_num; i++) {
       for(int j=0; j<=1; j++) {
-        File storageDir = MiniDFSCluster.getStorageDir(i, j);
+        File storageDir = cluster.getInstanceStorageDir(i, j);
         File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         if(dir == null) {
           System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
@@ -234,8 +234,8 @@ private void testVolumeConfig(int volumesTolerated, int volumesFailed,
       // Fail the current directory since invalid storage directory perms
       // get fixed up automatically on datanode startup.
       File[] dirs = {
-          new File(MiniDFSCluster.getStorageDir(dnIndex, 0), "current"),
-          new File(MiniDFSCluster.getStorageDir(dnIndex, 1), "current") };
+          new File(cluster.getInstanceStorageDir(dnIndex, 0), "current"),
+          new File(cluster.getInstanceStorageDir(dnIndex, 1), "current") };
 
       try {
         for (int i = 0; i < volumesFailed; i++) {
@@ -274,7 +274,7 @@ public void testFailedVolumeOnStartupIsCounted() throws Exception {
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
         ).getDatanodeManager();
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
-    File dir = new File(MiniDFSCluster.getStorageDir(0, 0), "current");
+    File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");
 
     try {
       prepareDirToFail(dir);
@@ -64,10 +64,10 @@ public void testDeleteBlockPool() throws Exception {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
-      File dn2StorageDir1 = MiniDFSCluster.getStorageDir(1, 0);
-      File dn2StorageDir2 = MiniDFSCluster.getStorageDir(1, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
+      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
+      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
 
       // Although namenode is shutdown, the bp offerservice is still running
       try {
@@ -171,8 +171,8 @@ public void testDfsAdminDeleteBlockPool() throws Exception {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
 
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");
|
@ -86,9 +86,9 @@ public void testShutdown() throws Exception {
|
|||||||
cluster.waitActive();
|
cluster.waitActive();
|
||||||
final int dnIndex = 0;
|
final int dnIndex = 0;
|
||||||
String bpid = cluster.getNamesystem().getBlockPoolId();
|
String bpid = cluster.getNamesystem().getBlockPoolId();
|
||||||
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
|
File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
|
||||||
File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
|
File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
|
||||||
storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
|
storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
|
||||||
File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
|
File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
|
||||||
try {
|
try {
|
||||||
// make the data directory of the first datanode to be readonly
|
// make the data directory of the first datanode to be readonly
|
||||||
@@ -155,9 +155,9 @@ public void testReplicationError() throws Exception {
 
     // the temporary block & meta files should be deleted
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
+    File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
     File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);
+    storageDir = cluster.getInstanceStorageDir(sndNode, 1);
     File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
       Thread.sleep(100);
@@ -466,7 +466,7 @@ public void testFsckListCorruptFilesBlocks() throws Exception {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           File[] blocks = data_dir.listFiles();
           if (blocks == null)
@@ -80,7 +80,7 @@ public void testListCorruptFilesCorruptedBlock() throws Exception {
 
       // Now deliberately corrupt one block
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      File storageDir = cluster.getInstanceStorageDir(0, 1);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
@@ -163,7 +163,7 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
           + " corrupt files. Expecting None.", badFiles.size() == 0);
 
       // Now deliberately corrupt one block
-      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File storageDir = cluster.getInstanceStorageDir(0, 0);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
           cluster.getNamesystem().getBlockPoolId());
       assertTrue("data directory does not exist", data_dir.exists());
@@ -284,7 +284,7 @@ public void testlistCorruptFileBlocks() throws Exception {
       String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i = 0; i < 4; i++) {
         for (int j = 0; j <= 1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           File[] blocks = data_dir.listFiles();
           if (blocks == null)
@@ -391,7 +391,7 @@ public void testlistCorruptFileBlocksDFS() throws Exception {
       String bpid = cluster.getNamesystem().getBlockPoolId();
       // For loop through number of data directories per datanode (2)
       for (int i = 0; i < 2; i++) {
-        File storageDir = MiniDFSCluster.getStorageDir(0, i);
+        File storageDir = cluster.getInstanceStorageDir(0, i);
         File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         File[] blocks = data_dir.listFiles();
         if (blocks == null)
@@ -466,7 +466,7 @@ public void testMaxCorruptFiles() throws Exception {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           LOG.info("Removing files from " + data_dir);
           File[] blocks = data_dir.listFiles();