HDFS-12289. [READ] HDFS-12091 breaks the tests for provided block reads
parent 2407c9b93a
commit aca023b72c
MiniDFSCluster.java

@@ -147,6 +147,9 @@ public class MiniDFSCluster implements AutoCloseable {
       GenericTestUtils.SYSPROP_TEST_DATA_DIR;
   /** Configuration option to set the data dir: {@value} */
   public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
+  /** Configuration option to set the provided data dir: {@value} */
+  public static final String HDFS_MINIDFS_BASEDIR_PROVIDED =
+      "hdfs.minidfs.basedir.provided";
   public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
       = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
   public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY
@@ -1397,7 +1400,12 @@ String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOExcept
       if ((storageTypes != null) && (j >= storageTypes.length)) {
         break;
       }
-      File dir = getInstanceStorageDir(dnIndex, j);
+      File dir;
+      if (storageTypes != null && storageTypes[j] == StorageType.PROVIDED) {
+        dir = getProvidedStorageDir(dnIndex, j);
+      } else {
+        dir = getInstanceStorageDir(dnIndex, j);
+      }
       dir.mkdirs();
       if (!dir.isDirectory()) {
         throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
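For context, the new branch only fires when a datanode storage location is declared as PROVIDED. A minimal sketch of such a setup, assuming the standard MiniDFSCluster.Builder API and that the remaining PROVIDED configuration (block alias map, provided volume, etc.) is already in place as in the test changed below; this is illustrative, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ProvidedStorageTypeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Point the single PROVIDED storage at a directory of our choosing;
    // without this key the new getter falls back to the instance dir.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
        "/tmp/provided-image");

    // Declaring storage 0 as PROVIDED makes makeDataNodeDirs resolve it via
    // getProvidedStorageDir, while storage 1 (DISK) still goes through
    // getInstanceStorageDir.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageTypes(
            new StorageType[] {StorageType.PROVIDED, StorageType.DISK})
        .build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}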
@@ -2846,6 +2854,26 @@ public File getInstanceStorageDir(int dnIndex, int dirIndex) {
     return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
   }

+  /**
+   * Get a storage directory for PROVIDED storages.
+   * The PROVIDED directory to return can be set by using the configuration
+   * parameter {@link #HDFS_MINIDFS_BASEDIR_PROVIDED}. If this parameter is
+   * not set, this function behaves exactly the same as
+   * {@link #getInstanceStorageDir(int, int)}. Currently, the two parameters
+   * are ignored as only one PROVIDED storage is supported in HDFS-9806.
+   *
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index
+   * @return Storage directory
+   */
+  public File getProvidedStorageDir(int dnIndex, int dirIndex) {
+    String base = conf.get(HDFS_MINIDFS_BASEDIR_PROVIDED, null);
+    if (base == null) {
+      return getInstanceStorageDir(dnIndex, dirIndex);
+    }
+    return new File(base);
+  }
+
   /**
    * Get a storage directory for a datanode.
    * <ol>
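The new getter simply consults the configuration key and otherwise falls back to the per-instance directory. A minimal standalone sketch of that lookup, assuming only a Hadoop test classpath; the path /tmp/provided-image and the class name are illustrative:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ProvidedBaseDirSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // Key unset: getProvidedStorageDir(dnIndex, dirIndex) behaves like
    // getInstanceStorageDir(dnIndex, dirIndex), i.e. the usual per-instance
    // test directory.
    System.out.println(
        conf.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED)); // null

    // Key set: every PROVIDED storage resolves to this single directory,
    // independent of dnIndex/dirIndex (only one PROVIDED storage exists).
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
        "/tmp/provided-image");
    File providedDir =
        new File(conf.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED));
    System.out.println(providedDir); // /tmp/provided-image
  }
}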
TestNameNodeProvidedImplementation.java

@@ -74,7 +74,7 @@ public class TestNameNodeProvidedImplementation {
   final Random r = new Random();
   final File fBASE = new File(MiniDFSCluster.getBaseDirectory());
   final Path BASE = new Path(fBASE.toURI().toString());
-  final Path NAMEPATH = new Path(BASE, "providedDir");;
+  final Path NAMEPATH = new Path(BASE, "providedDir");
   final Path NNDIRPATH = new Path(BASE, "nnDir");
   final Path BLOCKFILE = new Path(NNDIRPATH, "blocks.csv");
   final String SINGLEUSER = "usr1";
@@ -116,6 +116,8 @@ public void setSeed() throws Exception {
         BLOCKFILE.toString());
     conf.set(DFSConfigKeys.DFS_PROVIDED_BLOCK_MAP_DELIMITER, ",");

+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
+        new File(NAMEPATH.toUri()).toString());
     File imageDir = new File(NAMEPATH.toUri());
     if (!imageDir.exists()) {
       LOG.info("Creating directory: " + imageDir);