HDFS-4260 Fix HDFS tests to set test dir to a valid HDFS path as opposed to the local build path (Chris Nauroth via Sanjay)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1418424 13f79535-47bb-0310-9956-ffa450edef68
Sanjay Radia 2012-12-07 18:32:27 +00:00
parent e1ba3f8158
commit ad619d34d0
13 changed files with 24 additions and 3 deletions
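
Every test-file diff below applies the same pattern: the TEST_ROOT_DIR fields in FileContextTestHelper and FileSystemTestHelper are made non-final, and each HDFS-backed test assigns them an absolute path in its @BeforeClass setup so that helper-generated paths are valid inside the MiniDFSCluster namespace instead of pointing at the local build directory. The following sketch only illustrates that pattern and is not part of the commit; the class name ExampleHdfsPathTest and the /tmp/ExampleHdfsPathTest root are hypothetical.

import java.io.IOException;
import java.net.URISyntaxException;

import javax.security.auth.login.LoginException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.BeforeClass;

// Hypothetical test class used only to illustrate the pattern in this commit.
public class ExampleHdfsPathTest {
  private static MiniDFSCluster cluster;
  private static FileContext fc;

  @BeforeClass
  public static void clusterSetup()
      throws IOException, LoginException, URISyntaxException {
    // Point the shared helper at an HDFS-style absolute path before the
    // cluster-backed FileContext is created, so helper-generated test paths
    // resolve inside the MiniDFSCluster instead of the local build tree.
    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/ExampleHdfsPathTest";
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fc = FileContext.getFileContext(cluster.getURI(0), conf);
  }
}

Because TEST_ROOT_DIR is shared mutable static state, the assignment has to happen before any helper method resolves a path, which is why each diff below places it at the top of the @BeforeClass method.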

FileContextTestHelper.java View File

@@ -32,7 +32,7 @@
  */
 public final class FileContextTestHelper {
   // The test root is relative to the <wd>/build/test/data by default
-  public static final String TEST_ROOT_DIR =
+  public static String TEST_ROOT_DIR =
       System.getProperty("test.build.data", "build/test/data") + "/test";
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_NUM_BLOCKS = 2;

FileSystemTestHelper.java View File

@@ -34,7 +34,7 @@
  */
 public final class FileSystemTestHelper {
   // The test root is relative to the <wd>/build/test/data by default
-  public static final String TEST_ROOT_DIR =
+  public static String TEST_ROOT_DIR =
       System.getProperty("test.build.data", "target/test/data") + "/test";
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_NUM_BLOCKS = 2;

CHANGES.txt (hadoop-hdfs) View File

@@ -266,6 +266,9 @@ Trunk (Unreleased)
       the nodes in the same nodegroup should also be excluded. (Junping Du
       via szetszwo)
 
+    HDFS-4260 Fix HDFS tests to set test dir to a valid HDFS path as opposed
+    to the local build path (Chris Nauroth via Sanjay)
+
   Release 2.0.3-alpha - Unreleased
 
   INCOMPATIBLE CHANGES

TestFcHdfsCreateMkdir.java View File

@@ -41,6 +41,7 @@ public class TestFcHdfsCreateMkdir extends
   @BeforeClass
   public static void clusterSetupAtBegining()
       throws IOException, LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsCreateMkdir";
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);

TestFcHdfsPermission.java View File

@@ -41,6 +41,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
   @BeforeClass
   public static void clusterSetupAtBegining()
       throws IOException, LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsPermission";
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);

TestFcHdfsSetUMask.java View File

@@ -82,6 +82,7 @@ public class TestFcHdfsSetUMask {
   @BeforeClass
   public static void clusterSetupAtBegining()
       throws IOException, LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsSetUMask";
     Configuration conf = new HdfsConfiguration();
     // set permissions very restrictive
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");

TestFcHdfsSymlink.java View File

@@ -86,6 +86,7 @@ protected IOException unwrapException(IOException e) {
   @BeforeClass
   public static void testSetUp() throws Exception {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestFcHdfsSymlink";
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");

TestHDFSFileContextMainOperations.java View File

@@ -49,6 +49,8 @@ public class TestHDFSFileContextMainOperations extends
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR =
+        "/tmp/TestHDFSFileContextMainOperations";
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);

TestViewFileSystemAtHdfsRoot.java View File

@@ -25,6 +25,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -45,6 +46,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
+    FileSystemTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFileSystemAtHdfsRoot";
     SupportsBlocks = true;
     CONF.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

TestViewFileSystemHdfs.java View File

@@ -51,6 +51,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
+    FileSystemTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFileSystemHdfs";
     SupportsBlocks = true;
     CONF.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

TestViewFsAtHdfsRoot.java View File

@@ -23,6 +23,7 @@
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -46,6 +47,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFsAtHdfsRoot";
     SupportsBlocks = true;
     CONF.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

TestViewFsHdfs.java View File

@@ -24,6 +24,7 @@
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -42,6 +43,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
+    FileContextTestHelper.TEST_ROOT_DIR = "/tmp/TestViewFsHdfs";
     SupportsBlocks = true;
     CONF.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

TestFSMainOperationsWebHdfs.java View File

@@ -28,6 +28,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSMainOperationsBaseTest;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -53,6 +54,10 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
 
   @BeforeClass
   public static void setupCluster() {
+    // Initialize the test root directory to a DFS like path
+    // since we are testing based on the MiniDFSCluster.
+    FileSystemTestHelper.TEST_ROOT_DIR = "/tmp/TestFSMainOperationsWebHdfs";
+
     final Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     try {
@@ -132,4 +137,4 @@ public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
       // also okay for HDFS.
     }
   }
-}
\ No newline at end of file
+}