diff --git a/CHANGES.txt b/CHANGES.txt
index 80e8640a84..d2403da9f2 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -309,6 +309,9 @@ Trunk (unreleased changes)
HADOOP-6926. SocketInputStream incorrectly implements read().
(Todd Lipcon via tomwhite)
+ HADOOP-6899. RawLocalFileSystem#setWorkingDir() does not work for relative names
+ (Sanjay Radia)
+
Release 0.21.1 - Unreleased
IMPROVEMENTS
diff --git a/src/java/org/apache/hadoop/fs/Path.java b/src/java/org/apache/hadoop/fs/Path.java
index 496d9f933e..8ac3b6ae53 100644
--- a/src/java/org/apache/hadoop/fs/Path.java
+++ b/src/java/org/apache/hadoop/fs/Path.java
@@ -189,6 +189,15 @@ public FileSystem getFileSystem(Configuration conf) throws IOException {
return FileSystem.get(this.toUri(), conf);
}
+ /**
+ * Returns true if the path component of this URI is absolute (i.e. it
+ * begins with a slash) AND the scheme is null AND the authority is null.
+ */
+ public boolean isAbsoluteAndSchemeAuthorityNull() {
+ return (isUriPathAbsolute() &&
+ uri.getScheme() == null && uri.getAuthority() == null);
+ }
+
/**
* True if the path component (i.e. directory) of this URI is absolute.
*/
diff --git a/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 266510aa45..413ffd9383 100644
--- a/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -53,6 +53,14 @@ public RawLocalFileSystem() {
workingDir = getInitialWorkingDirectory();
}
+ private Path makeAbsolute(Path f) {
+ if (f.isAbsolute()) {
+ return f;
+ } else {
+ return new Path(workingDir, f);
+ }
+ }
+
/** Convert a path to a File. */
public File pathToFile(Path path) {
checkPath(path);
@@ -368,7 +376,9 @@ public Path getHomeDirectory() {
*/
@Override
public void setWorkingDirectory(Path newDir) {
- workingDir = newDir;
+ workingDir = makeAbsolute(newDir);
+ checkPath(workingDir);
+
}
@Override
@@ -545,4 +555,4 @@ private static String execCommand(File f, String... cmd) throws IOException {
return output;
}
-}
+}
\ No newline at end of file
diff --git a/src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java b/src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java
index fa7116daf7..e708063326 100644
--- a/src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java
+++ b/src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java
@@ -34,7 +34,7 @@ public class UnsupportedFileSystemException extends IOException {
* Constructs exception with the specified detail message.
* @param message exception message.
*/
- UnsupportedFileSystemException(final String message) {
+ public UnsupportedFileSystemException(final String message) {
super(message);
}
}
diff --git a/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java b/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
new file mode 100644
index 0000000000..9e976fb9ea
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
@@ -0,0 +1,1071 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+
+
+import org.apache.hadoop.fs.Options.Rename;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+
+/**
+ *
+ * A collection of tests for the {@link FileSystem}.
+ * This test should be used for testing an instance of FileSystem
+ * that has been initialized to a specific default FileSystem such as
+ * LocalFileSystem, HDFS, S3, etc.
+ *
+ *
+ * To test a given {@link FileSystem} implementation create a subclass of this
+ * test and override {@link #setUp()} to initialize the fSys
+ * {@link FileSystem} instance variable.
+ *
+ * Since this is a junit 4 test you can also do a single setup before
+ * the start of any tests.
+ * E.g.
+ * @BeforeClass public static void clusterSetupAtBeginning()
+ * @AfterClass public static void ClusterShutdownAtEnd()
+ *
+ */
+public abstract class FSMainOperationsBaseTest {
+
+ private static String TEST_DIR_AAA2 = "test/hadoop2/aaa";
+ private static String TEST_DIR_AAA = "test/hadoop/aaa";
+ private static String TEST_DIR_AXA = "test/hadoop/axa";
+ private static String TEST_DIR_AXX = "test/hadoop/axx";
+ private static int numBlocks = 2;
+
+ static final String LOCAL_FS_ROOT_URI = "file:///tmp/test";
+
+
+ protected static FileSystem fSys;
+
+ final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+ public boolean accept(final Path file) {
+ return true;
+ }
+ };
+
+ // A test filter which accepts any path containing an "x" or "X"
+ final private static PathFilter TEST_X_FILTER = new PathFilter() {
+ public boolean accept(Path file) {
+ if(file.getName().contains("x") || file.toString().contains("X"))
+ return true;
+ else
+ return false;
+ }
+ };
+
+ private static byte[] data = getFileData(numBlocks,
+ getDefaultBlockSize());
+
+ @Before
+ public void setUp() throws Exception {
+ fSys.mkdirs(getTestRootPath(fSys, "test"));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ fSys.delete(new Path(getAbsoluteTestRootPath(fSys), new Path("test")), true);
+ fSys.delete(new Path(LOCAL_FS_ROOT_URI), true);
+ }
+
+
+ protected Path getDefaultWorkingDirectory() throws IOException {
+ return getTestRootPath(fSys,
+ "/user/" + System.getProperty("user.name")).makeQualified(
+ fSys.getUri(), fSys.getWorkingDirectory());
+ }
+
+ protected boolean renameSupported() {
+ return true;
+ }
+
+
+ protected IOException unwrapException(IOException e) {
+ return e;
+ }
+
+ @Test
+ public void testFsStatus() throws Exception {
+ FsStatus fsStatus = fSys.getStatus(null);
+ Assert.assertNotNull(fsStatus);
+ //used, free and capacity are non-negative longs
+ Assert.assertTrue(fsStatus.getUsed() >= 0);
+ Assert.assertTrue(fsStatus.getRemaining() >= 0);
+ Assert.assertTrue(fsStatus.getCapacity() >= 0);
+ }
+
+ @Test
+ public void testWorkingDirectory() throws Exception {
+
+ // First we cd to our test root
+ Path workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
+ fSys.setWorkingDirectory(workDir);
+ Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+
+ fSys.setWorkingDirectory(new Path("."));
+ Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+
+ fSys.setWorkingDirectory(new Path(".."));
+ Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());
+
+ // cd using a relative path
+
+ // Go back to our test root
+ workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
+ fSys.setWorkingDirectory(workDir);
+ Assert.assertEquals(workDir, fSys.getWorkingDirectory());
+
+ Path relativeDir = new Path("existingDir1");
+ Path absoluteDir = new Path(workDir,"existingDir1");
+ fSys.mkdirs(absoluteDir);
+ fSys.setWorkingDirectory(relativeDir);
+ Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+ // cd using a absolute path
+ absoluteDir = getTestRootPath(fSys, "test/existingDir2");
+ fSys.mkdirs(absoluteDir);
+ fSys.setWorkingDirectory(absoluteDir);
+ Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+
+ // Now open a file relative to the wd we just set above.
+ Path absolutePath = new Path(absoluteDir, "foo");
+ FileSystemTestHelper.createFile(fSys, absolutePath);
+ fSys.open(new Path("foo")).close();
+
+
+ // Now mkdir relative to the dir we cd'ed to
+ fSys.mkdirs(new Path("newDir"));
+ Assert.assertTrue(isDir(fSys, new Path(absoluteDir, "newDir")));
+
+ /**
+ * We cannot test this because FileSystem has never checked for
+ * existence of working dir - fixing this would break compatibility.
+ *
+ absoluteDir = getTestRootPath(fSys, "nonexistingPath");
+ try {
+ fSys.setWorkingDirectory(absoluteDir);
+ Assert.fail("cd to non existing dir should have failed");
+ } catch (Exception e) {
+ // Exception as expected
+ }
+ */
+ }
+
+
+ // Try a URI
+
+ @Test
+ public void testWDAbsolute() throws IOException {
+ Path absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
+ fSys.mkdirs(absoluteDir);
+ fSys.setWorkingDirectory(absoluteDir);
+ Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+ }
+
+ @Test
+ public void testMkdirs() throws Exception {
+ Path testDir = getTestRootPath(fSys, "test/hadoop");
+ Assert.assertFalse(exists(fSys, testDir));
+ Assert.assertFalse(isFile(fSys, testDir));
+
+ fSys.mkdirs(testDir);
+
+ Assert.assertTrue(exists(fSys, testDir));
+ Assert.assertFalse(isFile(fSys, testDir));
+
+ fSys.mkdirs(testDir);
+
+ Assert.assertTrue(exists(fSys, testDir));
+ Assert.assertFalse(isFile(fSys, testDir));
+
+ Path parentDir = testDir.getParent();
+ Assert.assertTrue(exists(fSys, parentDir));
+ Assert.assertFalse(isFile(fSys, parentDir));
+
+ Path grandparentDir = parentDir.getParent();
+ Assert.assertTrue(exists(fSys, grandparentDir));
+ Assert.assertFalse(isFile(fSys, grandparentDir));
+
+ }
+
+ @Test
+ public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
+ Path testDir = getTestRootPath(fSys, "test/hadoop");
+ Assert.assertFalse(exists(fSys, testDir));
+ fSys.mkdirs(testDir);
+ Assert.assertTrue(exists(fSys, testDir));
+
+ createFile(getTestRootPath(fSys, "test/hadoop/file"));
+
+ Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
+ try {
+ fSys.mkdirs(testSubDir);
+ Assert.fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ Assert.assertFalse(exists(fSys, testSubDir));
+
+ Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
+ try {
+ fSys.mkdirs(testDeepSubDir);
+ Assert.fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ Assert.assertFalse(exists(fSys, testDeepSubDir));
+
+ }
+
+ @Test
+ public void testGetFileStatusThrowsExceptionForNonExistentFile()
+ throws Exception {
+ try {
+ fSys.getFileStatus(getTestRootPath(fSys, "test/hadoop/file"));
+ Assert.fail("Should throw FileNotFoundException");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
+
+ public void testListStatusThrowsExceptionForNonExistentFile()
+ throws Exception {
+ try {
+ fSys.listStatus(getTestRootPath(fSys, "test/hadoop/file"));
+ Assert.fail("Should throw FileNotFoundException");
+ } catch (FileNotFoundException fnfe) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testListStatus() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, "test/hadoop/a"),
+ getTestRootPath(fSys, "test/hadoop/b"),
+ getTestRootPath(fSys, "test/hadoop/c/1"), };
+ Assert.assertFalse(exists(fSys, testDirs[0]));
+
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+
+ // test listStatus that returns an array
+ FileStatus[] paths = fSys.listStatus(getTestRootPath(fSys, "test"));
+ Assert.assertEquals(1, paths.length);
+ Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), paths[0].getPath());
+
+ paths = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
+ Assert.assertEquals(3, paths.length);
+
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, "test/hadoop/a"),
+ paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, "test/hadoop/b"),
+ paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, "test/hadoop/c"),
+ paths));
+
+ paths = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
+ Assert.assertEquals(0, paths.length);
+
+ }
+
+ @Test
+ public void testListStatusFilterWithNoMatches() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA2),
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ // listStatus with filters returns empty correctly
+ FileStatus[] filteredPaths = fSys.listStatus(
+ getTestRootPath(fSys, "test"), TEST_X_FILTER);
+ Assert.assertEquals(0,filteredPaths.length);
+
+ }
+
+ public void testListStatusFilterWithSomeMatches() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AAA2), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ // should return 2 paths ("/test/hadoop/axa" and "/test/hadoop/axx")
+ FileStatus[] filteredPaths = fSys.listStatus(
+ getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
+ Assert.assertEquals(2,filteredPaths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXA), filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXX), filteredPaths));
+ }
+
+ @Test
+ public void testGlobStatusThrowsExceptionForNonExistentFile() throws Exception {
+ try {
+ // This should throw a FileNotFoundException
+ fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoopfsdf/?"));
+ Assert.fail("Should throw FileNotFoundException");
+ } catch (FileNotFoundException fnfe) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testGlobStatusWithNoMatchesInPath() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AAA2), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ // should return nothing
+ FileStatus[] paths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/?"));
+ Assert.assertEquals(0, paths.length);
+ }
+
+ @Test
+ public void testGlobStatusSomeMatchesInDirectories() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AAA2), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ // Should return two items ("/test/hadoop" and "/test/hadoop2")
+ FileStatus[] paths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop*"));
+ Assert.assertEquals(2, paths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ "test/hadoop"), paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ "test/hadoop2"), paths));
+ }
+
+ @Test
+ public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AAA2), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //Should return all 4 items ("/test/hadoop/aaa", "/test/hadoop/axa"
+ //"/test/hadoop/axx", and "/test/hadoop2/axx")
+ FileStatus[] paths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop*/*"));
+ Assert.assertEquals(4, paths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AAA), paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXA), paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXX), paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AAA2), paths));
+ }
+
+ @Test
+ public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AAA2), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //Should return only 2 items ("/test/hadoop/axa", "/test/hadoop/axx")
+ FileStatus[] paths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/ax?"));
+ Assert.assertEquals(2, paths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXA), paths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXX), paths));
+ }
+
+ @Test
+ public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return an empty set
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/?"),
+ DEFAULT_FILTER);
+ Assert.assertEquals(0,filteredPaths.length);
+ }
+
+ @Test
+ public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter()
+ throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return all three (aaa, axa, axx)
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/*"),
+ DEFAULT_FILTER);
+ Assert.assertEquals(3, filteredPaths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AAA), filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXA), filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXX), filteredPaths));
+ }
+
+ @Test
+ public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter()
+ throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return all three (aaa, axa, axx)
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/a??"),
+ DEFAULT_FILTER);
+ Assert.assertEquals(3, filteredPaths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AAA),
+ filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXA),
+ filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXX),
+ filteredPaths));
+ }
+
+ @Test
+ public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
+ throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return two (axa, axx)
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/*"),
+ TEST_X_FILTER);
+ Assert.assertEquals(2, filteredPaths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXA), filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys,
+ TEST_DIR_AXX), filteredPaths));
+ }
+
+ @Test
+ public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter()
+ throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return an empty set
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/?"),
+ TEST_X_FILTER);
+ Assert.assertEquals(0,filteredPaths.length);
+ }
+
+ @Test
+ public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
+ throws Exception {
+ Path[] testDirs = {
+ getTestRootPath(fSys, TEST_DIR_AAA),
+ getTestRootPath(fSys, TEST_DIR_AXA),
+ getTestRootPath(fSys, TEST_DIR_AXX),
+ getTestRootPath(fSys, TEST_DIR_AXX), };
+
+ if (exists(fSys, testDirs[0]) == false) {
+ for (Path path : testDirs) {
+ fSys.mkdirs(path);
+ }
+ }
+
+ //This should return two (axa, axx)
+ FileStatus[] filteredPaths = fSys.globStatus(
+ getTestRootPath(fSys, "test/hadoop/a??"),
+ TEST_X_FILTER);
+ Assert.assertEquals(2, filteredPaths.length);
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXA),
+ filteredPaths));
+ Assert.assertTrue(containsPath(getTestRootPath(fSys, TEST_DIR_AXX),
+ filteredPaths));
+ }
+
+ @Test
+ public void testWriteReadAndDeleteEmptyFile() throws Exception {
+ writeReadAndDelete(0);
+ }
+
+ @Test
+ public void testWriteReadAndDeleteHalfABlock() throws Exception {
+ writeReadAndDelete(getDefaultBlockSize() / 2);
+ }
+
+ @Test
+ public void testWriteReadAndDeleteOneBlock() throws Exception {
+ writeReadAndDelete(getDefaultBlockSize());
+ }
+
+ @Test
+ public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
+ int blockSize = getDefaultBlockSize();
+ writeReadAndDelete(blockSize + (blockSize / 2));
+ }
+
+ @Test
+ public void testWriteReadAndDeleteTwoBlocks() throws Exception {
+ writeReadAndDelete(getDefaultBlockSize() * 2);
+ }
+
+ private void writeReadAndDelete(int len) throws IOException {
+ Path path = getTestRootPath(fSys, "test/hadoop/file");
+
+ fSys.mkdirs(path.getParent());
+
+
+ FSDataOutputStream out =
+ fSys.create(path, false, 4096, (short) 1, getDefaultBlockSize() );
+ out.write(data, 0, len);
+ out.close();
+
+ Assert.assertTrue("Exists", exists(fSys, path));
+ Assert.assertEquals("Length", len, fSys.getFileStatus(path).getLen());
+
+ FSDataInputStream in = fSys.open(path);
+ byte[] buf = new byte[len];
+ in.readFully(0, buf);
+ in.close();
+
+ Assert.assertEquals(len, buf.length);
+ for (int i = 0; i < buf.length; i++) {
+ Assert.assertEquals("Position " + i, data[i], buf[i]);
+ }
+
+ Assert.assertTrue("Deleted", fSys.delete(path, false));
+
+ Assert.assertFalse("No longer exists", exists(fSys, path));
+
+ }
+
+ @Test
+ public void testOverwrite() throws IOException {
+ Path path = getTestRootPath(fSys, "test/hadoop/file");
+
+ fSys.mkdirs(path.getParent());
+
+ createFile(path);
+
+ Assert.assertTrue("Exists", exists(fSys, path));
+ Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
+
+ try {
+ createFile(path);
+ Assert.fail("Should throw IOException.");
+ } catch (IOException e) {
+ // Expected
+ }
+
+ FSDataOutputStream out = fSys.create(path, true, 4096);
+ out.write(data, 0, data.length);
+ out.close();
+
+ Assert.assertTrue("Exists", exists(fSys, path));
+ Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
+
+ }
+
+ @Test
+ public void testWriteInNonExistentDirectory() throws IOException {
+ Path path = getTestRootPath(fSys, "test/hadoop/file");
+ Assert.assertFalse("Parent doesn't exist", exists(fSys, path.getParent()));
+ createFile(path);
+
+ Assert.assertTrue("Exists", exists(fSys, path));
+ Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
+ Assert.assertTrue("Parent exists", exists(fSys, path.getParent()));
+ }
+
+ @Test
+ public void testDeleteNonExistentFile() throws IOException {
+ Path path = getTestRootPath(fSys, "test/hadoop/file");
+ Assert.assertFalse("Doesn't exist", exists(fSys, path));
+ Assert.assertFalse("No deletion", fSys.delete(path, true));
+ }
+
+ @Test
+ public void testDeleteRecursively() throws IOException {
+ Path dir = getTestRootPath(fSys, "test/hadoop");
+ Path file = getTestRootPath(fSys, "test/hadoop/file");
+ Path subdir = getTestRootPath(fSys, "test/hadoop/subdir");
+
+ createFile(file);
+ fSys.mkdirs(subdir);
+
+ Assert.assertTrue("File exists", exists(fSys, file));
+ Assert.assertTrue("Dir exists", exists(fSys, dir));
+ Assert.assertTrue("Subdir exists", exists(fSys, subdir));
+
+ try {
+ fSys.delete(dir, false);
+ Assert.fail("Should throw IOException.");
+ } catch (IOException e) {
+ // expected
+ }
+ Assert.assertTrue("File still exists", exists(fSys, file));
+ Assert.assertTrue("Dir still exists", exists(fSys, dir));
+ Assert.assertTrue("Subdir still exists", exists(fSys, subdir));
+
+ Assert.assertTrue("Deleted", fSys.delete(dir, true));
+ Assert.assertFalse("File doesn't exist", exists(fSys, file));
+ Assert.assertFalse("Dir doesn't exist", exists(fSys, dir));
+ Assert.assertFalse("Subdir doesn't exist", exists(fSys, subdir));
+ }
+
+ @Test
+ public void testDeleteEmptyDirectory() throws IOException {
+ Path dir = getTestRootPath(fSys, "test/hadoop");
+ fSys.mkdirs(dir);
+ Assert.assertTrue("Dir exists", exists(fSys, dir));
+ Assert.assertTrue("Deleted", fSys.delete(dir, false));
+ Assert.assertFalse("Dir doesn't exist", exists(fSys, dir));
+ }
+
+ @Test
+ public void testRenameNonExistentPath() throws Exception {
+ if (!renameSupported()) return;
+ Path src = getTestRootPath(fSys, "test/hadoop/nonExistent");
+ Path dst = getTestRootPath(fSys, "test/new/newpath");
+ try {
+ rename(src, dst, false, false, false, Rename.NONE);
+ Assert.fail("Should throw FileNotFoundException");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+
+ try {
+ rename(src, dst, false, false, false, Rename.OVERWRITE);
+ Assert.fail("Should throw FileNotFoundException");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testRenameFileToNonExistentDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ Path dst = getTestRootPath(fSys, "test/nonExistent/newfile");
+
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+
+ try {
+ rename(src, dst, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testRenameFileToDestinationWithParentFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ Path dst = getTestRootPath(fSys, "test/parentFile/newfile");
+ createFile(dst.getParent());
+
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ }
+
+ try {
+ rename(src, dst, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ }
+ }
+
+ @Test
+ public void testRenameFileToExistingParent() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ Path dst = getTestRootPath(fSys, "test/new/newfile");
+ fSys.mkdirs(dst.getParent());
+ rename(src, dst, true, false, true, Rename.OVERWRITE);
+ }
+
+ @Test
+ public void testRenameFileToItself() throws Exception {
+ if (!renameSupported()) return;
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ try {
+ rename(src, src, false, true, false, Rename.NONE);
+ Assert.fail("Renamed file to itself");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+ }
+ // Also fails with overwrite
+ try {
+ rename(src, src, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Renamed file to itself");
+ } catch (IOException e) {
+ // worked
+ }
+ }
+
+ @Test
+ public void testRenameFileAsExistingFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ Path dst = getTestRootPath(fSys, "test/new/existingFile");
+ createFile(dst);
+
+ // Fails without overwrite option
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+ }
+
+ // Succeeds with overwrite option
+ rename(src, dst, true, false, true, Rename.OVERWRITE);
+ }
+
+ @Test
+ public void testRenameFileAsExistingDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ Path dst = getTestRootPath(fSys, "test/new/existingDir");
+ fSys.mkdirs(dst);
+
+ // Fails without overwrite option
+ try {
+ rename(src, dst, false, false, true, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ }
+
+ // File cannot be renamed as directory
+ try {
+ rename(src, dst, false, false, true, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ }
+ }
+
+ @Test
+ public void testRenameDirectoryToItself() throws Exception {
+ if (!renameSupported()) return;
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ try {
+ rename(src, src, false, true, false, Rename.NONE);
+ Assert.fail("Renamed directory to itself");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+ }
+ // Also fails with overwrite
+ try {
+ rename(src, src, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Renamed directory to itself");
+ } catch (IOException e) {
+ // worked
+ }
+ }
+
+ @Test
+ public void testRenameDirectoryToNonExistentParent() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ Path dst = getTestRootPath(fSys, "test/nonExistent/newdir");
+
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+
+ try {
+ rename(src, dst, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testRenameDirectoryAsNonExistentDirectory() throws Exception {
+ testRenameDirectoryAsNonExistentDirectory(Rename.NONE);
+ tearDown();
+ testRenameDirectoryAsNonExistentDirectory(Rename.OVERWRITE);
+ }
+
+ private void testRenameDirectoryAsNonExistentDirectory(Rename... options) throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
+
+ Path dst = getTestRootPath(fSys, "test/new/newdir");
+ fSys.mkdirs(dst.getParent());
+
+ rename(src, dst, true, false, true, options);
+ Assert.assertFalse("Nested file1 exists",
+ exists(fSys, getTestRootPath(fSys, "test/hadoop/dir/file1")));
+ Assert.assertFalse("Nested file2 exists",
+ exists(fSys, getTestRootPath(fSys, "test/hadoop/dir/subdir/file2")));
+ Assert.assertTrue("Renamed nested file1 exists",
+ exists(fSys, getTestRootPath(fSys, "test/new/newdir/file1")));
+ Assert.assertTrue("Renamed nested exists",
+ exists(fSys, getTestRootPath(fSys, "test/new/newdir/subdir/file2")));
+ }
+
+ // Renaming a directory onto an existing EMPTY directory: fails without
+ // Rename.OVERWRITE, succeeds with it.
+ @Test
+ public void testRenameDirectoryAsEmptyDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
+
+ // dst is created as an empty directory.
+ Path dst = getTestRootPath(fSys, "test/new/newdir");
+ fSys.mkdirs(dst);
+
+ // Fails without overwrite option
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ // Expected: dst already exists and OVERWRITE was not specified
+ Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+ }
+ // Succeeds with the overwrite option
+ rename(src, dst, true, false, true, Rename.OVERWRITE);
+ }
+
+ // Renaming a directory onto an existing NON-empty directory must fail in
+ // all cases: OVERWRITE only permits replacing an empty destination.
+ @Test
+ public void testRenameDirectoryAsNonEmptyDirectory() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/file1"));
+ createFile(getTestRootPath(fSys, "test/hadoop/dir/subdir/file2"));
+
+ // dst is made non-empty by creating a file inside it.
+ Path dst = getTestRootPath(fSys, "test/new/newdir");
+ fSys.mkdirs(dst);
+ createFile(getTestRootPath(fSys, "test/new/newdir/file1"));
+ // Fails without overwrite option
+ try {
+ rename(src, dst, false, true, false, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ // Expected (cannot over-write non-empty destination)
+ Assert.assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+ }
+ // Fails even with the overwrite option
+ try {
+ rename(src, dst, false, true, false, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException ex) {
+ // Expected (cannot over-write non-empty destination); exact exception
+ // type is deliberately not asserted here, unlike the NONE case above.
+ }
+ }
+
+ // A directory can never be renamed onto an existing file, regardless of
+ // the rename options.
+ @Test
+ public void testRenameDirectoryAsFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = getTestRootPath(fSys, "test/hadoop/dir");
+ fSys.mkdirs(src);
+ Path dst = getTestRootPath(fSys, "test/new/newfile");
+ createFile(dst);
+ // Fails without overwrite option
+ try {
+ rename(src, dst, false, true, true, Rename.NONE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException e) {
+ // Expected: destination exists (and is a file).
+ }
+ // Directory cannot be renamed as existing file
+ try {
+ rename(src, dst, false, true, true, Rename.OVERWRITE);
+ Assert.fail("Expected exception was not thrown");
+ } catch (IOException ex) {
+ // Expected: OVERWRITE does not allow replacing a file with a directory.
+ }
+ }
+
+ // Closing an already-closed input stream must be a no-op, per the
+ // java.io.Closeable#close() contract (HADOOP-4760).
+ @Test
+ public void testInputStreamClosedTwice() throws IOException {
+ //HADOOP-4760 according to Closeable#close() closing already-closed
+ //streams should have no effect.
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ createFile(src);
+ FSDataInputStream in = fSys.open(src);
+ in.close();
+ in.close(); // second close must not throw
+ }
+
+ // Closing an already-closed output stream must be a no-op, per the
+ // java.io.Closeable#close() contract (HADOOP-4760).
+ @Test
+ public void testOutputStreamClosedTwice() throws IOException {
+ //HADOOP-4760 according to Closeable#close() closing already-closed
+ //streams should have no effect.
+ Path src = getTestRootPath(fSys, "test/hadoop/file");
+ FSDataOutputStream out = fSys.create(src);
+
+ out.writeChar('H'); //write some data
+ out.close();
+ out.close(); // second close must not throw
+ }
+
+
+ // Convenience wrapper: creates a default-sized test file at path via the
+ // shared helper, using this test's file system instance.
+ protected void createFile(Path path) throws IOException {
+ FileSystemTestHelper.createFile(fSys, path);
+ }
+
+ /**
+ * Invokes the (deprecated) FileSystem#rename(Path, Path, Rename...) and
+ * verifies the outcome. If renameShouldSucceed is false the rename is
+ * expected to throw, so callers wrap this in try/catch — reaching the
+ * Assert.fail below means no exception was thrown. srcExists/dstExists
+ * state which paths must exist after a successful rename.
+ */
+ @SuppressWarnings("deprecation")
+ private void rename(Path src, Path dst, boolean renameShouldSucceed,
+ boolean srcExists, boolean dstExists, Rename... options)
+ throws IOException {
+ fSys.rename(src, dst, options);
+ if (!renameShouldSucceed)
+ Assert.fail("rename should have thrown exception");
+ Assert.assertEquals("Source exists", srcExists, exists(fSys, src));
+ Assert.assertEquals("Destination exists", dstExists, exists(fSys, dst));
+ }
+ // Returns true if the test-root-qualified form of path appears among the
+ // given statuses (linear scan; fine for the small listings used in tests).
+ private boolean containsPath(Path path, FileStatus[] filteredPaths)
+ throws IOException {
+ for(int i = 0; i < filteredPaths.length; i ++) {
+ if (getTestRootPath(fSys, path.toString()).equals(
+ filteredPaths[i].getPath()))
+ return true;
+ }
+ return false;
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java b/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java
new file mode 100644
index 0000000000..17c95738c8
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Assert;
+
+
+/**
+ * Helper class for unit tests.
+ */
+public final class FileSystemTestHelper {
+ // The test root is relative to the /build/test/data by default
+ public static final String TEST_ROOT_DIR =
+ System.getProperty("test.build.data", "build/test/data") + "/test";
+ private static final int DEFAULT_BLOCK_SIZE = 1024;
+ private static final int DEFAULT_NUM_BLOCKS = 2;
+ // Lazily computed by getAbsoluteTestRootDir(); see NOTE there.
+ private static String absTestRootDir = null;
+
+ /** Hidden constructor */
+ private FileSystemTestHelper() {}
+
+ public static int getDefaultBlockSize() {
+ return DEFAULT_BLOCK_SIZE;
+ }
+
+ // Deterministic file contents: repeating byte pattern 0..9, sized to
+ // exactly numOfBlocks * blockSize bytes.
+ public static byte[] getFileData(int numOfBlocks, long blockSize) {
+ byte[] data = new byte[(int) (numOfBlocks * blockSize)];
+ for (int i = 0; i < data.length; i++) {
+ data[i] = (byte) (i % 10);
+ }
+ return data;
+ }
+
+ // Test root, qualified with fSys's scheme/authority and working dir.
+ public static Path getTestRootPath(FileSystem fSys) {
+ return fSys.makeQualified(new Path(TEST_ROOT_DIR));
+ }
+
+ // pathString resolved under the test root, then qualified against fSys.
+ public static Path getTestRootPath(FileSystem fSys, String pathString) {
+ return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
+ }
+
+
+ // the getAbsolutexxx method is needed because the root test dir
+ // can be messed up by changing the working dir.
+
+ // NOTE(review): the result is cached in a static field, so the working
+ // directory at the time of the FIRST call wins for the whole JVM, and the
+ // lazy init is not thread-safe — acceptable for single-threaded tests.
+ public static String getAbsoluteTestRootDir(FileSystem fSys)
+ throws IOException {
+ if (absTestRootDir == null) {
+ if (TEST_ROOT_DIR.startsWith("/")) {
+ absTestRootDir = TEST_ROOT_DIR;
+ } else {
+ absTestRootDir = fSys.getWorkingDirectory().toString() + "/"
+ + TEST_ROOT_DIR;
+ }
+ }
+ return absTestRootDir;
+ }
+
+ public static Path getAbsoluteTestRootPath(FileSystem fSys) throws IOException {
+ return fSys.makeQualified(new Path(getAbsoluteTestRootDir(fSys)));
+ }
+
+ public static Path getDefaultWorkingDirectory(FileSystem fSys)
+ throws IOException {
+ return getTestRootPath(fSys, "/user/" + System.getProperty("user.name"))
+ .makeQualified(fSys.getUri(),
+ fSys.getWorkingDirectory());
+ }
+
+ /*
+ * Create files with numBlocks blocks each with block size blockSize.
+ */
+ // NOTE(review): the createParent parameter is currently ignored — the
+ // fSys.create(...) call below behaves the same either way. Confirm intent.
+ public static void createFile(FileSystem fSys, Path path, int numBlocks,
+ int blockSize, boolean createParent) throws IOException {
+ FSDataOutputStream out =
+ fSys.create(path, false, 4096, fSys.getDefaultReplication(), blockSize );
+
+ byte[] data = getFileData(numBlocks, blockSize);
+ out.write(data, 0, data.length);
+ out.close();
+ }
+
+ public static void createFile(FileSystem fSys, Path path, int numBlocks,
+ int blockSize) throws IOException {
+ createFile(fSys, path, numBlocks, blockSize, true);
+ }
+
+ // Creates a file with the default size (2 blocks of 1024 bytes).
+ public static void createFile(FileSystem fSys, Path path) throws IOException {
+ createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, true);
+ }
+
+ // Creates a default-sized file under the test root; returns its full path.
+ public static Path createFile(FileSystem fSys, String name) throws IOException {
+ Path path = getTestRootPath(fSys, name);
+ createFile(fSys, path);
+ return path;
+ }
+
+ public static boolean exists(FileSystem fSys, Path p) throws IOException {
+ return fSys.exists(p);
+ }
+
+ // True only if p exists AND is a regular file (missing path -> false).
+ public static boolean isFile(FileSystem fSys, Path p) throws IOException {
+ try {
+ return fSys.getFileStatus(p).isFile();
+ } catch (FileNotFoundException e) {
+ return false;
+ }
+ }
+
+ // True only if p exists AND is a directory (missing path -> false).
+ public static boolean isDir(FileSystem fSys, Path p) throws IOException {
+ try {
+ return fSys.getFileStatus(p).isDirectory();
+ } catch (FileNotFoundException e) {
+ return false;
+ }
+ }
+
+
+ public static void writeFile(FileSystem fSys, Path path,byte b[])
+ throws Exception {
+ FSDataOutputStream out =
+ fSys.create(path);
+ out.write(b);
+ out.close();
+ }
+
+ // Reads exactly len bytes from path; fails if the file is shorter.
+ public static byte[] readFile(FileSystem fSys, Path path, int len )
+ throws Exception {
+ DataInputStream dis = fSys.open(path);
+ byte[] buffer = new byte[len];
+ IOUtils.readFully(dis, buffer, 0, len);
+ dis.close();
+ return buffer;
+ }
+ // Finds the status whose path equals the test-root-qualified form of
+ // path; returns null when absent.
+ public static FileStatus containsPath(FileSystem fSys, Path path,
+ FileStatus[] dirList)
+ throws IOException {
+ for(int i = 0; i < dirList.length; i ++) {
+ if (getTestRootPath(fSys, path.toString()).equals(
+ dirList[i].getPath()))
+ return dirList[i];
+ }
+ return null;
+ }
+
+ // Exact-path variant: no test-root qualification is applied to path.
+ public static FileStatus containsPath(Path path,
+ FileStatus[] dirList)
+ throws IOException {
+ for(int i = 0; i < dirList.length; i ++) {
+ if (path.equals(dirList[i].getPath()))
+ return dirList[i];
+ }
+ return null;
+ }
+
+
+ public static FileStatus containsPath(FileSystem fSys, String path, FileStatus[] dirList)
+ throws IOException {
+ return containsPath(fSys, new Path(path), dirList);
+ }
+
+ public static enum fileType {isDir, isFile, isSymlink};
+ // Asserts that path exists, has the expected type, and that its reported
+ // path equals the qualified form of the requested path.
+ public static void checkFileStatus(FileSystem aFs, String path,
+ fileType expectedType) throws IOException {
+ FileStatus s = aFs.getFileStatus(new Path(path));
+ Assert.assertNotNull(s);
+ if (expectedType == fileType.isDir) {
+ Assert.assertTrue(s.isDirectory());
+ } else if (expectedType == fileType.isFile) {
+ Assert.assertTrue(s.isFile());
+ } else if (expectedType == fileType.isSymlink) {
+ Assert.assertTrue(s.isSymlink());
+ }
+ Assert.assertEquals(aFs.makeQualified(new Path(path)), s.getPath());
+ }
+}
diff --git a/src/test/core/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java b/src/test/core/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java
new file mode 100644
index 0000000000..128c1fb088
--- /dev/null
+++ b/src/test/core/org/apache/hadoop/fs/TestFSMainOperationsLocalFileSystem.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+// Runs the shared FSMainOperationsBaseTest suite against the local file
+// system (exercises the HADOOP-6899 RawLocalFileSystem working-dir fix).
+public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTest {
+
+ @Before
+ public void setUp() throws Exception {
+ // Must assign fSys before the base class setUp uses it.
+ fSys = FileSystem.getLocal(new Configuration());
+ super.setUp();
+ }
+
+ // NOTE(review): static cache — captures the working directory of the first
+ // local FS instance for the whole JVM; lazy init is not thread-safe.
+ static Path wd = null;
+ protected Path getDefaultWorkingDirectory() throws IOException {
+ if (wd == null)
+ wd = FileSystem.getLocal(new Configuration()).getWorkingDirectory();
+ return wd;
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ }
+
+ // Overrides the base test: setting an absolute working directory on the
+ // local FS must be reflected verbatim by getWorkingDirectory().
+ @Test
+ @Override
+ public void testWDAbsolute() throws IOException {
+ Path absoluteDir = FileSystemTestHelper.getTestRootPath(fSys,
+ "test/existingDir");
+ fSys.mkdirs(absoluteDir);
+ fSys.setWorkingDirectory(absoluteDir);
+ Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+ }
+}