diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 721d0c058a..c0f81997b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -696,11 +696,34 @@ public String getGroup() {
return super.getGroup();
}
+ /**
+ * Load file permission information (UNIX symbolic rwxrwxrwx and sticky bit).
+ *
+ * To improve performance, the native stat() call is tried first; if the
+ * native library is not loaded or the native call fails, fall back to the
+ * non-native (ProcessBuilder) implementation.
+ */
+ private synchronized void loadPermissionInfo() {
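+ // Try the native stat()-based implementation first: it avoids forking
+ // a child process just to read permissions.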
+ if (!isPermissionLoaded() && NativeIO.isAvailable()) {
+ try {
+ loadPermissionInfoByNativeIO();
+ } catch (IOException ex) {
+ LOG.debug("Native call failed", ex);
+ }
+ }
+
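+ // Fall back to the shell-based implementation if the native call
+ // failed or native IO is unavailable.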
+ if (!isPermissionLoaded()) {
+ loadPermissionInfoByNonNativeIO();
+ }
+ }
+
/// loads permissions, owner, and group from `ls -ld`
- private void loadPermissionInfo() {
+ @VisibleForTesting
+ void loadPermissionInfoByNonNativeIO() {
IOException e = null;
try {
- String output = FileUtil.execCommand(new File(getPath().toUri()),
+ String output = FileUtil.execCommand(new File(getPath().toUri()),
Shell.getGetPermissionCommand());
StringTokenizer t =
new StringTokenizer(output, Shell.TOKEN_SEPARATOR_REGEX);
@@ -716,16 +739,16 @@ private void loadPermissionInfo() {
t.nextToken();
String owner = t.nextToken();
+ String group = t.nextToken();
// If on windows domain, token format is DOMAIN\\user and we want to
// extract only the user name
+ // and the same applies to the group name
if (Shell.WINDOWS) {
- int i = owner.indexOf('\\');
- if (i != -1)
- owner = owner.substring(i + 1);
+ owner = removeDomain(owner);
+ group = removeDomain(group);
}
setOwner(owner);
-
- setGroup(t.nextToken());
+ setGroup(group);
} catch (Shell.ExitCodeException ioe) {
if (ioe.getExitCode() != 1) {
e = ioe;
@@ -745,6 +768,46 @@ private void loadPermissionInfo() {
}
}
+ // On Windows, the domain name is prepended. For example, given machine
+ // (domain) name dname and user name i, the owner is reported as dname\\i
+ // and the group as dname\\None, so the domain name must be stripped:
+ // DOMAIN\\user => user, DOMAIN\\group => group
+ private String removeDomain(String str) {
+ int index = str.indexOf("\\");
+ if (index != -1) {
+ str = str.substring(index + 1);
+ }
+ return str;
+ }
+
+ // Loads the same permission, owner, and group info as `ls -ld`, but uses
+ // JNI to obtain it more efficiently: file stat() on *nix and a similar
+ // native call on Windows.
+ @VisibleForTesting
+ void loadPermissionInfoByNativeIO() throws IOException {
+ Path path = getPath();
+ String pathName = path.toUri().getPath();
+ // remove leading slash for Windows path
+ if (Shell.WINDOWS && pathName.startsWith("/")) {
+ pathName = pathName.substring(1);
+ }
+ try {
+ NativeIO.POSIX.Stat stat = NativeIO.POSIX.getStat(pathName);
+ String owner = stat.getOwner();
+ String group = stat.getGroup();
+ int mode = stat.getMode();
+ setOwner(owner);
+ setGroup(group);
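+ // FsPermission(int) masks the raw mode down to the permission and
+ // sticky bits (mode & 01777).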
+ setPermission(new FsPermission(mode));
+ } catch (IOException e) {
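+ // Clear any partially loaded state before rethrowing.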
+ setOwner(null);
+ setGroup(null);
+ setPermission(null);
+ throw e;
+ }
+ }
+
@Override
public void write(DataOutput out) throws IOException {
if (!isPermissionLoaded()) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 031092bbff..51c113af27 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -90,6 +90,40 @@ public FsPermission(FsAction u, FsAction g, FsAction o, boolean sb) {
*/
public FsPermission(short mode) { fromShort(mode); }
+ /**
+ * Construct by the given mode; the octal mask 01777 is applied.
+ *
+ * <pre>
+ *              before mask    after mask    file type    sticky bit
+ *
+ *   octal      100644         644           file         no
+ *   decimal    33188          420
+ *
+ *   octal      101644         1644          file         yes
+ *   decimal    33700          932
+ *
+ *   octal      40644          644           directory    no
+ *   decimal    16804          420
+ *
+ *   octal      41644          1644          directory    yes
+ *   decimal    17316          932
+ * </pre>
+ *
+ * So 100644 becomes 644, while 644 stays 644.
+ *
+ * @param mode Mode is supposed to come from the result of a native stat()
+ * call. It contains complete permission information: rwxrwxrwx
+ * bits, sticky bit, whether the path is a directory or a file,
+ * etc. Upon applying the mask, only the permission and sticky
+ * bit info is kept, because they are the only parts used for now.
+ * @see #FsPermission(short)
+ */
+ public FsPermission(int mode) {
+ this((short)(mode & 01777));
+ }
+
/**
* Copy constructor
*
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 84cd42c691..f601edd296 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
@@ -221,6 +222,8 @@ private static void assertCodeLoaded() throws IOException {
public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
/** Wrapper around fstat(2) */
private static native Stat fstat(FileDescriptor fd) throws IOException;
+ /** Wrapper around stat(2). */
+ private static native Stat stat(String path) throws IOException;
/** Native chmod implementation. On UNIX, it is a wrapper around chmod(2) */
private static native void chmodImpl(String path, int mode) throws IOException;
@@ -428,6 +431,37 @@ public static Stat getFstat(FileDescriptor fd) throws IOException {
return stat;
}
+ /**
+ * Return the file stat for a file path.
+ *
+ * @param path file path
+ * @return the file stat
+ * @throws IOException thrown if there is an IO error while obtaining the
+ * file stat
+ */
+ public static Stat getStat(String path) throws IOException {
+ if (path == null) {
+ String errMessage = "Path is null";
+ LOG.warn(errMessage);
+ throw new IOException(errMessage);
+ }
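+ // On POSIX the native stat() reports numeric uid/gid that must be
+ // resolved to names; on Windows it reports the names directly.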
+ Stat stat = null;
+ try {
+ stat = stat(path);
+ if (!Shell.WINDOWS) {
+ stat.owner = getName(IdCache.USER, stat.ownerId);
+ stat.group = getName(IdCache.GROUP, stat.groupId);
+ }
+ } catch (NativeIOException nioe) {
+ LOG.warn("NativeIO.getStat error ({}): {} -- file path: {}",
+ nioe.getErrorCode(), nioe.getMessage(), path);
+ throw new PathIOException(path, nioe);
+ }
+ return stat;
+ }
+
private static String getName(IdCache domain, int id) throws IOException {
Map idNameCache = (domain == IdCache.USER)
? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE;
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index 242a45676b..2274d57ca9 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -383,7 +383,92 @@ cleanup:
#endif
}
+/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method: stat
+ * Signature: (Ljava/lang/String;)Lorg/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat;
+ * public static native Stat stat(String path);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
+ */
+JNIEXPORT jobject JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_stat(
+ JNIEnv *env, jclass clazz, jstring j_path)
+{
+#ifdef UNIX
+ jobject ret = NULL;
+ const char *c_path = (*env)->GetStringUTFChars(env, j_path, NULL);
+ if (c_path == NULL) {
+ goto cleanup;
+ }
+
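+ // Call stat(2) and convert any errno into a Java IOException.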
+ struct stat s;
+ int rc = stat(c_path, &s);
+ if (rc != 0) {
+ throw_ioe(env, errno);
+ goto cleanup;
+ }
+
+ // Construct result
+ ret = (*env)->NewObject(env, stat_clazz, stat_ctor,
+ (jint)s.st_uid, (jint)s.st_gid, (jint)s.st_mode);
+
+cleanup:
+ if (c_path != NULL) {
+ (*env)->ReleaseStringUTFChars(env, j_path, c_path);
+ }
+ return ret;
+#endif
+
+#ifdef WINDOWS
+ LPWSTR owner = NULL;
+ LPWSTR group = NULL;
+ int mode = 0;
+ jstring jstr_owner = NULL;
+ jstring jstr_group = NULL;
+ int rc;
+ jobject ret = NULL;
+
+ LPCWSTR path = (LPCWSTR) (*env)->GetStringChars(env, j_path, NULL);
+ if (path == NULL) {
+ goto cleanup;
+ }
+
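+ // Resolve the owner, the group, and a POSIX-style mode for the path.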
+ rc = FindFileOwnerAndPermission(path, TRUE, &owner, &group, &mode);
+ if (rc != ERROR_SUCCESS) {
+ throw_ioe(env, rc);
+ goto cleanup;
+ }
+
+ jstr_owner = (*env)->NewString(env, owner, (jsize) wcslen(owner));
+ if (jstr_owner == NULL) goto cleanup;
+
+ jstr_group = (*env)->NewString(env, group, (jsize) wcslen(group));
+ if (jstr_group == NULL) goto cleanup;
+
+ ret = (*env)->NewObject(env, stat_clazz, stat_ctor2,
+ jstr_owner, jstr_group, (jint)mode);
+
+cleanup:
+ if (path != NULL)
+ (*env)->ReleaseStringChars(env, j_path, (const jchar*) path);
+
+ if (ret == NULL) {
+ // Stat construction failed: drop the local refs created above.
+ // (ReleaseStringChars only pairs with GetStringChars, so it must not
+ // be used on strings created with NewString.)
+ if (jstr_owner != NULL)
+ (*env)->DeleteLocalRef(env, jstr_owner);
+
+ if (jstr_group != NULL)
+ (*env)->DeleteLocalRef(env, jstr_group);
+ }
+
+ LocalFree(owner);
+ LocalFree(group);
+
+ return ret;
+#endif
+}
/**
* public static native void posix_fadvise(
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index b023c091d3..ebf9ea75e7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -17,10 +17,18 @@
*/
package org.apache.hadoop.fs;
+import java.io.File;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.StatUtils;
import org.apache.hadoop.util.Shell;
+
import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,4 +86,81 @@ protected Path getTestBaseDir() {
protected boolean filesystemIsCaseSensitive() {
return !(Shell.WINDOWS || Shell.MAC);
}
-}
\ No newline at end of file
+
+ // Cross-check getPermission() using both the native and non-native paths.
+ @Test
+ @SuppressWarnings("deprecation")
+ public void testPermission() throws Exception {
+ Path testDir = getTestBaseDir();
+ String testFilename = "teststat2File";
+ Path path = new Path(testDir, testFilename);
+
+ RawLocalFileSystem rfs = new RawLocalFileSystem();
+ Configuration conf = new Configuration();
+ rfs.initialize(rfs.getUri(), conf);
+ rfs.createNewFile(path);
+
+ File file = rfs.pathToFile(path);
+ long defaultBlockSize = rfs.getDefaultBlockSize(path);
+
+ //
+ // test initial permission
+ //
+ RawLocalFileSystem.DeprecatedRawLocalFileStatus fsNIO =
+ new RawLocalFileSystem.DeprecatedRawLocalFileStatus(
+ file, defaultBlockSize, rfs);
+ fsNIO.loadPermissionInfoByNativeIO();
+ RawLocalFileSystem.DeprecatedRawLocalFileStatus fsnonNIO =
+ new RawLocalFileSystem.DeprecatedRawLocalFileStatus(
+ file, defaultBlockSize, rfs);
+ fsnonNIO.loadPermissionInfoByNonNativeIO();
+
+ assertEquals(fsNIO.getOwner(), fsnonNIO.getOwner());
+ assertEquals(fsNIO.getGroup(), fsnonNIO.getGroup());
+ assertEquals(fsNIO.getPermission(), fsnonNIO.getPermission());
+
+ LOG.info("owner: {}, group: {}, permission: {}, isSticky: {}",
+ fsNIO.getOwner(), fsNIO.getGroup(), fsNIO.getPermission(),
+ fsNIO.getPermission().getStickyBit());
+
+ //
+ // test normal chmod - no sticky bit
+ //
+ StatUtils.setPermissionFromProcess("644", file.getPath());
+ fsNIO.loadPermissionInfoByNativeIO();
+ fsnonNIO.loadPermissionInfoByNonNativeIO();
+ assertEquals(fsNIO.getPermission(), fsnonNIO.getPermission());
+ assertEquals(644, fsNIO.getPermission().toOctal());
+ assertFalse(fsNIO.getPermission().getStickyBit());
+ assertFalse(fsnonNIO.getPermission().getStickyBit());
+
+ //
+ // test sticky bit
+ // unfortunately, cannot be done in Windows environments
+ //
+ if (!Shell.WINDOWS) {
+ //
+ // add sticky bit
+ //
+ StatUtils.setPermissionFromProcess("1644", file.getPath());
+ fsNIO.loadPermissionInfoByNativeIO();
+ fsnonNIO.loadPermissionInfoByNonNativeIO();
+ assertEquals(fsNIO.getPermission(), fsnonNIO.getPermission());
+ assertEquals(1644, fsNIO.getPermission().toOctal());
+ assertTrue(fsNIO.getPermission().getStickyBit());
+ assertTrue(fsnonNIO.getPermission().getStickyBit());
+
+ //
+ // remove sticky bit
+ //
+ StatUtils.setPermissionFromProcess("-t", file.getPath());
+ fsNIO.loadPermissionInfoByNativeIO();
+ fsnonNIO.loadPermissionInfoByNonNativeIO();
+ assertEquals(fsNIO.getPermission(), fsnonNIO.getPermission());
+ assertEquals(644, fsNIO.getPermission().toOctal());
+ assertFalse(fsNIO.getPermission().getStickyBit());
+ assertFalse(fsnonNIO.getPermission().getStickyBit());
+ }
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index afddf80a25..0c5b415f28 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -264,6 +264,37 @@ private boolean isCorrectExceptionMessage(String msg, String umask) {
msg.contains("octal or symbolic");
}
+ /**
+ * Test the FsPermission(int) constructor.
+ */
+ @Test
+ public void testIntPermission() {
+ // Octal     Decimal    Masked OCT    Masked DEC
+ // 100644    33188      644           420
+ // 101644    33700      1644          932
+ // 40644     16804      644           420
+ // 41644     17316      1644          932
+ // 644       420        644           420
+ // 1644      932        1644          932
+
+ int[][] permissionMaskMaps = {
+ // original mode, masked mode, sticky bit
+ { 0100644, 0644, 0 }, // 33188, -rw-r--r--
+ { 0101644, 01644, 1 }, // 33700, -rw-r--r-T
+ { 040644, 0644, 0 }, // 16804, drw-r--r--
+ { 041644, 01644, 1 } // 17316, drw-r--r-T
+ };
+
+ for (int[] permissionMaskMap : permissionMaskMaps) {
+ int originalPermissionValue = permissionMaskMap[0];
+ int maskedPermissionValue = permissionMaskMap[1];
+ boolean hasStickyBit = permissionMaskMap[2] == 1;
+ FsPermission fsPermission = new FsPermission(originalPermissionValue);
+ assertEquals(maskedPermissionValue, fsPermission.toShort());
+ assertEquals(hasStickyBit, fsPermission.getStickyBit());
+ }
+ }
+
// Symbolic umask list is generated in linux shell using by the command:
// umask 0; umask ; umask -S
static final String[][] SYMBOLIC = new String[][] {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index 6989905d75..6b3c2325d8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -29,37 +29,45 @@
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.util.Random;
-import java.util.concurrent.atomic.AtomicReference;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assume.*;
-import static org.junit.Assert.*;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.test.StatUtils;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.Time;
+import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*;
+import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assume.*;
+import static org.junit.Assert.*;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*;
-import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
-
public class TestNativeIO {
static final Logger LOG = LoggerFactory.getLogger(TestNativeIO.class);
@@ -163,6 +171,110 @@ public void testFstatClosedFd() throws Exception {
}
}
+ @Test (timeout = 30000)
+ public void testStat() throws Exception {
+ Configuration conf = new Configuration();
+ FileSystem fileSystem = FileSystem.getLocal(conf).getRawFileSystem();
+ Path path = new Path(TEST_DIR.getPath(), "teststat2");
+ fileSystem.createNewFile(path);
+ String testFilePath = path.toString();
+
+ try {
+ doStatTest(testFilePath);
+ LOG.info("testStat() is successful.");
+ } finally {
+ ContractTestUtils.cleanup("cleanup test file: " + path.toString(),
+ fileSystem, path);
+ }
+ }
+
+ private boolean doStatTest(String testFilePath) throws Exception {
+ NativeIO.POSIX.Stat stat = NativeIO.POSIX.getStat(testFilePath);
+ String owner = stat.getOwner();
+ String group = stat.getGroup();
+ int mode = stat.getMode();
+
+ // direct check with System
+ String expectedOwner = System.getProperty("user.name");
+ assertEquals(expectedOwner, owner);
+ assertNotNull(group);
+ assertFalse(group.isEmpty());
+
+ // cross check with ProcessBuilder
+ StatUtils.Permission expected =
+ StatUtils.getPermissionFromProcess(testFilePath);
+ StatUtils.Permission permission =
+ new StatUtils.Permission(owner, group, new FsPermission(mode));
+
+ assertEquals(expected.getOwner(), permission.getOwner());
+ assertEquals(expected.getGroup(), permission.getGroup());
+ assertEquals(expected.getFsPermission(), permission.getFsPermission());
+
+ LOG.info("Load permission test is successful for path: {}, stat: {}",
+ testFilePath, stat);
+ LOG.info("On mask, stat is owner: {}, group: {}, permission: {}",
+ owner, group, permission.getFsPermission().toOctal());
+ return true;
+ }
+
+ @Test
+ public void testStatOnError() throws Exception {
+ final String testNullFilePath = null;
+ LambdaTestUtils.intercept(IOException.class,
+ "Path is null",
+ () -> NativeIO.POSIX.getStat(testNullFilePath));
+
+ final String testInvalidFilePath = "C:\\nonexisting_path\\nonexisting_file";
+ LambdaTestUtils.intercept(IOException.class,
+ PathIOException.class.getName(),
+ () -> NativeIO.POSIX.getStat(testInvalidFilePath));
+ }
+
+ @Test (timeout = 30000)
+ public void testMultiThreadedStat() throws Exception {
+ Configuration conf = new Configuration();
+ FileSystem fileSystem = FileSystem.getLocal(conf).getRawFileSystem();
+ Path path = new Path(TEST_DIR.getPath(), "teststat2");
+ fileSystem.createNewFile(path);
+ String testFilePath = path.toString();
+
+ int numOfThreads = 10;
+ ExecutorService executorService =
+ Executors.newFixedThreadPool(numOfThreads);
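+ // Exercise getStat() concurrently; doStatTest() asserts inside each task.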
+ try {
+ for (int i = 0; i < numOfThreads; i++){
+ Future<Boolean> result =
+ executorService.submit(() -> doStatTest(testFilePath));
+ assertTrue(result.get());
+ }
+ LOG.info("testMultiThreadedStat() is successful.");
+ } finally {
+ executorService.shutdown();
+ executorService.awaitTermination(1000, TimeUnit.MILLISECONDS);
+ ContractTestUtils.cleanup("cleanup test file: " + path.toString(),
+ fileSystem, path);
+ }
+ }
+
+ @Test
+ public void testMultiThreadedStatOnError() throws Exception {
+ final String testInvalidFilePath = "C:\\nonexisting_path\\nonexisting_file";
+
+ int numOfThreads = 10;
+ ExecutorService executorService =
+ Executors.newFixedThreadPool(numOfThreads);
+ for (int i = 0; i < numOfThreads; i++) {
+ try {
+ Future<Boolean> result =
+ executorService.submit(() -> doStatTest(testInvalidFilePath));
+ result.get();
+ fail("getStat() was expected to fail for " + testInvalidFilePath);
+ } catch (Exception e) {
+ assertTrue(e.getCause() instanceof PathIOException);
+ }
+ }
+ executorService.shutdown();
+ }
+
@Test (timeout = 30000)
public void testSetFilePointer() throws Exception {
assumeWindows();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/StatUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/StatUtils.java
new file mode 100644
index 0000000000..fef35d0561
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/StatUtils.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Helper class for stat/permission utility methods. Forks processes to query
+ * permission info.
+ */
+public class StatUtils {
+ public static class Permission {
+ private String owner;
+ private String group;
+ private FsPermission fsPermission;
+
+ public Permission(String owner, String group, FsPermission fsPermission) {
+ this.owner = owner;
+ this.group = group;
+ this.fsPermission = fsPermission;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public String getGroup() {
+ return group;
+ }
+
+ public FsPermission getFsPermission() {
+ return fsPermission;
+ }
+ }
+
+ public static Permission getPermissionFromProcess(String filePath)
+ throws Exception {
+ String[] shellCommand = Shell.getGetPermissionCommand();
+ String sPerm = getPermissionStringFromProcess(shellCommand, filePath);
+
+ StringTokenizer tokenizer =
+ new StringTokenizer(sPerm, Shell.TOKEN_SEPARATOR_REGEX);
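+ // Tokens follow the `ls -ld` layout: permissions, link count, owner,
+ // group, ...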
+ String symbolicPermission = tokenizer.nextToken();
+ tokenizer.nextToken(); // skip the link count
+ String owner = tokenizer.nextToken();
+ String group = tokenizer.nextToken();
+ if (Shell.WINDOWS) {
+ owner = removeDomain(owner);
+ group = removeDomain(group);
+ }
+
+ Permission permission =
+ new Permission(owner, group, FsPermission.valueOf(symbolicPermission));
+
+ return permission;
+ }
+
+ public static void setPermissionFromProcess(String chmod, String filePath)
+ throws Exception {
+ setPermissionFromProcess(chmod, false, filePath);
+ }
+
+ public static void setPermissionFromProcess(String chmod, boolean recursive,
+ String filePath) throws Exception {
+ String[] shellCommand = Shell.getSetPermissionCommand(chmod, recursive);
+ getPermissionStringFromProcess(shellCommand, filePath);
+ }
+
+ private static String removeDomain(String str) {
+ int index = str.indexOf("\\");
+ if (index != -1) {
+ str = str.substring(index + 1);
+ }
+ return str;
+ }
+
+ private static String getPermissionStringFromProcess(String[] shellCommand,
+ String testFilePath) throws Exception {
+ List<String> cmd = new ArrayList<>(Arrays.asList(shellCommand));
+ cmd.add(testFilePath);
+
+ ProcessBuilder processBuilder = new ProcessBuilder(cmd);
+ Process process = processBuilder.start();
+
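+ // Read the first line of the command's stdout on a helper thread.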
+ ExecutorService executorService = Executors.newSingleThreadExecutor();
+ try {
+ Future<String> future =
+ executorService.submit(() -> new BufferedReader(
+ new InputStreamReader(process.getInputStream(),
+ Charset.defaultCharset())).lines().findFirst().orElse(""));
+ return future.get();
+ } finally {
+ process.destroy();
+ executorService.shutdown();
+ executorService.awaitTermination(2000, TimeUnit.MILLISECONDS);
+ }
+ }
+}