diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index 59a5f47622..a26bf34c12 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -383,6 +383,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
   JNIEnv *env, jclass clazz,
   jobject buffer, jlong len)
 {
+#ifdef UNIX
   void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
   PASS_EXCEPTIONS(env);
 
@@ -390,6 +391,12 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
     CHECK_DIRECT_BUFFER_ADDRESS(buf);
     throw_ioe(env, errno);
   }
+#endif
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.mlock_native() is not supported on Windows");
+#endif
 }
 
 /**
@@ -404,6 +411,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
   JNIEnv *env, jclass clazz,
   jobject buffer, jlong len)
 {
+#ifdef UNIX
   void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
   PASS_EXCEPTIONS(env);
 
@@ -411,6 +419,12 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
     CHECK_DIRECT_BUFFER_ADDRESS(buf);
     throw_ioe(env, errno);
   }
+#endif
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.munlock_native() is not supported on Windows");
+#endif
 }
 
 #ifdef __FreeBSD__
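Both hunks above follow the same pattern: the POSIX implementation is compiled only under `UNIX`, while the `WINDOWS` build gets a stub that fails fast with an `IOException` instead of failing at link time. A minimal caller-side sketch (not part of this patch) of how Java code can cope with that contract; it assumes `NativeIO.POSIX.mlock(ByteBuffer, long)` is the Java wrapper over `mlock_native()`, and the `MlockGuard` class is hypothetical:

```java
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.NativeIO;

public class MlockGuard {
  /** Try to pin a direct buffer in RAM; report false where unsupported. */
  public static boolean tryMlock(ByteBuffer buf, long len) {
    // Skip the native call entirely on Windows or when libhadoop is absent;
    // the patched mlock_native() stub would only throw IOException anyway.
    if (Path.WINDOWS || !NativeIO.isAvailable()) {
      return false;
    }
    try {
      NativeIO.POSIX.mlock(buf, len);
      return true;
    } catch (IOException e) {
      return false; // e.g. the RLIMIT_MEMLOCK ulimit was exceeded
    }
  }
}
```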
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
index 03e9438a26..db8e2762e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
@@ -115,3 +115,6 @@ HDFS-4949 (Unreleased)
 
     HDFS-5385. Caching RPCs are AtMostOnce, but do not persist client ID and
     call ID to edit log. (Chris Nauroth via Colin Patrick McCabe)
+
+    HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949
+    branch. (Chris Nauroth via Andrew Wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 86afd857f3..e6e87b9cb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyInt;
@@ -50,6 +51,7 @@
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
@@ -72,6 +74,8 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
+    assumeTrue(!Path.WINDOWS);
+    assumeTrue(NativeIO.isAvailable());
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
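TestFsDatasetCache exercises mlock-backed caching, so its setup now converts "unsupported platform" into a skipped test rather than a red build. The same JUnit 4 idiom in isolation (class and method names here are hypothetical):

```java
import static org.junit.Assume.assumeTrue;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.junit.Before;
import org.junit.Test;

public class TestNativeOnlyFeature {
  @Before
  public void setUp() {
    // A violated assumption marks every test in the class as skipped,
    // not failed, so Windows and non-native builds stay green.
    assumeTrue(!Path.WINDOWS);          // mlock path is POSIX-only for now
    assumeTrue(NativeIO.isAvailable()); // requires the libhadoop native library
  }

  @Test
  public void testFeatureThatNeedsMlock() {
    // Runs only where both assumptions hold.
  }
}
```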
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
index 53b7fba174..dce0b17e7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
@@ -447,77 +447,74 @@ public void testAddRemoveDirectives() throws Exception {
 
   @Test(timeout=60000)
   public void testCacheManagerRestart() throws Exception {
+    cluster.shutdown();
+    cluster = null;
     HdfsConfiguration conf = createCachingConf();
-    MiniDFSCluster cluster =
-      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
 
-    try {
-      cluster.waitActive();
-      DistributedFileSystem dfs = cluster.getFileSystem();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
 
-      // Create and validate a pool
-      final String pool = "poolparty";
-      String groupName = "partygroup";
-      FsPermission mode = new FsPermission((short)0777);
-      int weight = 747;
-      dfs.addCachePool(new CachePoolInfo(pool)
-          .setGroupName(groupName)
-          .setMode(mode)
-          .setWeight(weight));
-      RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
-      assertTrue("No cache pools found", pit.hasNext());
-      CachePoolInfo info = pit.next();
-      assertEquals(pool, info.getPoolName());
-      assertEquals(groupName, info.getGroupName());
-      assertEquals(mode, info.getMode());
-      assertEquals(weight, (int)info.getWeight());
-      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    // Create and validate a pool
+    final String pool = "poolparty";
+    String groupName = "partygroup";
+    FsPermission mode = new FsPermission((short)0777);
+    int weight = 747;
+    dfs.addCachePool(new CachePoolInfo(pool)
+        .setGroupName(groupName)
+        .setMode(mode)
+        .setWeight(weight));
+    RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
+    assertTrue("No cache pools found", pit.hasNext());
+    CachePoolInfo info = pit.next();
+    assertEquals(pool, info.getPoolName());
+    assertEquals(groupName, info.getGroupName());
+    assertEquals(mode, info.getMode());
+    assertEquals(weight, (int)info.getWeight());
+    assertFalse("Unexpected # of cache pools found", pit.hasNext());
 
-      // Create some cache entries
-      int numEntries = 10;
-      String entryPrefix = "/party-";
-      for (int i=0; i<numEntries; i++) {
-        dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).build());
-      }
-      RemoteIterator<PathBasedCacheDescriptor> dit
-          = dfs.listPathBasedCacheDescriptors(null, null);
-      for (int i=0; i<numEntries; i++) {
-        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-        PathBasedCacheDescriptor cd = dit.next();
-        assertEquals(i+1, cd.getEntryId());
-        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-        assertEquals(pool, cd.getPool());
-      }
+    // Create some cache entries
+    int numEntries = 10;
+    String entryPrefix = "/party-";
+    for (int i=0; i<numEntries; i++) {
+      dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+          setPath(new Path(entryPrefix + i)).setPool(pool).build());
+    }
+    RemoteIterator<PathBasedCacheDescriptor> dit
+        = dfs.listPathBasedCacheDescriptors(null, null);
+    for (int i=0; i
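The testCacheManagerRestart changes address a different Windows regression: the test used to build a private MiniDFSCluster while the suite-wide one was still running, and on Windows the second cluster collides with storage files the first still holds locked. The rewrite recycles the shared `cluster` field and drops the local try/finally, leaving cleanup to the suite's `@After` method. A standalone sketch of that reuse pattern (class, field, and method names are illustrative, not from the patch):

```java
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Test;

public class TestClusterReuse {
  private MiniDFSCluster cluster; // shared by all tests, as in the real suite

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test(timeout=60000)
  public void testWithSpecialConf() throws Exception {
    // Replace the shared cluster instead of starting a second one next to
    // it; lingering Windows file locks make two live clusters unsafe.
    if (cluster != null) {
      cluster.shutdown();
    }
    cluster = null; // if the rebuild below throws, tearDown() skips shutdown
    HdfsConfiguration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    // ... assertions against cluster.getFileSystem(); tearDown() cleans up
  }
}
```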