HDFS-13560. Insufficient system resources exist to complete the requested service for some tests on Windows. Contributed by Anbang Hu.
commit 53b807a6a8
parent a97a2042f2
@@ -1388,4 +1388,19 @@ public static Set<Shell> getAllShells() {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
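For reference, a minimal standalone sketch (not part of this commit) of how a caller could exercise the new helper; it assumes org.apache.hadoop.util.Shell is on the classpath and uses the public Shell.WINDOWS flag to show that the cap only applies on Windows:

import static org.apache.hadoop.util.Shell.getMemlockLimit;

import org.apache.hadoop.util.Shell;

// Illustrative only: on Windows the requested limit is capped at
// Integer.MAX_VALUE; on other platforms it passes through unchanged.
public class MemlockLimitSketch {
  public static void main(String[] args) {
    long requested = Long.MAX_VALUE;
    long effective = getMemlockLimit(requested);
    if (Shell.WINDOWS) {
      // Avoids error 1450: "Insufficient system resources exist to
      // complete the requested service".
      System.out.println("capped to " + effective);   // Integer.MAX_VALUE
    } else {
      System.out.println("unchanged: " + effective);  // Long.MAX_VALUE
    }
  }
}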
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -99,7 +100,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        Long.MAX_VALUE);
+        getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before
@@ -25,6 +25,7 @@
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -421,7 +422,7 @@ public void build() throws IOException {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;
@@ -75,6 +75,7 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -103,8 +104,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
 
     try {
@@ -256,7 +259,7 @@ public void testNameNodeMXBeanInfo() throws Exception {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(maxLockedMemory *
           cluster.getDataNodes().size(),
           mbs.getAttribute(mxbeanName, "CacheCapacity"));
       assertNull("RollingUpgradeInfo should be null when there is no rolling"
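As a side note, a hypothetical snippet (class and method names assumed, not part of this commit) showing how the expected aggregate CacheCapacity in the assertion above derives from the capped per-datanode limit:

import org.apache.hadoop.util.Shell;

// Hypothetical illustration: the MXBean check expects
//   CacheCapacity == cappedPerDatanodeLimit * numberOfDatanodes
public class CacheCapacitySketch {
  static long expectedCacheCapacity(long osMemlockLimit, int numDataNodes) {
    // Shell.getMemlockLimit caps the value at Integer.MAX_VALUE on Windows.
    return Shell.getMemlockLimit(osMemlockLimit) * numDataNodes;
  }

  public static void main(String[] args) {
    // e.g. a single-datanode MiniDFSCluster with a 64 KiB OS memlock limit
    System.out.println(expectedCacheCapacity(64 * 1024, 1));  // 65536
  }
}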