HDFS-12802. RBF: Control MountTableResolver cache size. Contributed by Inigo Goiri.

Inigo Goiri 2018-01-09 18:53:25 -08:00
parent 55066cc53d
commit d9006d8a4e
4 changed files with 93 additions and 22 deletions

DFSConfigKeys.java

@@ -1281,6 +1281,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT =
       TimeUnit.MINUTES.toMillis(5);
 
+  // HDFS Router-based federation mount table entries
+  /** Maximum number of cache entries to have. */
+  public static final String FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE =
+      DFSConfigKeys.FEDERATION_ROUTER_PREFIX + "mount-table.max-cache-size";
+  /** Remove cache entries if we have more than 10k. */
+  public static final int FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT = 10000;
+
   // HDFS Router-based federation admin
   public static final String DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY =
       FEDERATION_ROUTER_PREFIX + "admin.handler.count";
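Aside (not part of the patch): a minimal sketch of how the two new constants are meant to be read off a Configuration; the class name below is hypothetical, and the key resolves to "dfs.federation.router.mount-table.max-cache-size".

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class MountCacheKeySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Optional override; leaving the key unset keeps the 10000-entry default.
        conf.setInt(DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, 5000);

        // The key resolves to "dfs.federation.router.mount-table.max-cache-size".
        int maxCacheSize = conf.getInt(
            DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE,
            DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT);
        System.out.println("mount-table.max-cache-size = " + maxCacheSize);
      }
    }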

MountTableResolver.java

@@ -18,10 +18,13 @@
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -30,9 +33,10 @@
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -55,6 +59,8 @@
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 
 /**
  * Mount table to map between global paths and remote locations. This allows the
@@ -81,8 +87,7 @@ public class MountTableResolver
   /** Path -> Remote HDFS location. */
   private final TreeMap<String, MountTable> tree = new TreeMap<>();
   /** Path -> Remote location. */
-  private final ConcurrentNavigableMap<String, PathLocation> locationCache =
-      new ConcurrentSkipListMap<>();
+  private final Cache<String, PathLocation> locationCache;
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
@@ -99,20 +104,30 @@ public MountTableResolver(Configuration conf) {
   }
 
   public MountTableResolver(Configuration conf, Router routerService) {
+    this(conf, routerService, null);
+  }
+
+  public MountTableResolver(Configuration conf, StateStoreService store) {
+    this(conf, null, store);
+  }
+
+  public MountTableResolver(Configuration conf, Router routerService,
+      StateStoreService store) {
     this.router = routerService;
-    if (this.router != null) {
+    if (store != null) {
+      this.stateStore = store;
+    } else if (this.router != null) {
       this.stateStore = this.router.getStateStore();
     } else {
       this.stateStore = null;
     }
 
-    registerCacheExternal();
-    initDefaultNameService(conf);
-  }
-
-  public MountTableResolver(Configuration conf, StateStoreService store) {
-    this.router = null;
-    this.stateStore = store;
+    int maxCacheSize = conf.getInt(
+        FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE,
+        FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT);
+    this.locationCache = CacheBuilder.newBuilder()
+        .maximumSize(maxCacheSize)
+        .build();
 
     registerCacheExternal();
     initDefaultNameService(conf);
@@ -210,16 +225,26 @@ public void removeEntry(final String srcPath) {
    * @param path Source path.
    */
   private void invalidateLocationCache(final String path) {
-    if (locationCache.isEmpty()) {
+    LOG.debug("Invalidating {} from {}", path, locationCache);
+    if (locationCache.size() == 0) {
       return;
     }
-    // Determine next lexicographic entry after source path
-    String nextSrc = path + Character.MAX_VALUE;
-    ConcurrentNavigableMap<String, PathLocation> subMap =
-        locationCache.subMap(path, nextSrc);
-    for (final String key : subMap.keySet()) {
-      locationCache.remove(key);
+
+    // Go through the entries and remove the ones from the path to invalidate
+    ConcurrentMap<String, PathLocation> map = locationCache.asMap();
+    Set<Entry<String, PathLocation>> entries = map.entrySet();
+    Iterator<Entry<String, PathLocation>> it = entries.iterator();
+    while (it.hasNext()) {
+      Entry<String, PathLocation> entry = it.next();
+      PathLocation loc = entry.getValue();
+      String src = loc.getSourcePath();
+      if (src.startsWith(path)) {
+        LOG.debug("Removing {}", src);
+        it.remove();
+      }
     }
+
+    LOG.debug("Location cache after invalidation: {}", locationCache);
   }
 
   /**
@@ -312,7 +337,7 @@ public void clear() {
     LOG.info("Clearing all mount location caches");
     writeLock.lock();
     try {
-      this.locationCache.clear();
+      this.locationCache.invalidateAll();
       this.tree.clear();
     } finally {
       writeLock.unlock();
@@ -325,8 +350,15 @@ public PathLocation getDestinationForPath(final String path)
     verifyMountTable();
     readLock.lock();
     try {
-      return this.locationCache.computeIfAbsent(
-          path, this::lookupLocation);
+      Callable<? extends PathLocation> meh = new Callable<PathLocation>() {
+        @Override
+        public PathLocation call() throws Exception {
+          return lookupLocation(path);
+        }
+      };
+      return this.locationCache.get(path, meh);
+    } catch (ExecutionException e) {
+      throw new IOException(e);
     } finally {
       readLock.unlock();
     }
@@ -544,4 +576,12 @@ private List<MountTable> getTreeValues(final String path, boolean reverse) {
     }
     return ret;
   }
+
+  /**
+   * Get the size of the cache.
+   * @return Size of the cache.
+   */
+  protected long getCacheSize() {
+    return this.locationCache.size();
+  }
 }
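Aside (not part of the patch): the caching pattern adopted above — a size-bounded Guava Cache, load-on-miss through a Callable, and invalidation through the asMap() view — can be sketched in isolation. The class and names below are illustrative only; plain string keys stand in for the PathLocation source paths that MountTableResolver actually matches on.

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class BoundedPathCacheSketch {

      // Size-bounded cache: once more than 10 entries are present, Guava evicts
      // entries (approximately least-recently-used) to stay under the bound.
      private final Cache<String, String> cache = CacheBuilder.newBuilder()
          .maximumSize(10)
          .build();

      /** Load-on-miss, mirroring locationCache.get(path, callable) in the patch. */
      public String resolve(final String path) throws ExecutionException {
        return cache.get(path, new Callable<String>() {
          @Override
          public String call() {
            return "resolved:" + path; // stand-in for lookupLocation(path)
          }
        });
      }

      /** Prefix invalidation through the asMap() view, as invalidateLocationCache does. */
      public void invalidate(String prefix) {
        Iterator<Map.Entry<String, String>> it = cache.asMap().entrySet().iterator();
        while (it.hasNext()) {
          if (it.next().getKey().startsWith(prefix)) {
            it.remove();
          }
        }
      }

      public long size() {
        return cache.size();
      }
    }

With maximumSize(10), resolving many distinct paths leaves roughly at most ten entries in the cache, which is the same bound the new testCacheCleaning test below asserts against getCacheSize().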

hdfs-default.xml

@@ -5117,4 +5117,13 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.federation.router.mount-table.max-cache-size</name>
+  <value>10000</value>
+  <description>
+    Maximum number of mount table cache entries to have.
+    By default, remove cache entries if we have more than 10k.
+  </description>
+</property>
+
 </configuration>
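Aside (not part of the patch): a hedged sketch of how the property above is picked up; HdfsConfiguration loads hdfs-default.xml plus any hdfs-site.xml override on the classpath, and the string key matches the FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE constant added earlier. The class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class MountCachePropertySketch {
      public static void main(String[] args) {
        // HdfsConfiguration loads hdfs-default.xml (value 10000 after this patch)
        // plus any hdfs-site.xml override found on the classpath.
        Configuration conf = new HdfsConfiguration();
        int bound = conf.getInt(
            "dfs.federation.router.mount-table.max-cache-size", 10000);
        System.out.println("Effective mount table cache bound: " + bound);
      }
    }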

TestMountTableResolver.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -48,6 +49,8 @@ public class TestMountTableResolver {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestMountTableResolver.class);
 
+  private static final int TEST_MAX_CACHE_SIZE = 10;
+
   private MountTableResolver mountTable;
 
   private Map<String, String> getMountTableEntry(
@@ -77,6 +80,8 @@ private Map<String, String> getMountTableEntry(
    */
   private void setupMountTable() throws IOException {
     Configuration conf = new Configuration();
+    conf.setInt(
+        FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
     mountTable = new MountTableResolver(conf);
 
     // Root mount point
@@ -441,4 +446,14 @@ public void testUpdate() throws IOException {
     MountTable entry2 = mountTable.getMountPoint("/testupdate");
     assertNull(entry2);
   }
+
+  @Test
+  public void testCacheCleaning() throws Exception {
+    for (int i = 0; i < 1000; i++) {
+      String filename = String.format("/user/a/file-%04d.txt", i);
+      mountTable.getDestinationForPath(filename);
+    }
+    long cacheSize = mountTable.getCacheSize();
+    assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
+  }
 }