diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
index d276c27525..f79b7c7374 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryCacheStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java
@@ -27,7 +27,7 @@
 /**
  * Keeps statistics for the memory cache.
  */
-class MemoryCacheStats {
+class CacheStats {
 
   /**
    * The approximate amount of cache space in use.
@@ -47,7 +47,7 @@ class MemoryCacheStats {
    */
   private final long maxBytes;
 
-  MemoryCacheStats(long maxBytes) {
+  CacheStats(long maxBytes) {
     this.usedBytesCount = new UsedBytesCount();
     this.maxBytes = maxBytes;
   }
@@ -81,7 +81,7 @@ public long roundDown(long count) {
   private class UsedBytesCount {
     private final AtomicLong usedBytes = new AtomicLong(0);
 
-    private MemoryCacheStats.PageRounder rounder = new PageRounder();
+    private CacheStats.PageRounder rounder = new PageRounder();
 
     /**
      * Try to reserve more bytes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index 37e548e220..1514927212 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -23,7 +23,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -137,7 +136,7 @@ public boolean shouldAdvertise() {
    */
   private final MappableBlockLoader cacheLoader;
 
-  private final MemoryCacheStats memCacheStats;
+  private final CacheStats memCacheStats;
 
   /**
    * Number of cache commands that could not be completed successfully
@@ -178,30 +177,17 @@ public FsDatasetCache(FsDatasetImpl dataset) throws IOException {
           ". Reconfigure this to " + minRevocationPollingMs);
     }
     this.revocationPollingMs = confRevocationPollingMs;
-    // Both lazy writer and read cache are sharing this statistics.
-    this.memCacheStats = new MemoryCacheStats(
-        dataset.datanode.getDnConf().getMaxLockedMemory());
 
     this.cacheLoader = MappableBlockLoaderFactory.createCacheLoader(
         this.getDnConf());
-    cacheLoader.initialize(this);
-  }
-
-  /**
-   * Check if pmem cache is enabled.
-   */
-  private boolean isPmemCacheEnabled() {
-    return !cacheLoader.isTransientCache();
+    // Both lazy writer and read cache share these statistics.
+    this.memCacheStats = cacheLoader.initialize(this.getDnConf());
   }
 
   DNConf getDnConf() {
     return this.dataset.datanode.getDnConf();
   }
 
-  MemoryCacheStats getMemCacheStats() {
-    return memCacheStats;
-  }
-
   /**
    * Get the cache path if the replica is cached into persistent memory.
    */
@@ -557,37 +543,32 @@ public void run() {
   /**
    * Get the approximate amount of DRAM cache space used.
    */
-  public long getCacheUsed() {
+  public long getMemCacheUsed() {
     return memCacheStats.getCacheUsed();
   }
 
   /**
-   * Get the approximate amount of persistent memory cache space used.
-   * TODO: advertise this metric to NameNode by FSDatasetMBean
+   * Get the approximate amount of cache space used either on DRAM or
+   * on persistent memory.
+   * @return the cache space used, in bytes.
    */
-  public long getPmemCacheUsed() {
-    if (isPmemCacheEnabled()) {
-      return cacheLoader.getCacheUsed();
-    }
-    return 0;
+  public long getCacheUsed() {
+    return cacheLoader.getCacheUsed();
   }
 
   /**
-   * Get the maximum amount of bytes we can cache on DRAM. This is a constant.
+   * Get the maximum amount of bytes we can cache on DRAM.  This is a constant.
    */
-  public long getCacheCapacity() {
+  public long getMemCacheCapacity() {
     return memCacheStats.getCacheCapacity();
   }
 
   /**
-   * Get cache capacity of persistent memory.
-   * TODO: advertise this metric to NameNode by FSDatasetMBean
+   * Get the maximum amount of bytes we can cache either on DRAM or
+   * on persistent memory. This is a constant.
    */
-  public long getPmemCacheCapacity() {
-    if (isPmemCacheEnabled()) {
-      return cacheLoader.getCacheCapacity();
-    }
-    return 0;
+  public long getCacheCapacity() {
+    return cacheLoader.getCacheCapacity();
   }
 
   public long getNumBlocksFailedToCache() {
@@ -608,11 +589,6 @@ public synchronized boolean isCached(String bpid, long blockId) {
     return (val != null) && val.state.shouldAdvertise();
   }
 
-  @VisibleForTesting
-  MappableBlockLoader getCacheLoader() {
-    return cacheLoader;
-  }
-
   /**
    * This method can be executed during DataNode shutdown.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f8507633fb..ce96400890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3164,10 +3164,10 @@ private boolean saveNextReplica() {
   public void evictBlocks(long bytesNeeded) throws IOException {
     int iterations = 0;
-    final long cacheCapacity = cacheManager.getCacheCapacity();
+    final long cacheCapacity = cacheManager.getMemCacheCapacity();
 
     while (iterations++ < MAX_BLOCK_EVICTIONS_PER_ITERATION &&
-           (cacheCapacity - cacheManager.getCacheUsed()) < bytesNeeded) {
+           (cacheCapacity - cacheManager.getMemCacheUsed()) < bytesNeeded) {
       RamDiskReplica replicaState =
           ramDiskReplicaTracker.getNextCandidateForEviction();
 
       if (replicaState == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java
index 5b9ba3a1d6..5118774433 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.util.DataChecksum;
 
 import java.io.BufferedInputStream;
@@ -43,7 +44,7 @@ public abstract class MappableBlockLoader {
   /**
    * Initialize a specific MappableBlockLoader.
    */
-  abstract void initialize(FsDatasetCache cacheManager) throws IOException;
+  abstract CacheStats initialize(DNConf dnConf) throws IOException;
 
   /**
    * Load the block.
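
Note (outside the patch): the MappableBlockLoader hunk above changes the loader contract. initialize() now receives only the DNConf and returns the CacheStats object FsDatasetCache should use, instead of reaching back into the cache manager. A minimal, self-contained sketch of that pattern follows; the Sketch* types are stand-ins for illustration, not the Hadoop classes.

import java.io.IOException;

// Stand-in for CacheStats: tracks a fixed capacity.
class SketchCacheStats {
  private final long maxBytes;
  SketchCacheStats(long maxBytes) { this.maxBytes = maxBytes; }
  long getCacheCapacity() { return maxBytes; }
}

// Stand-in for DNConf: only the getter the loaders need here.
class SketchDnConf {
  long getMaxLockedMemory() { return 64L * 1024 * 1024; }
}

// Mirrors: abstract CacheStats initialize(DNConf dnConf) throws IOException;
abstract class SketchLoader {
  abstract SketchCacheStats initialize(SketchDnConf dnConf) throws IOException;
}

// DRAM-style loader: stats capacity is bounded by max locked memory.
class SketchMemoryLoader extends SketchLoader {
  private SketchCacheStats memCacheStats;

  @Override
  SketchCacheStats initialize(SketchDnConf dnConf) throws IOException {
    this.memCacheStats = new SketchCacheStats(dnConf.getMaxLockedMemory());
    return memCacheStats;
  }
}

// Pmem-style loader: returns zero-capacity DRAM stats, as the pmem
// loader below does with new CacheStats(0L).
class SketchPmemLoader extends SketchLoader {
  @Override
  SketchCacheStats initialize(SketchDnConf dnConf) throws IOException {
    return new SketchCacheStats(0L);
  }
}

The caller keeps whichever stats object the configured loader returns, which is what the FsDatasetCache constructor hunk above now does.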
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java
index dd4188c0b1..f5a9a41ecc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MemoryMappableBlockLoader.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,12 +40,13 @@ public class MemoryMappableBlockLoader extends MappableBlockLoader {
   private static final Logger LOG =
       LoggerFactory.getLogger(MemoryMappableBlockLoader.class);
 
-  private MemoryCacheStats memCacheStats;
+  private CacheStats memCacheStats;
 
   @Override
-  void initialize(FsDatasetCache cacheManager) throws IOException {
+  CacheStats initialize(DNConf dnConf) throws IOException {
     LOG.info("Initializing cache loader: MemoryMappableBlockLoader.");
-    this.memCacheStats = cacheManager.getMemCacheStats();
+    this.memCacheStats = new CacheStats(dnConf.getMaxLockedMemory());
+    return memCacheStats;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java
index 09e9454e76..a5af437024 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
 import org.apache.hadoop.util.DataChecksum;
@@ -47,8 +48,8 @@ public class NativePmemMappableBlockLoader extends PmemMappableBlockLoader {
       LoggerFactory.getLogger(NativePmemMappableBlockLoader.class);
 
   @Override
-  void initialize(FsDatasetCache cacheManager) throws IOException {
-    super.initialize(cacheManager);
+  CacheStats initialize(DNConf dnConf) throws IOException {
+    return super.initialize(dnConf);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java
index 70a42c41f2..19dcc4b0f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemMappableBlockLoader.java
@@ -42,11 +42,16 @@ public class PmemMappableBlockLoader
     extends MappableBlockLoader {
   private PmemVolumeManager pmemVolumeManager;
   @Override
-  void initialize(FsDatasetCache cacheManager) throws IOException {
+  CacheStats initialize(DNConf dnConf) throws IOException {
     LOG.info("Initializing cache loader: " + this.getClass().getName());
-    DNConf dnConf = cacheManager.getDnConf();
     PmemVolumeManager.init(dnConf.getPmemVolumes());
     pmemVolumeManager = PmemVolumeManager.getInstance();
+    // The configuration for max locked memory is shadowed.
+    LOG.info("Persistent memory is used for caching data instead of " +
+        "DRAM. Max locked memory is set to zero to disable the DRAM cache.");
+    // TODO: PMem does not support the Lazy Writer yet; these stats will be
+    // refined when that support is implemented.
+    return new CacheStats(0L);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
index 58812db7b4..3c1b70503c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
@@ -84,7 +84,6 @@ public class TestCacheByPmemMappableBlockLoader {
   private static DistributedFileSystem fs;
   private static DataNode dn;
   private static FsDatasetCache cacheManager;
-  private static PmemMappableBlockLoader cacheLoader;
   /**
    * Used to pause DN BPServiceActor threads. BPSA threads acquire the
    * shared read lock. The test acquires the write lock for exclusive access.
@@ -131,8 +130,6 @@ public void setUp() throws Exception {
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
     conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        CACHE_CAPACITY);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY, 10);
 
@@ -153,7 +150,6 @@ public void setUp() throws Exception {
     fs = cluster.getFileSystem();
     dn = cluster.getDataNodes().get(0);
     cacheManager = ((FsDatasetImpl) dn.getFSDataset()).cacheManager;
-    cacheLoader = (PmemMappableBlockLoader) cacheManager.getCacheLoader();
   }
 
   @After
@@ -216,7 +212,9 @@ public void testCacheAndUncache() throws Exception {
         Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
     BlockReaderTestUtil.enableHdfsCachingTracing();
     Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
-    assertEquals(CACHE_CAPACITY, cacheManager.getPmemCacheCapacity());
+    assertEquals(CACHE_CAPACITY, cacheManager.getCacheCapacity());
+    // DRAM cache is expected to be disabled.
+    assertEquals(0L, cacheManager.getMemCacheCapacity());
 
     final Path testFile = new Path("/testFile");
     final long testFileLen = maxCacheBlocksNum * BLOCK_SIZE;
@@ -246,7 +244,9 @@ public Boolean get() {
     }, 1000, 30000);
 
     // The pmem cache space is expected to have been used up.
-    assertEquals(CACHE_CAPACITY, cacheManager.getPmemCacheUsed());
+    assertEquals(CACHE_CAPACITY, cacheManager.getCacheUsed());
+    // There should be no cache used on DRAM.
+    assertEquals(0L, cacheManager.getMemCacheUsed());
 
     Map blockKeyToVolume = PmemVolumeManager.getInstance().getBlockKeyToVolume();
     // All block keys should be kept in blockKeyToVolume
@@ -318,7 +318,7 @@ public Boolean get() {
     }, 1000, 30000);
 
     // It is expected that no pmem cache space is used.
-    assertEquals(0, cacheManager.getPmemCacheUsed());
+    assertEquals(0, cacheManager.getCacheUsed());
     // No record should be kept by blockKeyToVolume after testFile is uncached.
     assertEquals(blockKeyToVolume.size(), 0);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index ac9c2fc832..ae42bd8994 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -63,7 +63,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MemoryCacheStats.PageRounder;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.CacheStats.PageRounder;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
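
Note (outside the patch): after this change FsDatasetCache exposes two accessor pairs. getMemCacheUsed()/getMemCacheCapacity() report the DRAM-backed memCacheStats, still used by the lazy-writer eviction path in FsDatasetImpl#evictBlocks above, while getCacheUsed()/getCacheCapacity() delegate to the configured loader, so they report DRAM numbers for MemoryMappableBlockLoader and pmem numbers for the pmem loaders. A caller-side sketch of the split follows; the logCacheHeadroom helper is hypothetical and not part of the patch.

// Hypothetical helper, shown only to illustrate the accessor split.
static void logCacheHeadroom(FsDatasetCache cacheManager) {
  // Loader-backed totals: DRAM or persistent memory, depending on the
  // configured MappableBlockLoader.
  long used = cacheManager.getCacheUsed();
  long capacity = cacheManager.getCacheCapacity();
  // DRAM-only view; the pmem loaders return a zero-capacity CacheStats,
  // so both values stay 0 when pmem caching is enabled.
  long memUsed = cacheManager.getMemCacheUsed();
  long memCapacity = cacheManager.getMemCacheCapacity();
  System.out.println("cache used/capacity: " + used + "/" + capacity
      + " bytes; DRAM used/capacity: " + memUsed + "/" + memCapacity
      + " bytes");
}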