From aaad947c740c91c36139f3f0569ae78b53bca682 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Thu, 16 Apr 2020 20:16:41 -0700 Subject: [PATCH] HDFS-15283. Cache pool MAXTTL is not persisted and restored on cluster restart. Contributed by Stephen O'Donnell. Signed-off-by: Wei-Chiu Chuang (cherry picked from commit 3481895f8a9ea9f6e217a0ba158c48da89b3faf2) --- .../apache/hadoop/hdfs/server/namenode/CacheManager.java | 8 ++++++++ .../hadoop/hdfs/server/namenode/TestCacheDirectives.java | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 33bb74caab..66a8713f54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -1071,6 +1071,10 @@ public class CacheManager { if (p.getLimit() != null) b.setLimit(p.getLimit()); + if (p.getMaxRelativeExpiryMs() != null) { + b.setMaxRelativeExpiry(p.getMaxRelativeExpiryMs()); + } + pools.add(b.build()); } @@ -1136,6 +1140,10 @@ public class CacheManager { if (p.hasLimit()) info.setLimit(p.getLimit()); + if (p.hasMaxRelativeExpiry()) { + info.setMaxRelativeExpiryMs(p.getMaxRelativeExpiry()); + } + addCachePool(info); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index 58baf730d7..a7e21f4639 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -630,10 +630,12 
@@ public class TestCacheDirectives { String groupName = "partygroup"; FsPermission mode = new FsPermission((short)0777); long limit = 747; + long maxExpiry = 1234567890; dfs.addCachePool(new CachePoolInfo(pool) .setGroupName(groupName) .setMode(mode) - .setLimit(limit)); + .setLimit(limit) + .setMaxRelativeExpiryMs(maxExpiry)); RemoteIterator<CachePoolEntry> pit = dfs.listCachePools(); assertTrue("No cache pools found", pit.hasNext()); CachePoolInfo info = pit.next().getInfo(); @@ -641,6 +643,7 @@ public class TestCacheDirectives { assertEquals(groupName, info.getGroupName()); assertEquals(mode, info.getMode()); assertEquals(limit, (long)info.getLimit()); + assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs()); assertFalse("Unexpected # of cache pools found", pit.hasNext()); // Create some cache entries @@ -701,6 +704,7 @@ public class TestCacheDirectives { assertEquals(groupName, info.getGroupName()); assertEquals(mode, info.getMode()); assertEquals(limit, (long)info.getLimit()); + assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs()); assertFalse("Unexpected # of cache pools found", pit.hasNext()); dit = dfs.listCacheDirectives(null);