diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 57c567e5e9..f906ab660e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -25,6 +25,10 @@
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -38,14 +42,9 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
 
 /**
  * XceiverClientManager is responsible for the lifecycle of XceiverClient
@@ -76,20 +75,21 @@ public class XceiverClientManager implements Closeable {
    * @param conf configuration
    */
   public XceiverClientManager(Configuration conf) {
+    this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class));
+  }
+
+  public XceiverClientManager(Configuration conf, ScmClientConfig clientConf) {
+    Preconditions.checkNotNull(clientConf);
     Preconditions.checkNotNull(conf);
-    int maxSize = conf.getInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
-        SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT);
-    long staleThresholdMs = conf.getTimeDuration(
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT, TimeUnit.MILLISECONDS);
+    long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS);
     this.useRatis = conf.getBoolean(
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
     this.conf = conf;
     this.isSecurityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf);
     this.clientCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(staleThresholdMs, TimeUnit.MILLISECONDS)
-        .maximumSize(maxSize)
+        .expireAfterAccess(staleThresholdMs, MILLISECONDS)
+        .maximumSize(clientConf.getMaxSize())
         .removalListener(
             new RemovalListener<String, XceiverClientSpi>() {
             @Override
@@ -299,4 +299,65 @@ public synchronized static XceiverClientMetrics getXceiverClientMetrics() {
     return metrics;
   }
+
+  /**
+   * Configuration for HDDS client.
+   */
+  @ConfigGroup(prefix = "scm.container.client")
+  public static class ScmClientConfig {
+
+    private int maxSize;
+    private long staleThreshold;
+    private int maxOutstandingRequests;
+
+    public long getStaleThreshold(TimeUnit unit) {
+      return unit.convert(staleThreshold, MILLISECONDS);
+    }
+
+    @Config(key = "idle.threshold",
+        type = ConfigType.TIME, timeUnit = MILLISECONDS,
+        defaultValue = "10s",
+        tags = { OZONE, PERFORMANCE },
+        description =
+            "In the standalone pipelines, the SCM clients use netty to"
+                + " communicate with the container. It also uses connection pooling"
+                + " to reduce client side overheads. This allows a connection to"
+                + " stay idle for a while before the connection is closed."
+    )
+    public void setStaleThreshold(long staleThreshold) {
+      this.staleThreshold = staleThreshold;
+    }
+
+    public int getMaxSize() {
+      return maxSize;
+    }
+
+    @Config(key = "max.size",
+        defaultValue = "256",
+        tags = { OZONE, PERFORMANCE },
+        description =
+            "Controls the maximum number of connections that are cached via"
+                + " client connection pooling. If the number of connections"
+                + " exceeds this count, then the oldest idle connection is evicted."
+    )
+    public void setMaxSize(int maxSize) {
+      this.maxSize = maxSize;
+    }
+
+    public int getMaxOutstandingRequests() {
+      return maxOutstandingRequests;
+    }
+
+    @Config(key = "max.outstanding.requests",
+        defaultValue = "100",
+        tags = { OZONE, PERFORMANCE },
+        description =
+            "Controls the maximum number of outstanding async requests that can"
+                + " be handled by the Standalone as well as Ratis client."
+    )
+    public void setMaxOutstandingRequests(int maxOutstandingRequests) {
+      this.maxOutstandingRequests = maxOutstandingRequests;
+    }
+  }
+
 }
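With the @ConfigGroup prefix "scm.container.client" and the @Config keys above, the typed ScmClientConfig takes over the three string constants that the rest of this patch removes from ScmConfigKeys and ozone-default.xml. The sketch below is illustrative only (the class name and the override values are made up) and assumes the annotation framework behind OzoneConfiguration#getObject resolves each property as prefix + "." + key and honors the declared TIME unit; neither the framework nor those details are part of this diff.

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;

public final class ScmClientConfigExample {

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Same property names as before, now derived from the annotations:
    // "scm.container.client" + ".idle.threshold" / ".max.size".
    conf.setTimeDuration("scm.container.client.idle.threshold",
        30, TimeUnit.SECONDS);
    conf.setInt("scm.container.client.max.size", 128);

    // getObject() injects the values through the annotated setters,
    // falling back to each @Config defaultValue when a key is unset.
    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);

    // Expected: 30000, assuming the TIME value is held in milliseconds.
    System.out.println(clientConfig.getStaleThreshold(MILLISECONDS));
    // Expected: 128; max.outstanding.requests stays at its default of 100.
    System.out.println(clientConfig.getMaxSize());
    System.out.println(clientConfig.getMaxOutstandingRequests());
  }
}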
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 97b8f95b87..1fa2665d07 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
@@ -285,10 +285,9 @@ public static CloseableHttpClient newHttpClient(Configuration conf) {
    * Standalone and Ratis client.
    */
   public static int getMaxOutstandingRequests(Configuration config) {
-    return config
-        .getInt(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS,
-            ScmConfigKeys
-                .SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT);
+    return OzoneConfiguration.of(config)
+        .getObject(ScmClientConfig.class)
+        .getMaxOutstandingRequests();
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index b32ad63353..c0486335cd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -34,6 +34,7 @@
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
@@ -46,6 +47,14 @@ public class OzoneConfiguration extends Configuration {
     activate();
   }
 
+  public static OzoneConfiguration of(Configuration conf) {
+    Preconditions.checkNotNull(conf);
+
+    return conf instanceof OzoneConfiguration
+        ? (OzoneConfiguration) conf
+        : new OzoneConfiguration(conf);
+  }
+
   public OzoneConfiguration() {
     OzoneConfiguration.activate();
     loadDefaults();
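OzoneConfiguration.of(Configuration) gives callers one null-checked way to obtain an OzoneConfiguration: an instance that already is one is returned unchanged, anything else is wrapped exactly once. A minimal sketch of both branches (the wrapper class name is illustrative, everything else is taken from the hunk above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class OzoneConfigurationOfExample {

  public static void main(String[] args) {
    // A plain Hadoop Configuration is wrapped into an OzoneConfiguration.
    Configuration hadoopConf = new Configuration();
    OzoneConfiguration wrapped = OzoneConfiguration.of(hadoopConf);
    System.out.println(wrapped != hadoopConf); // true

    // An existing OzoneConfiguration is returned as-is, so of() can be
    // called unconditionally (as BasicOzoneClientAdapterImpl now does).
    OzoneConfiguration ozoneConf = new OzoneConfiguration();
    System.out.println(OzoneConfiguration.of(ozoneConf) == ozoneConf); // true
  }
}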
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 9c93a63b17..f00ecb23c3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -36,21 +36,6 @@ public final class ScmConfigKeys {
   // performance.
   public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs";
 
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
-      "scm.container.client.idle.threshold";
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
-      "10s";
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
-      "scm.container.client.max.size";
-  public static final int SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT =
-      256;
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS =
-      "scm.container.client.max.outstanding.requests";
-  public static final int SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT
-      = 100;
-
   public static final String DFS_CONTAINER_RATIS_ENABLED_KEY =
       "dfs.container.ratis.enabled";
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 52e83698f4..82307a4962 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1061,39 +1061,6 @@
     </description>
   </property>
-
-  <property>
-    <name>scm.container.client.idle.threshold</name>
-    <value>10s</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      In the standalone pipelines, the SCM clients use netty to
-      communicate with the container. It also uses connection pooling to
-      reduce client side overheads. This allows a connection to stay idle for
-      a while before the connection is closed.
-    </description>
-  </property>
-
-  <property>
-    <name>scm.container.client.max.size</name>
-    <value>256</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of connections that we cached via
-      clientconnection pooling. If the number of connection
-      exceed this count then the oldest idle connection is evicted.
-    </description>
-  </property>
-
-  <property>
-    <name>scm.container.client.max.outstanding.requests</name>
-    <value>100</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of outstanding async requests that can be
-      handled by the Standalone as well as Ratis client.
-    </description>
-  </property>
   <property>
     <name>ozone.scm.container.creation.lease.timeout</name>
     <value>60s</value>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 9d4ed68082..a92cd3a729 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.scm;
 
 import com.google.common.cache.Cache;
+import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -39,8 +40,6 @@
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
-import static org.apache.hadoop.hdds.scm
-    .ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
 
 /**
  * Test for XceiverClientManager caching and eviction.
@@ -110,11 +109,13 @@ public void testCaching() throws IOException {
   @Test
   public void testFreeByReference() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1);
+    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
+    clientConfig.setMaxSize(1);
     String metaDir = GenericTestUtils.getTempPath(
         TestXceiverClientManager.class.getName() + UUID.randomUUID());
     conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
+    XceiverClientManager clientManager =
+        new XceiverClientManager(conf, clientConfig);
     Cache<String, XceiverClientSpi> cache =
         clientManager.getClientCache();
@@ -166,11 +167,13 @@ public void testFreeByReference() throws IOException {
   @Test
   public void testFreeByEviction() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1);
+    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
+    clientConfig.setMaxSize(1);
     String metaDir = GenericTestUtils.getTempPath(
         TestXceiverClientManager.class.getName() + UUID.randomUUID());
     conf.set(HDDS_METADATA_DIR_NAME, metaDir);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
+    XceiverClientManager clientManager =
+        new XceiverClientManager(conf, clientConfig);
     Cache<String, XceiverClientSpi> cache =
         clientManager.getClientCache();
@@ -216,8 +219,10 @@ public void testFreeByEviction() throws IOException {
   @Test
   public void testFreeByRetryFailure() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1);
-    XceiverClientManager clientManager = new XceiverClientManager(conf);
+    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
+    clientConfig.setMaxSize(1);
+    XceiverClientManager clientManager =
+        new XceiverClientManager(conf, clientConfig);
     Cache<String, XceiverClientSpi> cache =
         clientManager.getClientCache();
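The updated tests override the client cache size through the typed object instead of conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1), and hand it to the new two-argument constructor. The same pattern outside a MiniOzoneCluster test might look like the sketch below; the class and method names are illustrative and only the calls shown in the hunks above are assumed to exist.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;

public final class SmallClientCacheFactory {

  // Builds a manager whose client cache holds at most one connection.
  public static XceiverClientManager newSingleEntryCacheManager() {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Start from the configured (or default) values, then override only
    // the cache size, exactly as the updated tests do.
    ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class);
    clientConfig.setMaxSize(1);

    // The two-argument constructor accepts the typed config directly, so
    // the override no longer round-trips through string keys.
    return new XceiverClientManager(conf, clientConfig);
  }
}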
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index e00208761a..5e1a50d31c 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -109,12 +109,7 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort,
     ClassLoader contextClassLoader =
         Thread.currentThread().getContextClassLoader();
     Thread.currentThread().setContextClassLoader(null);
-    OzoneConfiguration conf;
-    if (hadoopConf instanceof OzoneConfiguration) {
-      conf = (OzoneConfiguration) hadoopConf;
-    } else {
-      conf = new OzoneConfiguration(hadoopConf);
-    }
+    OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf);
 
     if (omHost == null && OmUtils.isServiceIdsDefined(conf)) {
       // When the host name or service id isn't given
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 1759e5c62e..c1e67c9d91 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;