From 08bb0362e0c57f562e2f2e366cba725649d1d9c8 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Wed, 31 Oct 2018 11:23:15 -0700 Subject: [PATCH] HDDS-759. Create config settings for SCM and OM DB directories. Contributed by Arpit Agarwal. --- .../apache/hadoop/hdds/HddsConfigKeys.java | 7 ++ .../org/apache/hadoop/hdds/HddsUtils.java | 2 +- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 + .../apache/hadoop/ozone/OzoneConfigKeys.java | 3 - .../src/main/resources/ozone-default.xml | 43 ++++++--- .../hadoop/hdds/scm/HddsServerUtil.java | 16 ++-- .../ozone/container/common/SCMTestUtils.java | 3 +- .../common/TestDatanodeStateMachine.java | 3 +- .../ozoneimpl/TestOzoneContainer.java | 4 +- .../hadoop/hdds/server/ServerUtils.java | 49 ++++++++-- .../hdds/scm/block/DeletedBlockLogImpl.java | 10 +- .../scm/container/SCMContainerManager.java | 4 +- .../hdds/scm/pipeline/SCMPipelineManager.java | 6 +- .../hadoop/hdds/scm/server/SCMStorage.java | 4 +- .../hadoop/hdds/scm/TestHddsServerUtils.java | 50 ++++++++++ .../hdds/scm/block/TestBlockManager.java | 4 +- .../hdds/scm/block/TestDeletedBlockLog.java | 4 +- .../TestCloseContainerEventHandler.java | 4 +- .../container/TestContainerReportHandler.java | 4 +- .../container/TestSCMContainerManager.java | 4 +- .../hdds/scm/node/TestContainerPlacement.java | 4 +- .../hdds/scm/node/TestDeadNodeHandler.java | 4 +- .../hadoop/hdds/scm/node/TestNodeManager.java | 4 +- .../ozone/container/common/TestEndPoint.java | 3 +- .../java/org/apache/hadoop/ozone/OmUtils.java | 42 +++++++++ .../apache/hadoop/ozone/om/OMConfigKeys.java | 3 + .../org/apache/hadoop/ozone/TestOmUtils.java | 91 +++++++++++++++++++ .../scm/pipeline/TestSCMPipelineManager.java | 4 +- .../hadoop/ozone/MiniOzoneClusterImpl.java | 5 +- .../hadoop/ozone/TestMiniOzoneCluster.java | 7 +- .../ozone/TestStorageContainerManager.java | 9 +- .../hadoop/ozone/om/TestOzoneManager.java | 3 +- .../org/apache/hadoop/ozone/om/OMStorage.java | 5 +- .../ozone/om/OmMetadataManagerImpl.java | 5 +- .../apache/hadoop/ozone/om/TestOmSQLCli.java | 3 +- .../hadoop/ozone/scm/TestContainerSQLCli.java | 3 +- 36 files changed, 342 insertions(+), 82 deletions(-) create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 210b075fd3..abacafe772 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -97,4 +97,11 @@ private HddsConfigKeys() { "hdds.lock.max.concurrency"; public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100; + // This configuration setting is used as a fallback location by all + // Ozone/HDDS services for their metadata. It is useful as a single + // config point for test/PoC clusters. + // + // In any real cluster where performance matters, the SCM, OM and DN + // metadata locations must be configured explicitly. 
+ public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 09fc75b333..89edfddd09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -305,7 +305,7 @@ public static boolean isHddsEnabled(Configuration conf) { public static String getDatanodeIdFilePath(Configuration conf) { String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID); if (dataNodeIDPath == null) { - String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); + String metaPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS); if (Strings.isNullOrEmpty(metaPath)) { // this means meta data is not found, in theory should not happen at // this point because should've failed earlier. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index f95b74867c..56692afaa8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -31,6 +31,11 @@ @InterfaceStability.Unstable public final class ScmConfigKeys { + // Location of SCM DB files. For now we just support a single + // metadata dir but in future we may support multiple for redundancy or + // performance. + public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs"; + public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY = "scm.container.client.idle.threshold"; public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index c931dcfd2f..3b4f017c19 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -74,9 +74,6 @@ public final class OzoneConfigKeys { "ozone.trace.enabled"; public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; - public static final String OZONE_METADATA_DIRS = - "ozone.metadata.dirs"; - public static final String OZONE_METADATA_STORE_IMPL = "ozone.metastore.impl"; public static final String OZONE_METADATA_STORE_IMPL_LEVELDB = diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 237f8d8a22..eb686626f4 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -490,22 +490,31 @@ Default user permissions used in OM. + + ozone.om.db.dirs + + OZONE, OM, STORAGE, PERFORMANCE + + Directory where the OzoneManager stores its metadata. This should + be specified as a single directory. If the directory does not + exist then the OM will attempt to create it. + + If undefined, then the OM will log a warning and fallback to + ozone.metadata.dirs. + + ozone.metadata.dirs - OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE + OZONE, OM, SCM, CONTAINER, STORAGE - Ozone metadata is shared among OM, which acts as the namespace - manager for ozone, SCM which acts as the block manager and data nodes - which maintain the name of the key(Key Name and BlockIDs). 
This
-      replicated and distributed metadata store is maintained under the
-      directory pointed by this key. Since metadata can be I/O intensive, at
-      least on OM and SCM we recommend having SSDs. If you have the luxury
-      of mapping this path to SSDs on all machines in the cluster, that will
-      be excellent.
+      This setting is the fallback location for SCM, OM and DataNodes
+      to store their metadata. This setting may be used in test/PoC clusters
+      to simplify configuration.
-      If Ratis metadata directories are not specified, Ratis server will emit a
-      warning and use this path for storing its metadata too.
+      For production clusters or any time you care about performance, it is
+      recommended that ozone.om.db.dirs, ozone.scm.db.dirs and
+      dfs.container.ratis.datanode.storage.dir be configured separately.
@@ -533,7 +542,19 @@ Check the rocksdb documentation for more details.
+
+    ozone.scm.db.dirs
+
+    OZONE, SCM, STORAGE, PERFORMANCE
+
+      Directory where the StorageContainerManager stores its metadata.
+      This should be specified as a single directory. If the directory
+      does not exist then the SCM will attempt to create it.
+      If undefined, then the SCM will log a warning and fall back to
+      ozone.metadata.dirs.
+
+
     ozone.scm.block.client.address
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index 395a77d4be..738a2ef510 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;
@@ -331,19 +332,16 @@ public static int getContainerPort(Configuration conf) {
   }
 
   public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
-    final String ratisDir = File.separator + "ratis";
     String storageDir = conf.get(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
 
     if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = conf.get(OzoneConfigKeys
-          .OZONE_METADATA_DIRS);
-      Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
-          "cannot be null, Please check your configs.");
-      storageDir = storageDir.concat(ratisDir);
+      File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+      storageDir = (new File(metaDirPath, "ratis")).getPath();
       LOG.warn("Storage directory for Ratis is not configured." +
-          "Mapping Ratis storage under {}. It is a good idea " +
-          "to map this to an SSD disk.", storageDir);
+          " Mapping Ratis storage under {}. It is a good idea " +
+          "to map this to an SSD disk. Falling back to {}",
+          storageDir, HddsConfigKeys.OZONE_METADATA_DIRS);
     }
     return storageDir;
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index a24f096ddb..8e8e4f0369 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -18,6 +18,7 @@
 
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto
@@ -117,7 +118,7 @@ public static OzoneConfiguration getConf() {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils
         .getRandomizedTempPath());
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
         .getRandomizedTempPath());
     return conf;
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 3fc0dd04e5..260b15893b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -20,6 +20,7 @@
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ipc.RPC;
@@ -113,7 +114,7 @@ public void setUp() throws Exception {
     if (!dataDir.mkdirs()) {
       LOG.info("Data dir create failed.");
     }
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         new File(testRoot, "scm").getAbsolutePath());
     path = Paths.get(path.toString(),
         TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index fea126b5da..2326818d0c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -20,10 +20,10 @@
 
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -62,7 +62,7 @@ public void setUp() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot() .getAbsolutePath()); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath()); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index c6d85d8b4f..fae68397c2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -17,17 +17,18 @@ package org.apache.hadoop.hdds.server; -import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.http.client.methods.HttpRequestBase; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.net.InetSocketAddress; +import java.util.Collection; /** * Generic utilities for all HDDS/Ozone servers. @@ -116,6 +117,38 @@ public static void releaseConnection(HttpRequestBase request) { } } + /** + * Get the location where SCM should store its metadata directories. + * Fall back to OZONE_METADATA_DIRS if not defined. + * + * @param conf + * @return + */ + public static File getScmDbDir(Configuration conf) { + final Collection metadirs = conf.getTrimmedStringCollection( + ScmConfigKeys.OZONE_SCM_DB_DIRS); + + if (metadirs.size() > 1) { + throw new IllegalArgumentException( + "Bad config setting " + ScmConfigKeys.OZONE_SCM_DB_DIRS + + ". SCM does not support multiple metadata dirs currently"); + } + + if (metadirs.size() == 1) { + final File dbDirPath = new File(metadirs.iterator().next()); + if (!dbDirPath.exists() && !dbDirPath.mkdirs()) { + throw new IllegalArgumentException("Unable to create directory " + + dbDirPath + " specified in configuration setting " + + ScmConfigKeys.OZONE_SCM_DB_DIRS); + } + return dbDirPath; + } + + LOG.warn("{} is not configured. We recommend adding this setting. " + + "Falling back to {} instead.", + ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS); + return getOzoneMetaDirPath(conf); + } /** * Checks and creates Ozone Metadir Path if it does not exist. @@ -125,9 +158,13 @@ public static void releaseConnection(HttpRequestBase request) { * @return File MetaDir */ public static File getOzoneMetaDirPath(Configuration conf) { - String metaDirPath = conf.getTrimmed(OzoneConfigKeys - .OZONE_METADATA_DIRS); - Preconditions.checkNotNull(metaDirPath); + String metaDirPath = conf.getTrimmed(HddsConfigKeys.OZONE_METADATA_DIRS); + + if (metaDirPath == null || metaDirPath.isEmpty()) { + throw new IllegalArgumentException( + HddsConfigKeys.OZONE_METADATA_DIRS + " must be defined."); + } + File dirPath = new File(metaDirPath); if (!dirPath.exists() && !dirPath.mkdirs()) { throw new IllegalArgumentException("Unable to create paths. 
Path: " + @@ -138,7 +175,7 @@ public static File getOzoneMetaDirPath(Configuration conf) { public static void setOzoneMetaDirPath(OzoneConfiguration conf, String path) { - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 2a8a3e31e0..a5ee1303dc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdfs.DFSUtil; @@ -69,7 +70,6 @@ .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_BLOCK_DB; /** @@ -105,10 +105,10 @@ public DeletedBlockLogImpl(Configuration conf, maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT); - File metaDir = getOzoneMetaDirPath(conf); - String scmMetaDataDir = metaDir.getPath(); - File deletedLogDbPath = new File(scmMetaDataDir, DELETED_BLOCK_DB); - int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, + final File metaDir = ServerUtils.getScmDbDir(conf); + final String scmMetaDataDir = metaDir.getPath(); + final File deletedLogDbPath = new File(scmMetaDataDir, DELETED_BLOCK_DB); + final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, OZONE_SCM_DB_CACHE_SIZE_DEFAULT); // Load store of all transactions. 
deletedStore = MetadataStoreBuilder.newBuilder() diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 4e6f09e315..0ea749f633 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -41,6 +41,7 @@ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lease.Lease; @@ -73,7 +74,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_CHANGE_CONTAINER_STATE; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; /** @@ -111,7 +111,7 @@ public SCMContainerManager(final Configuration conf, final NodeManager nodeManager, PipelineManager pipelineManager, final EventPublisher eventPublisher) throws IOException { - final File metaDir = getOzoneMetaDirPath(conf); + final File metaDir = ServerUtils.getScmDbDir(conf); final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB); final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, OZONE_SCM_DB_CACHE_SIZE_DEFAULT); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index a8536931cf..1c217482c6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.utils.MetadataStore; @@ -46,7 +47,6 @@ .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB; /** @@ -74,8 +74,8 @@ public SCMPipelineManager(Configuration conf, NodeManager nodeManager, this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, conf); int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - File metaDir = getOzoneMetaDirPath(conf); - File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB); + final File metaDir = ServerUtils.getScmDbDir(conf); + final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB); this.pipelineStore = MetadataStoreBuilder.newBuilder() .setCreateIfMissing(true) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java index be6c1af186..8a19850106 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorage.java @@ -19,13 +19,13 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.common.Storage; import java.io.IOException; import java.util.Properties; import java.util.UUID; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR; @@ -40,7 +40,7 @@ public class SCMStorage extends Storage { * @throws IOException if any directories are inaccessible. */ public SCMStorage(OzoneConfiguration conf) throws IOException { - super(NodeType.SCM, getOzoneMetaDirPath(conf), STORAGE_DIR); + super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR); } public void setScmId(String scmId) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java index 21acda8543..6c4f249f76 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hdds.scm; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -27,6 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; import java.net.InetSocketAddress; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; @@ -34,6 +39,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Unit tests for {@link HddsServerUtil} @@ -150,4 +156,48 @@ public void testClientFailsWithMultipleScmNames() { thrown.expect(IllegalArgumentException.class); HddsServerUtil.getScmAddressForDataNodes(conf); } + + /** + * Test {@link ServerUtils#getScmDbDir}. + */ + @Test + public void testGetScmDbDir() { + final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); + final File dbDir = new File(testDir, "scmDbDir"); + final File metaDir = new File(testDir, "metaDir"); // should be ignored. + final Configuration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); + + try { + assertEquals(dbDir, ServerUtils.getScmDbDir(conf)); + assertTrue(dbDir.exists()); // should have been created. + } finally { + FileUtils.deleteQuietly(dbDir); + } + } + + /** + * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS + * when OZONE_SCM_DB_DIRS is undefined. 
+ */ + @Test + public void testGetScmDbDirWithFallback() { + final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); + final File metaDir = new File(testDir, "metaDir"); + final Configuration conf = new OzoneConfiguration(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); + try { + assertEquals(metaDir, ServerUtils.getScmDbDir(conf)); + assertTrue(metaDir.exists()); // should have been created. + } finally { + FileUtils.deleteQuietly(metaDir); + } + } + + @Test + public void testNoScmDbDirConfigured() { + thrown.expect(IllegalArgumentException.class); + ServerUtils.getScmDbDir(new OzoneConfiguration()); + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 32e8640a1f..aa940dfa10 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -20,6 +20,7 @@ import java.util.UUID; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; @@ -34,7 +35,6 @@ import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; @@ -79,7 +79,7 @@ public void setUp() throws Exception { String path = GenericTestUtils .getTempPath(TestBlockManager.class.getSimpleName()); testDir = Paths.get(path).toFile(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path); eventQueue = new EventQueue(); boolean folderExisted = testDir.exists() || testDir.mkdirs(); if (!folderExisted) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 06f4f5eb74..268cf8be84 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -19,6 +19,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -60,7 +61,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.when; @@ -81,7 +81,7 @@ public void setup() throws Exception { TestDeletedBlockLog.class.getSimpleName()); conf = new OzoneConfiguration(); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + 
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); containerManager = Mockito.mock(SCMContainerManager.class); deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager); dnList = new ArrayList<>(3); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index 44a8deb90f..b1d24d56c9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers @@ -28,7 +29,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; @@ -65,7 +65,7 @@ public static void setUp() throws Exception { testDir = GenericTestUtils .getTestDir(TestCloseContainerEventHandler.class.getSimpleName()); configuration - .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); nodeManager = new MockNodeManager(true, 10); PipelineManager pipelineManager = new SCMPipelineManager(configuration, nodeManager, eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index a5475e21a8..faae6f5d9a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -21,6 +21,7 @@ import java.util.HashSet; import java.util.List; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -40,7 +41,6 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Before; @@ -74,7 +74,7 @@ public void test() throws IOException { this.getClass().getSimpleName()); //GIVEN OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir); EventQueue eventQueue = new EventQueue(); PipelineManager pipelineManager = new SCMPipelineManager(conf, nodeManager, eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index 02c292c7ec..be4cce6aa2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -34,7 +35,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; @@ -77,7 +77,7 @@ public static void setUp() throws Exception { testDir = GenericTestUtils .getTestDir(TestSCMContainerManager.class.getSimpleName()); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index e283732585..7a9dbad79c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -21,6 +21,7 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -36,7 +37,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.test.PathUtils; import org.junit.Ignore; @@ -130,7 +130,7 @@ public void testContainerPlacementCapacity() throws IOException, final File testDir = PathUtils.getTestDir( TestContainerPlacement.class); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 985fa2c6c1..51099f413d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.fs.FileUtil; +import 
org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -48,7 +49,6 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; @@ -74,7 +74,7 @@ public void setup() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); storageDir = GenericTestUtils.getTempPath( TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, storageDir); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); eventQueue = new EventQueue(); nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue); PipelineManager pipelineManager = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java index c899bdad18..08cbdd7022 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; @@ -30,7 +31,6 @@ .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; @@ -104,7 +104,7 @@ public void cleanup() { */ OzoneConfiguration getConf() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index b2ddf39a57..3a2ce78fbf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -75,6 +75,8 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.mockito.Mockito.mock; import java.io.File; @@ -82,7 +84,6 @@ import java.util.UUID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils .createEndpoint; import static org.hamcrest.Matchers.lessThanOrEqualTo; diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 097410405f..d8608beed5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -17,12 +17,20 @@ package org.apache.hadoop.ozone; +import java.io.File; import java.net.InetSocketAddress; +import java.util.Collection; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.net.NetUtils; import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; @@ -36,6 +44,7 @@ * communication. */ public final class OmUtils { + private static final Logger LOG = LoggerFactory.getLogger(OmUtils.class); private OmUtils() { } @@ -91,4 +100,37 @@ public static int getOmRestPort(Configuration conf) { getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY); return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT); } + + /** + * Get the location where OM should store its metadata directories. + * Fall back to OZONE_METADATA_DIRS if not defined. + * + * @param conf + * @return + */ + public static File getOmDbDir(Configuration conf) { + final Collection dbDirs = conf.getTrimmedStringCollection( + OMConfigKeys.OZONE_OM_DB_DIRS); + + if (dbDirs.size() > 1) { + throw new IllegalArgumentException( + "Bad configuration setting " + OMConfigKeys.OZONE_OM_DB_DIRS + + ". OM does not support multiple metadata dirs currently."); + } + + if (dbDirs.size() == 1) { + final File dbDirPath = new File(dbDirs.iterator().next()); + if (!dbDirPath.exists() && !dbDirPath.mkdirs()) { + throw new IllegalArgumentException("Unable to create directory " + + dbDirPath + " specified in configuration setting " + + OMConfigKeys.OZONE_OM_DB_DIRS); + } + return dbDirPath; + } + + LOG.warn("{} is not configured. We recommend adding this setting. " + + "Falling back to {} instead.", + OMConfigKeys.OZONE_OM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS); + return ServerUtils.getOzoneMetaDirPath(conf); + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index b9ca296631..6e9acb40aa 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -28,6 +28,9 @@ public final class OMConfigKeys { private OMConfigKeys() { } + // Location where the OM stores its DB files. In the future we may support + // multiple entries for performance (sharding).. 
+ public static final String OZONE_OM_DB_DIRS = "ozone.om.db.dirs"; public static final String OZONE_OM_HANDLER_COUNT_KEY = "ozone.om.handler.count.key"; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java new file mode 100644 index 0000000000..2001598f45 --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; + +import java.io.File; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Unit tests for {@link OmUtils}. + */ +public class TestOmUtils { + @Rule + public Timeout timeout = new Timeout(60_000); + + @Rule + public ExpectedException thrown= ExpectedException.none(); + + /** + * Test {@link OmUtils#getOmDbDir}. + */ + @Test + public void testGetOmDbDir() { + final File testDir = PathUtils.getTestDir(TestOmUtils.class); + final File dbDir = new File(testDir, "omDbDir"); + final File metaDir = new File(testDir, "metaDir"); // should be ignored. + final Configuration conf = new OzoneConfiguration(); + conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); + + try { + assertEquals(dbDir, OmUtils.getOmDbDir(conf)); + assertTrue(dbDir.exists()); // should have been created. + } finally { + FileUtils.deleteQuietly(dbDir); + } + } + + /** + * Test {@link OmUtils#getOmDbDir} with fallback to OZONE_METADATA_DIRS + * when OZONE_OM_DB_DIRS is undefined. + */ + @Test + public void testGetOmDbDirWithFallback() { + final File testDir = PathUtils.getTestDir(TestOmUtils.class); + final File metaDir = new File(testDir, "metaDir"); + final Configuration conf = new OzoneConfiguration(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); + + try { + assertEquals(metaDir, OmUtils.getOmDbDir(conf)); + assertTrue(metaDir.exists()); // should have been created. 
+ } finally { + FileUtils.deleteQuietly(metaDir); + } + } + + @Test + public void testNoOmDbDirConfigured() { + thrown.expect(IllegalArgumentException.class); + OmUtils.getOmDbDir(new OzoneConfiguration()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index dab7fb62e4..44877662cf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -28,7 +29,6 @@ import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -54,7 +54,7 @@ public static void setUp() throws Exception { conf = new OzoneConfiguration(); testDir = GenericTestUtils .getTestDir(TestSCMPipelineManager.class.getSimpleName()); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); boolean folderExisted = testDir.exists() || testDir.mkdirs(); if (!folderExisted) { throw new IOException("Unable to create test directory path"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 6c0f408123..37b6fdcd0f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -24,6 +24,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -389,7 +390,7 @@ private void initializeConfiguration() throws IOException { conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, ozoneEnabled); Path metaDir = Paths.get(path, "ozone-meta"); Files.createDirectories(metaDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); configureTrace(); } @@ -466,7 +467,7 @@ private List createHddsDatanodes( Files.createDirectories(metaDir); Files.createDirectories(dataDir); Files.createDirectories(ratisDir); - dnConf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); + dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.toString()); dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index e616eef12f..0ca47493b4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -47,7 +48,6 @@ import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.junit.Assert.*; /** @@ -65,8 +65,7 @@ public class TestMiniOzoneCluster { @BeforeClass public static void setup() { conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, - TEST_ROOT.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, TEST_ROOT.toString()); conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); WRITE_TMP.mkdirs(); READ_TMP.mkdirs(); @@ -156,7 +155,7 @@ public void testContainerRandomPort() throws IOException { Configuration ozoneConf = SCMTestUtils.getConf(); File testDir = PathUtils.getTestDir(TestOzoneContainer.class); ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - ozoneConf.set(OZONE_METADATA_DIRS, + ozoneConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, TEST_ROOT.toString()); // Each instance of SM will create an ozone container diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 52340a9fa8..23d371d687 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -34,6 +34,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -388,7 +389,7 @@ public void testSCMInitialization() throws Exception { final String path = GenericTestUtils.getTempPath( UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); StartupOption.INIT.setClusterId("testClusterId"); // This will initialize SCM @@ -410,7 +411,7 @@ public void testSCMReinitialization() throws Exception { final String path = GenericTestUtils.getTempPath( UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file MiniOzoneCluster cluster = 
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); @@ -430,7 +431,7 @@ public void testSCMInitializationFailure() throws IOException { final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); exception.expect(SCMException.class); exception.expectMessage("SCM not initialized."); @@ -455,7 +456,7 @@ public void testScmInfo() throws Exception { final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); SCMStorage scmStore = new SCMStorage(conf); String clusterId = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 44df68ae1b..99c07ffa76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -18,6 +18,7 @@ import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdfs.DFSUtil; @@ -1323,7 +1324,7 @@ public void testOmInitializationFailure() throws Exception { final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString()); Path metaDirPath = Paths.get(path, "om-meta"); - config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java index 3820aed76a..b7b6929f40 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java @@ -22,11 +22,12 @@ import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; /** * OMStorage is responsible for management of the StorageDirectories used by @@ -42,7 +43,7 @@ public class OMStorage extends Storage { * @throws IOException if any directories are inaccessible. 
*/ public OMStorage(OzoneConfiguration conf) throws IOException { - super(NodeType.OM, getOzoneMetaDirPath(conf), STORAGE_DIR); + super(NodeType.OM, OmUtils.getOmDbDir(conf), STORAGE_DIR); } public void setScmId(String scmId) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 75bd712337..cb7ca1f893 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -22,7 +22,9 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -54,7 +56,6 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; @@ -114,7 +115,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { private final Table s3Table; public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { - File metaDir = getOzoneMetaDirPath(conf); + File metaDir = OmUtils.getOmDbDir(conf); this.lock = new OzoneManagerLock(conf); this.openKeyExpireThresholdMS = 1000 * conf.getInt( OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java index a3ff6c8068..1dc3cab785 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -171,7 +172,7 @@ public void testOmDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( UUID.randomUUID() + "/out_sql.db"); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); + String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + OM_DB_NAME; String[] args = {"-p", dbPath, "-o", dbOutPath}; diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 054e6685fc..3247b87654 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.scm; +import org.apache.hadoop.hdds.HddsConfigKeys; import 
org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -192,7 +193,7 @@ public void testConvertContainerDB() throws Exception { UUID.randomUUID() + "/out_sql.db"); // TODO : the following will fail due to empty Datanode list, need to fix. //String dnUUID = cluster.getDataNodes().get(0).getUuid(); - String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); + String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS); String dbPath = dbRootPath + "/" + SCM_CONTAINER_DB; String[] args = {"-p", dbPath, "-o", dbOutPath}; Connection conn;
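
For reference, a minimal sketch of how the settings introduced by this patch might be combined in an ozone-site.xml. The key names (ozone.scm.db.dirs, ozone.om.db.dirs, ozone.metadata.dirs, dfs.container.ratis.datanode.storage.dir) come from the change above; the paths and the fragment itself are illustrative assumptions, not part of the patch.

  <!-- Hypothetical ozone-site.xml fragment; paths are placeholders. -->
  <property>
    <!-- Explicit SCM DB location (preferred for production). -->
    <name>ozone.scm.db.dirs</name>
    <value>/data/disk1/scm-db</value>
  </property>
  <property>
    <!-- Explicit OM DB location (preferred for production). -->
    <name>ozone.om.db.dirs</name>
    <value>/data/disk1/om-db</value>
  </property>
  <property>
    <!-- Shared fallback: used by SCM and OM when the keys above are unset, and
         by the DataNode Ratis storage when
         dfs.container.ratis.datanode.storage.dir is unset; the services then
         log a warning and place their metadata here. -->
    <name>ozone.metadata.dirs</name>
    <value>/data/disk1/ozone-meta</value>
  </property>

In a test/PoC cluster, setting only ozone.metadata.dirs is enough: ServerUtils.getScmDbDir, OmUtils.getOmDbDir and getOzoneDatanodeRatisDirectory all fall back to it (Ratis storage goes to a "ratis" subdirectory underneath), as described in the ozone-default.xml entries above.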