diff --git a/.gitignore b/.gitignore index 428950ba99..d5550364b8 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,10 @@ patchprocess/ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log +# Ignore files generated by HDDS acceptance tests. +hadoop-ozone/acceptance-test/docker-compose.log +hadoop-ozone/acceptance-test/junit-results.xml + #robotframework outputs log.html output.xml diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching index be330d5aaa..c30a37d2e5 100755 --- a/dev-support/bin/ozone-dist-layout-stitching +++ b/dev-support/bin/ozone-dist-layout-stitching @@ -148,7 +148,7 @@ run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" mkdir -p "./share/hadoop/ozonefs" cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar" # Optional documentation, could be missing -cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/ +cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/ cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/ diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index 6573a81eb5..3826f67a5e 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -404,13 +404,13 @@ esac # export HDFS_DFSROUTER_OPTS="" ### -# HDFS Key Space Manager specific parameters +# Ozone Manager specific parameters ### -# Specify the JVM options to be used when starting the HDFS Key Space Manager. +# Specify the JVM options to be used when starting the Ozone Manager. # These options will be appended to the options specified as HADOOP_OPTS # and therefore may override any similar flags set in HADOOP_OPTS # -# export HDFS_KSM_OPTS="" +# export HDFS_OM_OPTS="" ### # HDFS StorageContainerManager specific parameters diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml index 512c649e21..bb5e8dd535 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml @@ -25,17 +25,17 @@ services: command: ["/opt/hadoop/bin/ozone","datanode"] env_file: - ./docker-config - ksm: + ozoneManager: image: apache/hadoop-runner volumes: - ../../ozone:/opt/hadoop ports: - 9874:9874 environment: - ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","ksm"] + command: ["/opt/hadoop/bin/ozone","om"] scm: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config index 632f8701d2..50abb18e1a 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-config +++ b/hadoop-dist/src/main/compose/ozone/docker-config @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-OZONE-SITE.XML_ozone.ksm.address=ksm +OZONE-SITE.XML_ozone.om.address=ozoneManager OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id diff --git a/hadoop-dist/src/main/compose/ozoneperf/README.md b/hadoop-dist/src/main/compose/ozoneperf/README.md index a78f208732..527ff418d3 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/README.md +++ b/hadoop-dist/src/main/compose/ozoneperf/README.md @@ -67,7 +67,7 @@ http://localhost:9090/graph Example queries: ``` -Hadoop_KeySpaceManager_NumKeyCommits -rate(Hadoop_KeySpaceManager_NumKeyCommits[10m]) +Hadoop_OzoneManager_NumKeyCommits +rate(Hadoop_OzoneManager_NumKeyCommits[10m]) rate(Hadoop_Ozone_BYTES_WRITTEN[10m]) ``` diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml index 3233c11641..6d1d9cadb3 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml @@ -26,7 +26,7 @@ services: command: ["/opt/hadoop/bin/ozone","datanode"] env_file: - ./docker-config - ksm: + ozoneManager: image: apache/hadoop-runner volumes: - ../../ozone:/opt/hadoop @@ -34,10 +34,10 @@ services: ports: - 9874:9874 environment: - ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","ksm"] + command: ["/opt/hadoop/bin/ozone","om"] scm: image: apache/hadoop-runner volumes: diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config index 2be22a7792..253995014c 100644 --- a/hadoop-dist/src/main/compose/ozoneperf/docker-config +++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-OZONE-SITE.XML_ozone.ksm.address=ksm +OZONE-SITE.XML_ozone.om.address=ozoneManager OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index ad326dcb7f..4f1b1c8e2c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -94,7 +94,7 @@ public final class ScmConfigKeys { "ozone.scm.datanode.port"; public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861; - // OZONE_KSM_PORT_DEFAULT = 9862 + // OZONE_OM_PORT_DEFAULT = 9862 public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY = "ozone.scm.block.client.port"; public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 08a5ffdb87..4fad5d83a8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -93,7 +93,7 @@ public final class OzoneConsts { public static final String BLOCK_DB = "block.db"; public static final String OPEN_CONTAINERS_DB = "openContainers.db"; public static final String DELETED_BLOCK_DB = "deletedBlock.db"; - public static final String KSM_DB_NAME = "ksm.db"; + public static final String OM_DB_NAME = "om.db"; /** * Supports Bucket Versioning. @@ -119,13 +119,13 @@ public static Versioning getVersioning(boolean versioning) { public static final String OPEN_KEY_ID_DELIMINATOR = "#"; /** - * KSM LevelDB prefixes. + * OM LevelDB prefixes. * - * KSM DB stores metadata as KV pairs with certain prefixes, + * OM DB stores metadata as KV pairs with certain prefixes, * prefix is used to improve the performance to get related * metadata. * - * KSM DB Schema: + * OM DB Schema: * ---------------------------------------------------------- * | KEY | VALUE | * ---------------------------------------------------------- @@ -140,13 +140,13 @@ public static Versioning getVersioning(boolean versioning) { * | #deleting#/volumeName/bucketName/keyName | KeyInfo | * ---------------------------------------------------------- */ - public static final String KSM_VOLUME_PREFIX = "/#"; - public static final String KSM_BUCKET_PREFIX = "/#"; - public static final String KSM_KEY_PREFIX = "/"; - public static final String KSM_USER_PREFIX = "$"; + public static final String OM_VOLUME_PREFIX = "/#"; + public static final String OM_BUCKET_PREFIX = "/#"; + public static final String OM_KEY_PREFIX = "/"; + public static final String OM_USER_PREFIX = "$"; /** - * Max KSM Quota size of 1024 PB. + * Max OM Quota size of 1024 PB. */ public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB; @@ -168,9 +168,9 @@ public static Versioning getVersioning(boolean versioning) { public static final int INVALID_PORT = -1; - // The ServiceListJSONServlet context attribute where KeySpaceManager + // The ServiceListJSONServlet context attribute where OzoneManager // instance gets stored. 
- public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm"; + public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; private OzoneConsts() { // Never Constructed diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java index fb30d921b8..a32d559023 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java @@ -38,7 +38,7 @@ * Local storage information is stored in a separate file VERSION. * It contains type of the node, * the storage layout version, the SCM id, and - * the KSM/SCM state creation time. + * the OM/SCM state creation time. * */ @InterfaceAudience.Private @@ -127,7 +127,7 @@ protected StorageInfo getStorageInfo() { abstract protected Properties getNodeProperties(); /** - * Sets the Node properties spaecific to KSM/SCM. + * Sets the Node properties spaecific to OM/SCM. */ private void setNodeProperties() { Properties nodeProperties = getNodeProperties(); @@ -152,7 +152,7 @@ private File getCurrentDir() { * File {@code VERSION} contains the following fields: *
 *   <ol>
 *   <li>node type</li>
- *   <li>KSM/SCM state creation time</li>
+ *   <li>OM/SCM state creation time</li>
 *   <li>other fields specific for this node type</li>
 *   </ol>
* The version file is always written last during storage directory updates. diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto index 7bea82ab86..53f408ae88 100644 --- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto @@ -46,7 +46,7 @@ message AllocateScmBlockRequestProto { } /** - * A delete key request sent by KSM to SCM, it contains + * A delete key request sent by OM to SCM, it contains * multiple number of keys (and their blocks). */ message DeleteScmKeyBlocksRequestProto { @@ -56,9 +56,9 @@ message DeleteScmKeyBlocksRequestProto { /** * A object key and all its associated blocks. * We need to encapsulate object key name plus the blocks in this potocol - * because SCM needs to response KSM with the keys it has deleted. + * because SCM needs to response OM with the keys it has deleted. * If the response only contains blocks, it will be very expensive for - * KSM to figure out what keys have been deleted. + * OM to figure out what keys have been deleted. */ message KeyBlocks { required string key = 1; @@ -66,7 +66,7 @@ message KeyBlocks { } /** - * A delete key response from SCM to KSM, it contains multiple child-results. + * A delete key response from SCM to OM, it contains multiple child-results. * Each child-result represents a key deletion result, only if all blocks of * a key are successfully deleted, this key result is considered as succeed. */ @@ -111,7 +111,7 @@ message AllocateScmBlockResponseProto { } /** - * Protocol used from KeySpaceManager to StorageContainerManager. + * Protocol used from OzoneManager to StorageContainerManager. * See request and response messages for details of the RPC calls. */ service ScmBlockLocationProtocolService { diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index b9def2a3ec..a5ce994940 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -58,9 +58,9 @@ message KeyValue { * Type of the node. */ enum NodeType { - KSM = 1; - SCM = 2; - DATANODE = 3; + OM = 1; // Ozone Manager + SCM = 2; // Storage Container Manager + DATANODE = 3; // DataNode } // Should we rename NodeState to DatanodeState? diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 568d26712a..530fb09b24 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -21,7 +21,7 @@ - + @@ -254,122 +254,122 @@ Tells ozone which storage handler to use. The possible values are: distributed - The Ozone distributed storage handler, which speaks to - KSM/SCM on the backend and provides REST services to clients. + OM/SCM on the backend and provides REST services to clients. local - Local Storage handler strictly for testing - To be removed. ozone.key.deleting.limit.per.task 1000 - KSM, PERFORMANCE + OM, PERFORMANCE A maximum number of keys to be scanned by key deleting service - per time interval in KSM. Those keys are sent to delete metadata and + per time interval in OM. Those keys are sent to delete metadata and generate transactions in SCM for next async deletion between SCM and DataNode. - ozone.ksm.address + ozone.om.address - KSM, REQUIRED + OM, REQUIRED - The address of the Ozone KSM service. This allows clients to discover - the KSMs address. 
+ The address of the Ozone OM service. This allows clients to discover + the address of the OM. - ozone.ksm.group.rights + ozone.om.group.rights READ_WRITE - KSM, SECURITY + OM, SECURITY - Default group permissions in Ozone KSM. + Default group permissions in Ozone OM. - ozone.ksm.handler.count.key + ozone.om.handler.count.key 20 - KSM, PERFORMANCE + OM, PERFORMANCE - The number of RPC handler threads for KSM service endpoints. + The number of RPC handler threads for OM service endpoints. - ozone.ksm.http-address + ozone.om.http-address 0.0.0.0:9874 - KSM, MANAGEMENT + OM, MANAGEMENT - The address and the base port where the KSM web UI will listen on. + The address and the base port where the OM web UI will listen on. If the port is 0, then the server will start on a free port. However, it is best to specify a well-known port, so it is easy to connect and see - the KSM management UI. + the OM management UI. - ozone.ksm.http-bind-host + ozone.om.http-bind-host 0.0.0.0 - KSM, MANAGEMENT + OM, MANAGEMENT - The actual address the KSM web server will bind to. If this optional + The actual address the OM web server will bind to. If this optional the address is set, it overrides only the hostname portion of - ozone.ksm.http-address. + ozone.om.http-address. - ozone.ksm.http.enabled + ozone.om.http.enabled true - KSM, MANAGEMENT + OM, MANAGEMENT - Property to enable or disable KSM web user interface. + Property to enable or disable OM web user interface. - ozone.ksm.https-address + ozone.om.https-address 0.0.0.0:9875 - KSM, MANAGEMENT, SECURITY + OM, MANAGEMENT, SECURITY - The address and the base port where the KSM web UI will listen + The address and the base port where the OM web UI will listen on using HTTPS. If the port is 0 then the server will start on a free port. - ozone.ksm.https-bind-host + ozone.om.https-bind-host 0.0.0.0 - KSM, MANAGEMENT, SECURITY + OM, MANAGEMENT, SECURITY - The actual address the KSM web server will bind to using HTTPS. + The actual address the OM web server will bind to using HTTPS. If this optional address is set, it overrides only the hostname portion of - ozone.ksm.http-address. + ozone.om.http-address. - ozone.ksm.keytab.file + ozone.om.keytab.file - KSM, SECURITY + OM, SECURITY - The keytab file for Kerberos authentication in KSM. + The keytab file for Kerberos authentication in OM. - ozone.ksm.db.cache.size.mb + ozone.om.db.cache.size.mb 128 - KSM, PERFORMANCE + OM, PERFORMANCE - The size of KSM DB cache in MB that used for caching files. + The size of OM DB cache in MB that used for caching files. This value is set to an abnormally low value in the default configuration. That is to make unit testing easy. Generally, this value should be set to something like 16GB or more, if you intend to use Ozone at scale. - A large value for this key allows a proportionally larger amount of KSM - metadata to be cached in memory. This makes KSM operations faster. + A large value for this key allows a proportionally larger amount of OM + metadata to be cached in memory. This makes OM operations faster. - ozone.ksm.user.max.volume + ozone.om.user.max.volume 1024 - KSM, MANAGEMENT + OM, MANAGEMENT The maximum number of volumes a user can have on a cluster.Increasing or decreasing this number has no real impact on ozone cluster. This is @@ -379,11 +379,11 @@ - ozone.ksm.user.rights + ozone.om.user.rights READ_WRITE - KSM, SECURITY + OM, SECURITY - Default user permissions used in KSM. + Default user permissions used in OM. 
@@ -393,20 +393,20 @@ This is used only for testing purposes. This value is used by the local storage handler to simulate a REST backend. This is useful only when - debugging the REST front end independent of KSM and SCM. To be removed. + debugging the REST front end independent of OM and SCM. To be removed. ozone.metadata.dirs - OZONE, KSM, SCM, CONTAINER, REQUIRED, STORAGE + OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE - Ozone metadata is shared among KSM, which acts as the namespace + Ozone metadata is shared among OM, which acts as the namespace manager for ozone, SCM which acts as the block manager and data nodes which maintain the name of the key(Key Name and BlockIDs). This replicated and distributed metadata store is maintained under the directory pointed by this key. Since metadata can be I/O intensive, at - least on KSM and SCM we recommend having SSDs. If you have the luxury + least on OM and SCM we recommend having SSDs. If you have the luxury of mapping this path to SSDs on all machines in the cluster, that will be excellent. @@ -417,10 +417,10 @@ ozone.metastore.impl RocksDB - OZONE, KSM, SCM, CONTAINER, STORAGE + OZONE, OM, SCM, CONTAINER, STORAGE Ozone metadata store implementation. Ozone metadata are well - distributed to multiple services such as ksm, scm. They are stored in + distributed to multiple services such as ozoneManager, scm. They are stored in some local key-value databases. This property determines which database library to use. Supported value is either LevelDB or RocksDB. @@ -429,7 +429,7 @@ ozone.metastore.rocksdb.statistics ALL - OZONE, KSM, SCM, STORAGE, PERFORMANCE + OZONE, OM, SCM, STORAGE, PERFORMANCE The statistics level of the rocksdb store. If you use any value from org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb @@ -672,7 +672,7 @@ The heartbeat interval from a data node to SCM. Yes, it is not three but 30, since most data nodes will heart beating via Ratis heartbeats. If a client is not able to talk to a data node, it will notify - KSM/SCM eventually. So a 30 second HB seems to work. This assumes that + OM/SCM eventually. So a 30 second HB seems to work. This assumes that replication strategy used is Ratis if not, this value should be set to something smaller like 3 seconds. @@ -808,7 +808,7 @@ OZONE, SECURITY - The server principal used by the SCM and KSM for web UI SPNEGO + The server principal used by the SCM and OM for web UI SPNEGO authentication when Kerberos security is enabled. This is typically set to HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix HTTP/ by convention. @@ -867,9 +867,9 @@ ozone.key.preallocation.maxsize 134217728 - OZONE, KSM, PERFORMANCE + OZONE, OM, PERFORMANCE - When a new key write request is sent to KSM, if a size is requested, at most + When a new key write request is sent to OM, if a size is requested, at most 128MB of size is allocated at request time. If client needs more space for the write, separate block allocation requests will be made. @@ -938,7 +938,7 @@ ozone.open.key.cleanup.service.interval.seconds 86400 - OZONE, KSM, PERFORMANCE + OZONE, OM, PERFORMANCE A background job periodically checks open key entries and delete the expired ones. This entry controls the interval of this cleanup check. @@ -948,7 +948,7 @@ ozone.open.key.expire.threshold 86400 - OZONE, KSM, PERFORMANCE + OZONE, OM, PERFORMANCE Controls how long an open key operation is considered active. 
Specifically, if a key has been open longer than the value of this config entry, that open key is considered as @@ -958,12 +958,12 @@ hadoop.tags.custom - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE + OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE ozone.tags.system - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE + OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js index 411438aae4..c2ed2adce2 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js @@ -270,7 +270,7 @@ $http.get("conf?cmd=getOzoneTags") .then(function(response) { ctrl.tags = response.data; - var excludedTags = ['CBLOCK', 'KSM', 'SCM']; + var excludedTags = ['CBLOCK', 'OM', 'SCM']; for (var i = 0; i < excludedTags.length; i++) { var idx = ctrl.tags.indexOf(excludedTags[i]); // Remove CBLOCK related properties @@ -302,7 +302,7 @@ } ctrl.loadAll = function() { - $http.get("conf?cmd=getPropertyByTag&tags=KSM,SCM," + ctrl.tags) + $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags) .then(function(response) { ctrl.convertToArray(response.data); diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html index 6825750c33..b52f6533fc 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html @@ -27,8 +27,8 @@ ng-click="$ctrl.switchto('All')">All KSM + ng-class="$ctrl.allSelected('OM') ? 'btn-primary' :'btn-secondary'" + ng-click="$ctrl.switchto('OM')">OM SCM diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index 4f4c75563b..28103bef95 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -28,7 +28,7 @@ /** * The DeletedBlockLog is a persisted log in SCM to keep tracking * container blocks which are under deletion. It maintains info - * about under-deletion container blocks that notified by KSM, + * about under-deletion container blocks that notified by OM, * and the state how it is processed. */ public interface DeletedBlockLog extends Closeable { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index 3ada8fe192..c23b1fd17d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -53,9 +53,9 @@ * client to able to write to it. *

* 2. Owners - Each instance of Name service, for example, Namenode of HDFS or - * Key Space Manager (KSM) of Ozone or CBlockServer -- is an owner. It is - * possible to have many KSMs for a Ozone cluster and only one SCM. But SCM - * keeps the data from each KSM in separate bucket, never mixing them. To + * Ozone Manager (OM) of Ozone or CBlockServer -- is an owner. It is + * possible to have many OMs for an Ozone cluster and only one SCM. But SCM + * keeps the data from each OM in a separate bucket, never mixing them. To * write data, often we have to find all open containers for a specific owner. *

* 3. ReplicationType - The clients are allowed to specify what kind of diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java index edbcfa12f2..996478caaa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java @@ -33,7 +33,7 @@ /** * Command Queue is queue of commands for the datanode. *

- * Node manager, container Manager and key space managers can queue commands for + * Node manager, container Manager and Ozone managers can queue commands for * datanodes into this queue. These commands will be send in the order in which * there where queued. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 98fe9a1137..3bb284e8d0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -152,7 +152,7 @@ public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType @Override public List deleteKeyBlocks( List keyBlocksInfoList) throws IOException { - LOG.info("SCM is informed by KSM to delete {} blocks", keyBlocksInfoList + LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList .size()); List results = new ArrayList<>(); for (BlockGroup keyBlocks : keyBlocksInfoList) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java index 727c81a0d8..782844517b 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java @@ -27,7 +27,7 @@ import java.net.URISyntaxException; /** - * This class is the base CLI for scm, ksm and scmadm. + * This class is the base CLI for scm, om and scmadm. */ public abstract class OzoneBaseCLI extends Configured implements Tool { diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot index c741588c19..6d6fea0273 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot @@ -39,12 +39,12 @@ Test rest interface Should contain ${result} 200 OK Check webui static resources - ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js + ${result} = Execute on scm curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js Should contain ${result} 200 - ${result} = Execute on ksm curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js + ${result} = Execute on ozoneManager curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js Should contain ${result} 200 Start freon testing - ${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 + ${result} = Execute on ozoneManager ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10 Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 Should Not Contain ${result} ERROR diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml index b50f42d3e9..99f28310f1 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml @@ -25,18 +25,18 @@ services: command: ["/opt/hadoop/bin/ozone","datanode"] env_file: - ./docker-config - ksm: + ozoneManager: image: 
apache/hadoop-runner - hostname: ksm + hostname: ozoneManager volumes: - ${OZONEDIR}:/opt/hadoop ports: - 9874 environment: - ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","ksm"] + command: ["/opt/hadoop/bin/ozone","om"] scm: image: apache/hadoop-runner volumes: diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config index c3ec2ef71b..b72085b22f 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -OZONE-SITE.XML_ozone.ksm.address=ksm -OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 +OZONE-SITE.XML_ozone.om.address=ozoneManager +OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot index 9521ad60be..f4be3e0f6a 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot @@ -28,22 +28,22 @@ ${PROJECTDIR} ${CURDIR}/../../../../../.. *** Test Cases *** RestClient without http port - Test ozone shell http:// ksm restwoport True + Test ozone shell http:// ozoneManager restwoport True RestClient with http port - Test ozone shell http:// ksm:9874 restwport True + Test ozone shell http:// ozoneManager:9874 restwport True RestClient without host name - Test ozone shell http:// ${EMPTY} restwohost True + Test ozone shell http:// ${EMPTY} restwohost True RpcClient with port - Test ozone shell o3:// ksm:9862 rpcwoport False + Test ozone shell o3:// ozoneManager:9862 rpcwoport False RpcClient without host - Test ozone shell o3:// ${EMPTY} rpcwport False + Test ozone shell o3:// ${EMPTY} rpcwport False RpcClient without scheme - Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme False + Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme False *** Keywords *** @@ -52,7 +52,7 @@ Test ozone shell ${result} = Execute on datanode ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root Should not contain ${result} Failed Should contain ${result} Creating Volume: ${volume} - ${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' + ${result} = Execute on datanode ozone oz -listVolume o3://ozoneManager -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")' Should contain ${result} createdOn Execute on datanode ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB ${result} = Execute on datanode ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.volumeName=="${volume}") | .owner | .name' @@ -66,7 +66,7 @@ Test ozone shell Should Be Equal ${result} GROUP ${result} = Execute on datanode ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type' Should Be Equal ${result} USER - ${result} = Execute on datanode ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' + ${result} = Execute on datanode ozone oz -listBucket o3://ozoneManager/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName' Should Be Equal ${result} ${volume} Run Keyword and Return If ${withkeytest} Test key handling ${protocol} ${server} ${volume} Execute on datanode ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1 @@ -80,6 +80,6 @@ Test key handling Execute on datanode ls -l NOTICE.txt.1 ${result} = Execute on datanode ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")' Should contain ${result} createdOn - ${result} = Execute on datanode ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' + ${result} = Execute on datanode ozone oz -listKey o3://ozoneManager/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName' Should Be Equal ${result} key1 Execute on datanode ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot index a5ea30af34..9235cd917a 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot @@ -21,12 +21,12 @@ Startup Ozone cluster with size Run echo "Starting new docker-compose environment" >> docker-compose.log ${rc} ${output} = Run docker compose up -d Should Be Equal As Integers ${rc} 0 - Wait Until Keyword Succeeds 1min 5sec Is Daemon started ksm HTTP server of KSM is listening + Wait Until Keyword Succeeds 1min 5sec Is Daemon started ozoneManager HTTP server of OZONEMANAGER is listening Daemons are running without error Scale datanodes up 5 Daemons are running without error - Is daemon running without error ksm + Is daemon running without error ozoneManager Is daemon running without error scm Is daemon running without error datanode diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml index 12022dfe61..6b7b7bd946 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml @@ -25,18 +25,18 @@ services: command: ["/opt/hadoop/bin/ozone","datanode"] env_file: - ./docker-config - ksm: + ozoneManager: image: apache/hadoop-runner - hostname: ksm + hostname: ozoneManager volumes: - ${OZONEDIR}:/opt/hadoop ports: - 9874 environment: - ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION + ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION env_file: - ./docker-config - command: 
["/opt/hadoop/bin/ozone","ksm"] + command: ["/opt/hadoop/bin/ozone","om"] scm: image: apache/hadoop-runner volumes: diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config index e06d434bb4..b0129bce0f 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config @@ -15,8 +15,8 @@ # limitations under the License. CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem -OZONE-SITE.XML_ozone.ksm.address=ksm -OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874 +OZONE-SITE.XML_ozone.om.address=ozoneManager +OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874 OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.enabled=True OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot index 9e8a5d2004..ea473c0de7 100644 --- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot +++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot @@ -27,13 +27,13 @@ ${PROJECTDIR} ${CURDIR}/../../../../../.. *** Test Cases *** Create volume and bucket - Execute on datanode ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root - Execute on datanode ozone oz -createBucket http://ksm/fstest/bucket1 + Execute on datanode ozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root + Execute on datanode ozone oz -createBucket http://ozoneManager/fstest/bucket1 Check volume from ozonefs ${result} = Execute on hadooplast hdfs dfs -ls o3://bucket1.fstest/ Create directory from ozonefs Execute on hadooplast hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep - ${result} = Execute on ksm ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' + ${result} = Execute on ozoneManager ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' Should contain ${result} testdir/deep diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 39b7bb8403..0da52dc033 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -81,7 +81,7 @@ public List getAcls() { } /** - * Returns new builder class that builds a KsmBucketInfo. + * Returns new builder class that builds a OmBucketInfo. * * @return Builder */ @@ -90,7 +90,7 @@ public static BucketArgs.Builder newBuilder() { } /** - * Builder for KsmBucketInfo. + * Builder for OmBucketInfo. 
*/ public static class Builder { private Boolean versioning; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 3085b0daa6..de0d166abd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.KsmUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; @@ -34,11 +34,9 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Proxy; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_CLIENT_PROTOCOL; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; /** * Factory class to create different types of OzoneClients. @@ -97,46 +95,46 @@ public static OzoneClient getClient(Configuration config) /** * Returns an OzoneClient which will use RPC protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * * @return OzoneClient * * @throws IOException */ - public static OzoneClient getRpcClient(String ksmHost) + public static OzoneClient getRpcClient(String omHost) throws IOException { Configuration config = new OzoneConfiguration(); - int port = KsmUtils.getKsmRpcPort(config); - return getRpcClient(ksmHost, port, config); + int port = OmUtils.getOmRpcPort(config); + return getRpcClient(omHost, port, config); } /** * Returns an OzoneClient which will use RPC protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * - * @param ksmRpcPort - * RPC port of KeySpaceManager. + * @param omRpcPort + * RPC port of OzoneManager. * * @return OzoneClient * * @throws IOException */ - public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort) + public static OzoneClient getRpcClient(String omHost, Integer omRpcPort) throws IOException { - return getRpcClient(ksmHost, ksmRpcPort, new OzoneConfiguration()); + return getRpcClient(omHost, omRpcPort, new OzoneConfiguration()); } /** * Returns an OzoneClient which will use RPC protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * - * @param ksmRpcPort - * RPC port of KeySpaceManager. + * @param omRpcPort + * RPC port of OzoneManager. 
* * @param config * Configuration to be used for OzoneClient creation @@ -145,13 +143,13 @@ public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort) * * @throws IOException */ - public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort, + public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, Configuration config) throws IOException { - Preconditions.checkNotNull(ksmHost); - Preconditions.checkNotNull(ksmRpcPort); + Preconditions.checkNotNull(omHost); + Preconditions.checkNotNull(omRpcPort); Preconditions.checkNotNull(config); - config.set(OZONE_KSM_ADDRESS_KEY, ksmHost + ":" + ksmRpcPort); + config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort); return getRpcClient(config); } @@ -175,46 +173,46 @@ public static OzoneClient getRpcClient(Configuration config) /** * Returns an OzoneClient which will use REST protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * * @return OzoneClient * * @throws IOException */ - public static OzoneClient getRestClient(String ksmHost) + public static OzoneClient getRestClient(String omHost) throws IOException { Configuration config = new OzoneConfiguration(); - int port = KsmUtils.getKsmRestPort(config); - return getRestClient(ksmHost, port, config); + int port = OmUtils.getOmRestPort(config); + return getRestClient(omHost, port, config); } /** * Returns an OzoneClient which will use REST protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * - * @param ksmHttpPort - * HTTP port of KeySpaceManager. + * @param omHttpPort + * HTTP port of OzoneManager. * * @return OzoneClient * * @throws IOException */ - public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort) + public static OzoneClient getRestClient(String omHost, Integer omHttpPort) throws IOException { - return getRestClient(ksmHost, ksmHttpPort, new OzoneConfiguration()); + return getRestClient(omHost, omHttpPort, new OzoneConfiguration()); } /** * Returns an OzoneClient which will use REST protocol. * - * @param ksmHost - * hostname of KeySpaceManager to connect. + * @param omHost + * hostname of OzoneManager to connect. * - * @param ksmHttpPort - * HTTP port of KeySpaceManager. + * @param omHttpPort + * HTTP port of OzoneManager. 
* * @param config * Configuration to be used for OzoneClient creation @@ -223,13 +221,13 @@ public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort) * * @throws IOException */ - public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort, + public static OzoneClient getRestClient(String omHost, Integer omHttpPort, Configuration config) throws IOException { - Preconditions.checkNotNull(ksmHost); - Preconditions.checkNotNull(ksmHttpPort); + Preconditions.checkNotNull(omHost); + Preconditions.checkNotNull(omHttpPort); Preconditions.checkNotNull(config); - config.set(OZONE_KSM_HTTP_ADDRESS_KEY, ksmHost + ":" + ksmHttpPort); + config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort); return getRestClient(config); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java index 0c723dd8ab..7c93146abd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java @@ -49,7 +49,7 @@ public class OzoneKey { private long modificationTime; /** - * Constructs OzoneKey from KsmKeyInfo. + * Constructs OzoneKey from OmKeyInfo. * */ public OzoneKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java index f1aa03108a..ae1cfccd42 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -77,7 +77,7 @@ public List getAcls() { return acls; } /** - * Returns new builder class that builds a KsmVolumeArgs. + * Returns new builder class that builds a OmVolumeArgs. * * @return Builder */ @@ -86,7 +86,7 @@ public static VolumeArgs.Builder newBuilder() { } /** - * Builder for KsmVolumeArgs. + * Builder for OmVolumeArgs. 
*/ public static class Builder { private String adminName; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java index edd85aabee..b3a566e43f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; @@ -255,28 +255,29 @@ public boolean seekToNewSource(long targetPos) throws IOException { } } - public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo, + public static LengthInputStream getFromOmKeyInfo( + OmKeyInfo keyInfo, XceiverClientManager xceiverClientManager, StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient, String requestId) - throws IOException { + storageContainerLocationClient, + String requestId) throws IOException { long length = 0; long containerKey; ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream(); groupInputStream.key = keyInfo.getKeyName(); - List keyLocationInfos = + List keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); groupInputStream.streamOffset = new long[keyLocationInfos.size()]; for (int i = 0; i < keyLocationInfos.size(); i++) { - KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i); - BlockID blockID = ksmKeyLocationInfo.getBlockID(); + OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(i); + BlockID blockID = omKeyLocationInfo.getBlockID(); long containerID = blockID.getContainerID(); ContainerWithPipeline containerWithPipeline = storageContainerLocationClient.getContainerWithPipeline(containerID); XceiverClientSpi xceiverClient = xceiverClientManager .acquireClient(containerWithPipeline.getPipeline(), containerID); boolean success = false; - containerKey = ksmKeyLocationInfo.getLocalID(); + containerKey = omKeyLocationInfo.getLocalID(); try { LOG.debug("get key accessing {} {}", containerID, containerKey); @@ -292,11 +293,10 @@ public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo, } success = true; ChunkInputStream inputStream = new ChunkInputStream( - ksmKeyLocationInfo.getBlockID(), xceiverClientManager, - xceiverClient, + omKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient, chunks, requestId); groupInputStream.addStream(inputStream, - ksmKeyLocationInfo.getLength()); + omKeyLocationInfo.getLength()); } finally { if (!success) { xceiverClientManager.releaseClient(xceiverClient); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java index d1a3b46b81..94433179f9 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java @@ -24,15 +24,15 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers @@ -67,10 +67,10 @@ public class ChunkGroupOutputStream extends OutputStream { private final ArrayList streamEntries; private int currentStreamIndex; private long byteOffset; - private final KeySpaceManagerProtocolClientSideTranslatorPB ksmClient; + private final OzoneManagerProtocolClientSideTranslatorPB omClient; private final StorageContainerLocationProtocolClientSideTranslatorPB scmClient; - private final KsmKeyArgs keyArgs; + private final OmKeyArgs keyArgs; private final int openID; private final XceiverClientManager xceiverClientManager; private final int chunkSize; @@ -83,7 +83,7 @@ public class ChunkGroupOutputStream extends OutputStream { @VisibleForTesting public ChunkGroupOutputStream() { streamEntries = new ArrayList<>(); - ksmClient = null; + omClient = null; scmClient = null; keyArgs = null; openID = -1; @@ -113,16 +113,16 @@ public List getStreamEntries() { public ChunkGroupOutputStream( OpenKeySession handler, XceiverClientManager xceiverClientManager, StorageContainerLocationProtocolClientSideTranslatorPB scmClient, - KeySpaceManagerProtocolClientSideTranslatorPB ksmClient, + OzoneManagerProtocolClientSideTranslatorPB omClient, int chunkSize, String requestId, ReplicationFactor factor, ReplicationType type) throws IOException { this.streamEntries = new ArrayList<>(); this.currentStreamIndex = 0; this.byteOffset = 0; - this.ksmClient = ksmClient; + this.omClient = omClient; this.scmClient = scmClient; - KsmKeyInfo info = handler.getKeyInfo(); - this.keyArgs = new KsmKeyArgs.Builder() + OmKeyInfo info = handler.getKeyInfo(); + this.keyArgs = new OmKeyArgs.Builder() .setVolumeName(info.getVolumeName()) .setBucketName(info.getBucketName()) .setKeyName(info.getKeyName()) @@ -150,19 +150,19 @@ public ChunkGroupOutputStream( * @param openVersion the version corresponding to the pre-allocation. 
* @throws IOException */ - public void addPreallocateBlocks(KsmKeyLocationInfoGroup version, + public void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) throws IOException { // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) - for (KsmKeyLocationInfo subKeyInfo : version.getLocationList()) { + for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) { if (subKeyInfo.getCreateVersion() == openVersion) { checkKeyLocationInfo(subKeyInfo); } } } - private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo) + private void checkKeyLocationInfo(OmKeyLocationInfo subKeyInfo) throws IOException { ContainerWithPipeline containerWithPipeline = scmClient .getContainerWithPipeline(subKeyInfo.getContainerID()); @@ -210,7 +210,7 @@ public synchronized void write(int b) throws IOException { checkNotClosed(); if (streamEntries.size() <= currentStreamIndex) { - Preconditions.checkNotNull(ksmClient); + Preconditions.checkNotNull(omClient); // allocate a new block, if a exception happens, log an error and // throw exception to the caller directly, and the write fails. try { @@ -258,7 +258,7 @@ public synchronized void write(byte[] b, int off, int len) int succeededAllocates = 0; while (len > 0) { if (streamEntries.size() <= currentStreamIndex) { - Preconditions.checkNotNull(ksmClient); + Preconditions.checkNotNull(omClient); // allocate a new block, if a exception happens, log an error and // throw exception to the caller directly, and the write fails. try { @@ -286,7 +286,7 @@ public synchronized void write(byte[] b, int off, int len) } /** - * Contact KSM to get a new block. Set the new block with the index (e.g. + * Contact OM to get a new block. Set the new block with the index (e.g. * first block has index = 0, second has index = 1 etc.) * * The returned block is made to new ChunkOutputStreamEntry to write. @@ -295,7 +295,7 @@ public synchronized void write(byte[] b, int off, int len) * @throws IOException */ private void allocateNewBlock(int index) throws IOException { - KsmKeyLocationInfo subKeyInfo = ksmClient.allocateBlock(keyArgs, openID); + OmKeyLocationInfo subKeyInfo = omClient.allocateBlock(keyArgs, openID); checkKeyLocationInfo(subKeyInfo); } @@ -311,7 +311,7 @@ public synchronized void flush() throws IOException { } /** - * Commit the key to KSM, this will add the blocks as the new key blocks. + * Commit the key to OM, this will add the blocks as the new key blocks. 
* * @throws IOException */ @@ -329,7 +329,7 @@ public synchronized void close() throws IOException { if (keyArgs != null) { // in test, this could be null keyArgs.setDataSize(byteOffset); - ksmClient.commitKey(keyArgs, openID); + omClient.commitKey(keyArgs, openID); } else { LOG.warn("Closing ChunkGroupOutputStream, but key args is null"); } @@ -342,7 +342,7 @@ public static class Builder { private OpenKeySession openHandler; private XceiverClientManager xceiverManager; private StorageContainerLocationProtocolClientSideTranslatorPB scmClient; - private KeySpaceManagerProtocolClientSideTranslatorPB ksmClient; + private OzoneManagerProtocolClientSideTranslatorPB omClient; private int chunkSize; private String requestID; private ReplicationType type; @@ -364,9 +364,9 @@ public Builder setScmClient( return this; } - public Builder setKsmClient( - KeySpaceManagerProtocolClientSideTranslatorPB client) { - this.ksmClient = client; + public Builder setOmClient( + OzoneManagerProtocolClientSideTranslatorPB client) { + this.omClient = client; return this; } @@ -392,7 +392,7 @@ public Builder setFactor(ReplicationFactor replicationFactor) { public ChunkGroupOutputStream build() throws IOException { return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient, - ksmClient, chunkSize, requestID, factor, type); + omClient, chunkSize, requestID, factor, type); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java index 93b3417b4b..abdc2fbe19 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rest; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import java.util.List; import java.util.Random; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index 6e3f617cd6..78fbe8d1f7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -45,10 +45,9 @@ import org.apache.hadoop.ozone.client.rest.response.BucketInfo; import org.apache.hadoop.ozone.client.rest.response.KeyInfo; import org.apache.hadoop.ozone.client.rest.response.VolumeInfo; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServicePort; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.ListKeys; import org.apache.hadoop.ozone.web.response.ListVolumes; @@ -152,8 +151,8 @@ public RestClient(Configuration conf) .build()) .build(); this.ugi = UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS, - KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT); + this.userRights = 
conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, + OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); // TODO: Add new configuration parameter to configure RestServerSelector. RestServerSelector defaultSelector = new DefaultRestServerSelector(); @@ -171,11 +170,11 @@ public RestClient(Configuration conf) private InetSocketAddress getOzoneRestServerAddress( RestServerSelector selector) throws IOException { - String httpAddress = conf.get(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY); + String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY); if (httpAddress == null) { throw new IllegalArgumentException( - KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY + " must be defined. See" + + OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. See" + " https://wiki.apache.org/hadoop/Ozone#Configuration for" + " details on configuring Ozone."); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java index 54e219b92b..fbd6eb8ea9 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rest; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import java.util.List; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 43b94a1529..fc70514453 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.KsmUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -43,24 +43,22 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; -import org.apache.hadoop.ozone.ksm.protocolPB - .KeySpaceManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.ksm.protocolPB - .KeySpaceManagerProtocolPB; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import 
org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServicePort; + .OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.protocolPB @@ -80,7 +78,7 @@ import java.util.stream.Collectors; /** - * Ozone RPC Client Implementation, it connects to KSM, SCM and DataNode + * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode * to execute client calls. This uses RPC protocol for communication * with the servers. */ @@ -92,8 +90,8 @@ public class RpcClient implements ClientProtocol { private final OzoneConfiguration conf; private final StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private final KeySpaceManagerProtocolClientSideTranslatorPB - keySpaceManagerClient; + private final OzoneManagerProtocolClientSideTranslatorPB + ozoneManagerClient; private final XceiverClientManager xceiverClientManager; private final int chunkSize; private final UserGroupInformation ugi; @@ -109,20 +107,20 @@ public RpcClient(Configuration conf) throws IOException { Preconditions.checkNotNull(conf); this.conf = new OzoneConfiguration(conf); this.ugi = UserGroupInformation.getCurrentUser(); - this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS, - KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT); - this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS, - KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT); - long ksmVersion = - RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class); - InetSocketAddress ksmAddress = KsmUtils - .getKsmAddressForClients(conf); - RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class, + this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, + OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); + this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS, + OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT); + long omVersion = + RPC.getProtocolVersion(OzoneManagerProtocolPB.class); + InetSocketAddress omAddress = OmUtils + .getOmAddressForClients(conf); + RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); - this.keySpaceManagerClient = - new KeySpaceManagerProtocolClientSideTranslatorPB( - RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion, - ksmAddress, UserGroupInformation.getCurrentUser(), conf, + this.ozoneManagerClient = + new OzoneManagerProtocolClientSideTranslatorPB( + RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, + omAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), Client.getRpcTimeout(conf))); @@ -155,7 +153,7 @@ public RpcClient(Configuration conf) throws IOException { } private InetSocketAddress getScmAddressForClient() throws IOException { - List services = keySpaceManagerClient.getServiceList(); + List services = ozoneManagerClient.getServiceList(); ServiceInfo scmInfo = services.stream().filter( a -> a.getNodeType().equals(HddsProtos.NodeType.SCM)) .collect(Collectors.toList()).get(0); @@ -195,7 +193,7 @@ public void createVolume(String 
volumeName, VolumeArgs volArgs) listOfAcls.addAll(volArgs.getAcls()); } - KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder(); + OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setVolume(volumeName); builder.setAdminName(admin); builder.setOwnerName(owner); @@ -204,12 +202,12 @@ public void createVolume(String volumeName, VolumeArgs volArgs) //Remove duplicates and add ACLs for (OzoneAcl ozoneAcl : listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(ozoneAcl)); + builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl)); } LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.", volumeName, owner, quota); - keySpaceManagerClient.createVolume(builder.build()); + ozoneManagerClient.createVolume(builder.build()); } @Override @@ -217,7 +215,7 @@ public void setVolumeOwner(String volumeName, String owner) throws IOException { HddsClientUtils.verifyResourceName(volumeName); Preconditions.checkNotNull(owner); - keySpaceManagerClient.setOwner(volumeName, owner); + ozoneManagerClient.setOwner(volumeName, owner); } @Override @@ -226,14 +224,14 @@ public void setVolumeQuota(String volumeName, OzoneQuota quota) HddsClientUtils.verifyResourceName(volumeName); Preconditions.checkNotNull(quota); long quotaInBytes = quota.sizeInBytes(); - keySpaceManagerClient.setQuota(volumeName, quotaInBytes); + ozoneManagerClient.setQuota(volumeName, quotaInBytes); } @Override public OzoneVolume getVolumeDetails(String volumeName) throws IOException { HddsClientUtils.verifyResourceName(volumeName); - KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName); + OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName); return new OzoneVolume( conf, this, @@ -243,7 +241,7 @@ public OzoneVolume getVolumeDetails(String volumeName) volume.getQuotaInBytes(), volume.getCreationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())); + map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); } @Override @@ -255,14 +253,14 @@ public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) @Override public void deleteVolume(String volumeName) throws IOException { HddsClientUtils.verifyResourceName(volumeName); - keySpaceManagerClient.deleteVolume(volumeName); + ozoneManagerClient.deleteVolume(volumeName); } @Override public List listVolumes(String volumePrefix, String prevVolume, int maxListResult) throws IOException { - List volumes = keySpaceManagerClient.listAllVolumes( + List volumes = ozoneManagerClient.listAllVolumes( volumePrefix, prevVolume, maxListResult); return volumes.stream().map(volume -> new OzoneVolume( @@ -274,7 +272,7 @@ public List listVolumes(String volumePrefix, String prevVolume, volume.getQuotaInBytes(), volume.getCreationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). 
- map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) + map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) .collect(Collectors.toList()); } @@ -282,7 +280,7 @@ public List listVolumes(String volumePrefix, String prevVolume, public List listVolumes(String user, String volumePrefix, String prevVolume, int maxListResult) throws IOException { - List volumes = keySpaceManagerClient.listVolumeByUser( + List volumes = ozoneManagerClient.listVolumeByUser( user, volumePrefix, prevVolume, maxListResult); return volumes.stream().map(volume -> new OzoneVolume( @@ -294,7 +292,7 @@ public List listVolumes(String user, String volumePrefix, volume.getQuotaInBytes(), volume.getCreationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) + map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))) .collect(Collectors.toList()); } @@ -329,7 +327,7 @@ public void createBucket( listOfAcls.addAll(bucketArgs.getAcls()); } - KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder(); + OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) .setIsVersionEnabled(isVersionEnabled) @@ -339,7 +337,7 @@ public void createBucket( LOG.info("Creating Bucket: {}/{}, with Versioning {} and " + "Storage Type set to {}", volumeName, bucketName, isVersionEnabled, storageType); - keySpaceManagerClient.createBucket(builder.build()); + ozoneManagerClient.createBucket(builder.build()); } @Override @@ -348,11 +346,11 @@ public void addBucketAcls( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(addAcls); - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) .setAddAcls(addAcls); - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } @Override @@ -361,11 +359,11 @@ public void removeBucketAcls( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(removeAcls); - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) .setRemoveAcls(removeAcls); - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } @Override @@ -374,11 +372,11 @@ public void setBucketVersioning( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(versioning); - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) .setIsVersionEnabled(versioning); - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } @Override @@ -387,18 +385,18 @@ public void setBucketStorageType( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(storageType); - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType); - keySpaceManagerClient.setBucketProperty(builder.build()); + 
ozoneManagerClient.setBucketProperty(builder.build()); } @Override public void deleteBucket( String volumeName, String bucketName) throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); - keySpaceManagerClient.deleteBucket(volumeName, bucketName); + ozoneManagerClient.deleteBucket(volumeName, bucketName); } @Override @@ -411,8 +409,8 @@ public void checkBucketAccess( public OzoneBucket getBucketDetails( String volumeName, String bucketName) throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); - KsmBucketInfo bucketArgs = - keySpaceManagerClient.getBucketInfo(volumeName, bucketName); + OmBucketInfo bucketArgs = + ozoneManagerClient.getBucketInfo(volumeName, bucketName); return new OzoneBucket( conf, this, @@ -428,7 +426,7 @@ public OzoneBucket getBucketDetails( public List listBuckets(String volumeName, String bucketPrefix, String prevBucket, int maxListResult) throws IOException { - List buckets = keySpaceManagerClient.listBuckets( + List buckets = ozoneManagerClient.listBuckets( volumeName, prevBucket, bucketPrefix, maxListResult); return buckets.stream().map(bucket -> new OzoneBucket( @@ -451,7 +449,7 @@ public OzoneOutputStream createKey( HddsClientUtils.verifyResourceName(volumeName, bucketName); HddsClientUtils.checkNotNull(keyName, type, factor); String requestId = UUID.randomUUID().toString(); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -460,13 +458,13 @@ public OzoneOutputStream createKey( .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) .build(); - OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs); + OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); ChunkGroupOutputStream groupOutputStream = new ChunkGroupOutputStream.Builder() .setHandler(openKey) .setXceiverClientManager(xceiverClientManager) .setScmClient(storageContainerLocationClient) - .setKsmClient(keySpaceManagerClient) + .setOmClient(ozoneManagerClient) .setChunkSize(chunkSize) .setRequestID(requestId) .setType(HddsProtos.ReplicationType.valueOf(type.toString())) @@ -485,14 +483,14 @@ public OzoneInputStream getKey( HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(keyName); String requestId = UUID.randomUUID().toString(); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .build(); - KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs); + OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); LengthInputStream lengthInputStream = - ChunkGroupInputStream.getFromKsmKeyInfo( + ChunkGroupInputStream.getFromOmKeyInfo( keyInfo, xceiverClientManager, storageContainerLocationClient, requestId); return new OzoneInputStream( @@ -505,12 +503,12 @@ public void deleteKey( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(keyName); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .build(); - keySpaceManagerClient.deleteKey(keyArgs); + ozoneManagerClient.deleteKey(keyArgs); } @Override @@ -518,12 +516,12 @@ public void renameKey(String volumeName, String bucketName, String fromKeyName, String toKeyName) throws IOException { HddsClientUtils.verifyResourceName(volumeName, 
bucketName); HddsClientUtils.checkNotNull(fromKeyName, toKeyName); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(fromKeyName) .build(); - keySpaceManagerClient.renameKey(keyArgs, toKeyName); + ozoneManagerClient.renameKey(keyArgs, toKeyName); } @Override @@ -531,7 +529,7 @@ public List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, int maxListResult) throws IOException { - List keys = keySpaceManagerClient.listKeys( + List keys = ozoneManagerClient.listKeys( volumeName, bucketName, prevKey, keyPrefix, maxListResult); return keys.stream().map(key -> new OzoneKey( @@ -551,12 +549,12 @@ public OzoneKey getKeyDetails( Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(keyName); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .build(); - KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs); + OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); return new OzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName(), @@ -568,7 +566,7 @@ public OzoneKey getKeyDetails( @Override public void close() throws IOException { IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); - IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient); + IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); IOUtils.cleanupWithLogger(LOG, xceiverClientManager); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index a270f61a81..3aefe8ac23 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.Rule; import org.junit.Test; @@ -30,7 +30,7 @@ import java.net.InetSocketAddress; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; +import static org.apache.hadoop.ozone.OmUtils.getOmAddress; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; @@ -79,27 +79,27 @@ public void testGetScmClientAddress() { } @Test - public void testGetKSMAddress() { + public void testGetOmAddress() { final Configuration conf = new OzoneConfiguration(); // First try a client address with just a host name. Verify it falls // back to the default port. - conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getKsmAddress(conf); + conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4"); + InetSocketAddress addr = getOmAddress(conf); assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT)); + assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); // Next try a client address with just a host name and port. Verify the port - // is ignored and the default KSM port is used. 
- conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4:100"); - addr = getKsmAddress(conf); + // is ignored and the default OM port is used. + conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100"); + addr = getOmAddress(conf); assertThat(addr.getHostString(), is("1.2.3.4")); assertThat(addr.getPort(), is(100)); // Assert the we are able to use default configs if no value is specified. - conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, ""); - addr = getKsmAddress(conf); + conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); + addr = getOmAddress(conf); assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT)); + assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); } } diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index d8581d1925..83d023e9fd 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -69,7 +69,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/src/main/proto - KeySpaceManagerProtocol.proto + OzoneManagerProtocol.proto diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 390f0895b7..9495eff0a9 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -38,10 +38,9 @@ function hadoop_usage hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" hadoop_add_subcommand "freon" client "runs an ozone data generator" hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." - hadoop_add_subcommand "getozoneconf" client "get ozone config values from - configuration" + hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration" hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." 
- hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager" + hadoop_add_subcommand "om" daemon "Ozone Manager" hadoop_add_subcommand "o3" client "command line interface for ozone" hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" @@ -94,9 +93,9 @@ function ozonecmd_case getozoneconf) HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf; ;; - ksm) + om) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager + HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager ;; oz) HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh index 92bc4a8691..29c3674336 100644 --- a/hadoop-ozone/common/src/main/bin/start-ozone.sh +++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh @@ -179,19 +179,19 @@ if [[ "${AUTOHA_ENABLED}" = "true" ]]; then fi #--------------------------------------------------------- -# Ozone keyspacemanager nodes -KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null) -echo "Starting key space manager nodes [${KSM_NODES}]" -if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then - KSM_NODES=$(hostname) +# Ozone ozonemanager nodes +OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null) +echo "Starting Ozone Manager nodes [${OM_NODES}]" +if [[ "${OM_NODES}" == "0.0.0.0" ]]; then + OM_NODES=$(hostname) fi -hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \ +hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ --workers \ --config "${HADOOP_CONF_DIR}" \ - --hostnames "${KSM_NODES}" \ + --hostnames "${OM_NODES}" \ --daemon start \ - ksm + om HADOOP_JUMBO_RETCOUNTER=$? 
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh index be55be4e97..5f5faf0153 100644 --- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh +++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh @@ -73,19 +73,19 @@ else fi #--------------------------------------------------------- -# Ozone keyspacemanager nodes -KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null) -echo "Stopping key space manager nodes [${KSM_NODES}]" -if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then - KSM_NODES=$(hostname) +# Ozone Manager nodes +OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null) +echo "Stopping Ozone Manager nodes [${OM_NODES}]" +if [[ "${OM_NODES}" == "0.0.0.0" ]]; then + OM_NODES=$(hostname) fi -hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \ +hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ --workers \ --config "${HADOOP_CONF_DIR}" \ - --hostnames "${KSM_NODES}" \ + --hostnames "${OM_NODES}" \ --daemon stop \ - ksm + om #--------------------------------------------------------- # Ozone storagecontainermanager nodes diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java similarity index 63% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 1025963aa8..097410405f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -25,71 +25,70 @@ import com.google.common.base.Optional; import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_BIND_HOST_DEFAULT; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; /** - * Stateless helper functions for the server and client side of KSM + * Stateless helper functions for the server and client side of OM * communication. */ -public final class KsmUtils { +public final class OmUtils { - private KsmUtils() { + private OmUtils() { } /** - * Retrieve the socket address that is used by KSM. + * Retrieve the socket address that is used by OM. * @param conf * @return Target InetSocketAddress for the SCM service endpoint. 
*/ - public static InetSocketAddress getKsmAddress( + public static InetSocketAddress getOmAddress( Configuration conf) { final Optional host = getHostNameFromConfigKeys(conf, - OZONE_KSM_ADDRESS_KEY); + OZONE_OM_ADDRESS_KEY); return NetUtils.createSocketAddr( - host.or(OZONE_KSM_BIND_HOST_DEFAULT) + ":" + - getKsmRpcPort(conf)); + host.or(OZONE_OM_BIND_HOST_DEFAULT) + ":" + + getOmRpcPort(conf)); } /** * Retrieve the socket address that should be used by clients to connect - * to KSM. + * to OM. * @param conf - * @return Target InetSocketAddress for the KSM service endpoint. + * @return Target InetSocketAddress for the OM service endpoint. */ - public static InetSocketAddress getKsmAddressForClients( + public static InetSocketAddress getOmAddressForClients( Configuration conf) { final Optional host = getHostNameFromConfigKeys(conf, - OZONE_KSM_ADDRESS_KEY); + OZONE_OM_ADDRESS_KEY); if (!host.isPresent()) { throw new IllegalArgumentException( - OZONE_KSM_ADDRESS_KEY + " must be defined. See" + + OZONE_OM_ADDRESS_KEY + " must be defined. See" + " https://wiki.apache.org/hadoop/Ozone#Configuration for" + " details on configuring Ozone."); } return NetUtils.createSocketAddr( - host.get() + ":" + getKsmRpcPort(conf)); + host.get() + ":" + getOmRpcPort(conf)); } - public static int getKsmRpcPort(Configuration conf) { + public static int getOmRpcPort(Configuration conf) { // If no port number is specified then we'll just try the defaultBindPort. final Optional port = getPortNumberFromConfigKeys(conf, - OZONE_KSM_ADDRESS_KEY); - return port.or(OZONE_KSM_PORT_DEFAULT); + OZONE_OM_ADDRESS_KEY); + return port.or(OZONE_OM_PORT_DEFAULT); } - public static int getKsmRestPort(Configuration conf) { + public static int getOmRestPort(Configuration conf) { // If no port number is specified then we'll just try the default // HTTP BindPort. 
final Optional port = - getPortNumberFromConfigKeys(conf, OZONE_KSM_HTTP_ADDRESS_KEY); - return port.or(OZONE_KSM_HTTP_BIND_PORT_DEFAULT); + getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY); + return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java index d5f9093128..ffbca6a2b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.KsmUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; @@ -53,8 +53,8 @@ enum Command { EXCLUDE_FILE("-excludeFile", "gets the exclude file path that defines the datanodes " + "that need to decommissioned."), - KEYSPACEMANAGER("-keyspacemanagers", - "gets list of ozone key space manager nodes in the cluster"), + OZONEMANAGER("-ozonemanagers", + "gets list of Ozone Manager nodes in the cluster"), STORAGECONTAINERMANAGER("-storagecontainermanagers", "gets list of ozone storage container manager nodes in the cluster"), CONFKEY("-confKey [key]", "gets a specific key from the configuration"); @@ -63,8 +63,8 @@ enum Command { static { HANDLERS = new HashMap(); - HANDLERS.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()), - new KeySpaceManagersCommandHandler()); + HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()), + new OzoneManagersCommandHandler()); HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()), new StorageContainerManagersCommandHandler()); HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()), @@ -245,13 +245,13 @@ public int doWorkInternal(OzoneGetConf tool, String[] args) } /** - * Handler for {@link Command#KEYSPACEMANAGER}. + * Handler for {@link Command#OZONEMANAGER}. */ - static class KeySpaceManagersCommandHandler extends CommandHandler { + static class OzoneManagersCommandHandler extends CommandHandler { @Override public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException { - tool.printOut(KsmUtils.getKsmAddress(tool.getConf()).getHostName()); + tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName()); return 0; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java deleted file mode 100644 index 75cf613ba6..0000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.ksm; - -import org.apache.hadoop.ozone.OzoneAcl; -/** - * KSM Constants. - */ -public final class KSMConfigKeys { - /** - * Never constructed. - */ - private KSMConfigKeys() { - } - - - public static final String OZONE_KSM_HANDLER_COUNT_KEY = - "ozone.ksm.handler.count.key"; - public static final int OZONE_KSM_HANDLER_COUNT_DEFAULT = 20; - - public static final String OZONE_KSM_ADDRESS_KEY = - "ozone.ksm.address"; - public static final String OZONE_KSM_BIND_HOST_DEFAULT = - "0.0.0.0"; - public static final int OZONE_KSM_PORT_DEFAULT = 9862; - - public static final String OZONE_KSM_HTTP_ENABLED_KEY = - "ozone.ksm.http.enabled"; - public static final String OZONE_KSM_HTTP_BIND_HOST_KEY = - "ozone.ksm.http-bind-host"; - public static final String OZONE_KSM_HTTPS_BIND_HOST_KEY = - "ozone.ksm.https-bind-host"; - public static final String OZONE_KSM_HTTP_ADDRESS_KEY = - "ozone.ksm.http-address"; - public static final String OZONE_KSM_HTTPS_ADDRESS_KEY = - "ozone.ksm.https-address"; - public static final String OZONE_KSM_KEYTAB_FILE = - "ozone.ksm.keytab.file"; - public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874; - public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875; - - // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB. - public static final String OZONE_KSM_DB_CACHE_SIZE_MB = - "ozone.ksm.db.cache.size.mb"; - public static final int OZONE_KSM_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_KSM_USER_MAX_VOLUME = - "ozone.ksm.user.max.volume"; - public static final int OZONE_KSM_USER_MAX_VOLUME_DEFAULT = 1024; - - // KSM Default user/group permissions - public static final String OZONE_KSM_USER_RIGHTS = - "ozone.ksm.user.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_KSM_USER_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - - public static final String OZONE_KSM_GROUP_RIGHTS = - "ozone.ksm.group.rights"; - public static final OzoneAcl.OzoneACLRights OZONE_KSM_GROUP_RIGHTS_DEFAULT = - OzoneAcl.OzoneACLRights.READ_WRITE; - - public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK = - "ozone.key.deleting.limit.per.task"; - public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java new file mode 100644 index 0000000000..b9ca296631 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.ozone.OzoneAcl; +/** + * Ozone Manager Constants. + */ +public final class OMConfigKeys { + /** + * Never constructed. + */ + private OMConfigKeys() { + } + + + public static final String OZONE_OM_HANDLER_COUNT_KEY = + "ozone.om.handler.count.key"; + public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20; + + public static final String OZONE_OM_ADDRESS_KEY = + "ozone.om.address"; + public static final String OZONE_OM_BIND_HOST_DEFAULT = + "0.0.0.0"; + public static final int OZONE_OM_PORT_DEFAULT = 9862; + + public static final String OZONE_OM_HTTP_ENABLED_KEY = + "ozone.om.http.enabled"; + public static final String OZONE_OM_HTTP_BIND_HOST_KEY = + "ozone.om.http-bind-host"; + public static final String OZONE_OM_HTTPS_BIND_HOST_KEY = + "ozone.om.https-bind-host"; + public static final String OZONE_OM_HTTP_ADDRESS_KEY = + "ozone.om.http-address"; + public static final String OZONE_OM_HTTPS_ADDRESS_KEY = + "ozone.om.https-address"; + public static final String OZONE_OM_KEYTAB_FILE = + "ozone.om.keytab.file"; + public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; + public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874; + public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875; + + // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB. + public static final String OZONE_OM_DB_CACHE_SIZE_MB = + "ozone.om.db.cache.size.mb"; + public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128; + + public static final String OZONE_OM_USER_MAX_VOLUME = + "ozone.om.user.max.volume"; + public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024; + + // OM Default user/group permissions + public static final String OZONE_OM_USER_RIGHTS = + "ozone.om.user.rights"; + public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT = + OzoneAcl.OzoneACLRights.READ_WRITE; + + public static final String OZONE_OM_GROUP_RIGHTS = + "ozone.om.group.rights"; + public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT = + OzoneAcl.OzoneACLRights.READ_WRITE; + + public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK = + "ozone.key.deleting.limit.per.task"; + public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000; +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java similarity index 81% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 1211b50c5c..6aabfef6b4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import java.util.List; import java.util.stream.Collectors; @@ -25,13 +25,13 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.BucketArgs; -import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; + .OzoneManagerProtocolProtos.BucketArgs; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; /** * A class that encapsulates Bucket Arguments. */ -public final class KsmBucketArgs { +public final class OmBucketArgs { /** * Name of the volume in which the bucket belongs to. */ @@ -67,9 +67,9 @@ public final class KsmBucketArgs { * @param isVersionEnabled - Bucket version flag. * @param storageType - Storage type to be used. */ - private KsmBucketArgs(String volumeName, String bucketName, - List addAcls, List removeAcls, - Boolean isVersionEnabled, StorageType storageType) { + private OmBucketArgs(String volumeName, String bucketName, + List addAcls, List removeAcls, + Boolean isVersionEnabled, StorageType storageType) { this.volumeName = volumeName; this.bucketName = bucketName; this.addAcls = addAcls; @@ -127,7 +127,7 @@ public StorageType getStorageType() { } /** - * Returns new builder class that builds a KsmBucketArgs. + * Returns new builder class that builds a OmBucketArgs. * * @return Builder */ @@ -136,7 +136,7 @@ public static Builder newBuilder() { } /** - * Builder for KsmBucketArgs. + * Builder for OmBucketArgs. */ public static class Builder { private String volumeName; @@ -177,19 +177,19 @@ public Builder setStorageType(StorageType storage) { } /** - * Constructs the KsmBucketArgs. - * @return instance of KsmBucketArgs. + * Constructs the OmBucketArgs. + * @return instance of OmBucketArgs. */ - public KsmBucketArgs build() { + public OmBucketArgs build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - return new KsmBucketArgs(volumeName, bucketName, addAcls, + return new OmBucketArgs(volumeName, bucketName, addAcls, removeAcls, isVersionEnabled, storageType); } } /** - * Creates BucketArgs protobuf from KsmBucketArgs. + * Creates BucketArgs protobuf from OmBucketArgs. */ public BucketArgs getProtobuf() { BucketArgs.Builder builder = BucketArgs.newBuilder(); @@ -197,11 +197,11 @@ public BucketArgs getProtobuf() { .setBucketName(bucketName); if(addAcls != null && !addAcls.isEmpty()) { builder.addAllAddAcls(addAcls.stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())); + OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); } if(removeAcls != null && !removeAcls.isEmpty()) { builder.addAllRemoveAcls(removeAcls.stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())); + OMPBHelper::convertOzoneAcl).collect(Collectors.toList())); } if(isVersionEnabled != null) { builder.setIsVersionEnabled(isVersionEnabled); @@ -214,20 +214,20 @@ public BucketArgs getProtobuf() { } /** - * Parses BucketInfo protobuf and creates KsmBucketArgs. + * Parses BucketInfo protobuf and creates OmBucketArgs. 
* @param bucketArgs - * @return instance of KsmBucketArgs + * @return instance of OmBucketArgs */ - public static KsmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - return new KsmBucketArgs(bucketArgs.getVolumeName(), + public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { + return new OmBucketArgs(bucketArgs.getVolumeName(), bucketArgs.getBucketName(), bucketArgs.getAddAclsList().stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()), + OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), bucketArgs.getRemoveAclsList().stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()), + OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), bucketArgs.hasIsVersionEnabled() ? bucketArgs.getIsVersionEnabled() : null, bucketArgs.hasStorageType() ? PBHelperClient.convertStorageType( bucketArgs.getStorageType()) : null); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java similarity index 83% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index a49137a7cf..bf5abddc43 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; + .OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import java.util.LinkedList; import java.util.List; @@ -32,7 +32,7 @@ /** * A class that encapsulates Bucket Info. */ -public final class KsmBucketInfo { +public final class OmBucketInfo { /** * Name of the volume in which the bucket belongs to. */ @@ -68,9 +68,9 @@ public final class KsmBucketInfo { * @param storageType - Storage type to be used. * @param creationTime - Bucket creation time. */ - private KsmBucketInfo(String volumeName, String bucketName, - List acls, boolean isVersionEnabled, - StorageType storageType, long creationTime) { + private OmBucketInfo(String volumeName, String bucketName, + List acls, boolean isVersionEnabled, + StorageType storageType, long creationTime) { this.volumeName = volumeName; this.bucketName = bucketName; this.acls = acls; @@ -129,7 +129,7 @@ public long getCreationTime() { } /** - * Returns new builder class that builds a KsmBucketInfo. + * Returns new builder class that builds a OmBucketInfo. * * @return Builder */ @@ -138,7 +138,7 @@ public static Builder newBuilder() { } /** - * Builder for KsmBucketInfo. + * Builder for OmBucketInfo. */ public static class Builder { private String volumeName; @@ -186,30 +186,30 @@ public Builder setCreationTime(long createdOn) { } /** - * Constructs the KsmBucketInfo. - * @return instance of KsmBucketInfo. + * Constructs the OmBucketInfo. 
+ * @return instance of OmBucketInfo. */ - public KsmBucketInfo build() { + public OmBucketInfo build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(acls); Preconditions.checkNotNull(isVersionEnabled); Preconditions.checkNotNull(storageType); - return new KsmBucketInfo(volumeName, bucketName, acls, + return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, storageType, creationTime); } } /** - * Creates BucketInfo protobuf from KsmBucketInfo. + * Creates BucketInfo protobuf from OmBucketInfo. */ public BucketInfo getProtobuf() { return BucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) .addAllAcls(acls.stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())) + OMPBHelper::convertOzoneAcl).collect(Collectors.toList())) .setIsVersionEnabled(isVersionEnabled) .setStorageType(PBHelperClient.convertStorageType( storageType)) @@ -218,18 +218,18 @@ public BucketInfo getProtobuf() { } /** - * Parses BucketInfo protobuf and creates KsmBucketInfo. + * Parses BucketInfo protobuf and creates OmBucketInfo. * @param bucketInfo - * @return instance of KsmBucketInfo + * @return instance of OmBucketInfo */ - public static KsmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { - return new KsmBucketInfo( + public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { + return new OmBucketInfo( bucketInfo.getVolumeName(), bucketInfo.getBucketName(), bucketInfo.getAclsList().stream().map( - KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()), + OMPBHelper::convertOzoneAcl).collect(Collectors.toList()), bucketInfo.getIsVersionEnabled(), PBHelperClient.convertStorageType( bucketInfo.getStorageType()), bucketInfo.getCreationTime()); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java similarity index 88% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index cd17e28b9a..1f8ed5fb1e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -23,7 +23,7 @@ * Args for key. Client use this to specify key's attributes on key creation * (putKey()). 
*/ -public final class KsmKeyArgs { +public final class OmKeyArgs { private final String volumeName; private final String bucketName; private final String keyName; @@ -31,8 +31,8 @@ public final class KsmKeyArgs { private final ReplicationType type; private final ReplicationFactor factor; - private KsmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationType type, ReplicationFactor factor) { + private OmKeyArgs(String volumeName, String bucketName, String keyName, + long dataSize, ReplicationType type, ReplicationFactor factor) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -70,7 +70,7 @@ public void setDataSize(long size) { } /** - * Builder class of KsmKeyArgs. + * Builder class of OmKeyArgs. */ public static class Builder { private String volumeName; @@ -111,8 +111,8 @@ public Builder setFactor(ReplicationFactor replicationFactor) { return this; } - public KsmKeyArgs build() { - return new KsmKeyArgs(volumeName, bucketName, keyName, dataSize, + public OmKeyArgs build() { + return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, factor); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java similarity index 79% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 5d6e63367a..05c8d45fbf 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.util.Time; import java.io.IOException; @@ -28,36 +28,36 @@ /** * Args for key block. The block instance for the key requested in putKey. - * This is returned from KSM to client, and client use class to talk to - * datanode. Also, this is the metadata written to ksm.db on server side. + * This is returned from OM to client, and client use class to talk to + * datanode. Also, this is the metadata written to om.db on server side. 
*/ -public final class KsmKeyInfo { +public final class OmKeyInfo { private final String volumeName; private final String bucketName; // name of key client specified private String keyName; private long dataSize; - private List keyLocationVersions; + private List keyLocationVersions; private final long creationTime; private long modificationTime; private HddsProtos.ReplicationType type; private HddsProtos.ReplicationFactor factor; - private KsmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor) { + private OmKeyInfo(String volumeName, String bucketName, String keyName, + List versions, long dataSize, + long creationTime, long modificationTime, HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; this.dataSize = dataSize; // it is important that the versions are ordered from old to new. - // Do this sanity check when versions got loaded on creating KsmKeyInfo. + // Do this sanity check when versions got loaded on creating OmKeyInfo. // TODO : this is not necessary, here only because versioning is still a // work in-progress, remove this following check when versioning is // complete and prove correctly functioning long currentVersion = -1; - for (KsmKeyLocationInfoGroup version : versions) { + for (OmKeyLocationInfoGroup version : versions) { Preconditions.checkArgument( currentVersion + 1 == version.getVersion()); currentVersion = version.getVersion(); @@ -101,13 +101,13 @@ public void setDataSize(long size) { this.dataSize = size; } - public synchronized KsmKeyLocationInfoGroup getLatestVersionLocations() + public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() throws IOException { return keyLocationVersions.size() == 0? null : keyLocationVersions.get(keyLocationVersions.size() - 1); } - public List getKeyLocationVersions() { + public List getKeyLocationVersions() { return keyLocationVersions; } @@ -123,11 +123,11 @@ public void updateModifcationTime() { * @throws IOException */ public synchronized void appendNewBlocks( - List newLocationList) throws IOException { + List newLocationList) throws IOException { if (keyLocationVersions.size() == 0) { throw new IOException("Appending new block, but no version exist"); } - KsmKeyLocationInfoGroup currentLatestVersion = + OmKeyLocationInfoGroup currentLatestVersion = keyLocationVersions.get(keyLocationVersions.size() - 1); currentLatestVersion.appendNewBlocks(newLocationList); setModificationTime(Time.now()); @@ -141,18 +141,18 @@ public synchronized void appendNewBlocks( * @throws IOException */ public synchronized long addNewVersion( - List newLocationList) throws IOException { + List newLocationList) throws IOException { long latestVersionNum; if (keyLocationVersions.size() == 0) { // no version exist, these blocks are the very first version. 
- keyLocationVersions.add(new KsmKeyLocationInfoGroup(0, newLocationList)); + keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList)); latestVersionNum = 0; } else { // it is important that the new version are always at the tail of the list - KsmKeyLocationInfoGroup currentLatestVersion = + OmKeyLocationInfoGroup currentLatestVersion = keyLocationVersions.get(keyLocationVersions.size() - 1); // the new version is created based on the current latest version - KsmKeyLocationInfoGroup newVersion = + OmKeyLocationInfoGroup newVersion = currentLatestVersion.generateNextVersion(newLocationList); keyLocationVersions.add(newVersion); latestVersionNum = newVersion.getVersion(); @@ -174,14 +174,14 @@ public void setModificationTime(long modificationTime) { } /** - * Builder of KsmKeyInfo. + * Builder of OmKeyInfo. */ public static class Builder { private String volumeName; private String bucketName; private String keyName; private long dataSize; - private List ksmKeyLocationInfoGroups; + private List omKeyLocationInfoGroups; private long creationTime; private long modificationTime; private HddsProtos.ReplicationType type; @@ -202,9 +202,9 @@ public Builder setKeyName(String key) { return this; } - public Builder setKsmKeyLocationInfos( - List ksmKeyLocationInfoList) { - this.ksmKeyLocationInfoGroups = ksmKeyLocationInfoList; + public Builder setOmKeyLocationInfos( + List omKeyLocationInfoList) { + this.omKeyLocationInfoGroups = omKeyLocationInfoList; return this; } @@ -233,9 +233,9 @@ public Builder setReplicationType(HddsProtos.ReplicationType type) { return this; } - public KsmKeyInfo build() { - return new KsmKeyInfo( - volumeName, bucketName, keyName, ksmKeyLocationInfoGroups, + public OmKeyInfo build() { + return new OmKeyInfo( + volumeName, bucketName, keyName, omKeyLocationInfoGroups, dataSize, creationTime, modificationTime, type, factor); } } @@ -251,7 +251,7 @@ public KeyInfo getProtobuf() { .setFactor(factor) .setType(type) .addAllKeyLocationList(keyLocationVersions.stream() - .map(KsmKeyLocationInfoGroup::getProtobuf) + .map(OmKeyLocationInfoGroup::getProtobuf) .collect(Collectors.toList())) .setLatestVersion(latestVersion) .setCreationTime(creationTime) @@ -259,13 +259,13 @@ public KeyInfo getProtobuf() { .build(); } - public static KsmKeyInfo getFromProtobuf(KeyInfo keyInfo) { - return new KsmKeyInfo( + public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { + return new OmKeyInfo( keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName(), keyInfo.getKeyLocationListList().stream() - .map(KsmKeyLocationInfoGroup::getFromProtobuf) + .map(OmKeyLocationInfoGroup::getFromProtobuf) .collect(Collectors.toList()), keyInfo.getDataSize(), keyInfo.getCreationTime(), diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java similarity index 85% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java index 45feda0b7c..3f6666df81 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java @@ -14,16 +14,16 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; /** * One key can be too huge to fit in one container. In which case it gets split * into a number of subkeys. This class represents one such subkey instance. */ -public final class KsmKeyLocationInfo { +public final class OmKeyLocationInfo { private final BlockID blockID; private final boolean shouldCreateContainer; // the id of this subkey in all the subkeys. @@ -32,8 +32,8 @@ public final class KsmKeyLocationInfo { // the version number indicating when this block was added private long createVersion; - private KsmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer, - long length, long offset) { + private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer, + long length, long offset) { this.blockID = blockID; this.shouldCreateContainer = shouldCreateContainer; this.length = length; @@ -73,7 +73,7 @@ public long getOffset() { } /** - * Builder of KsmKeyLocationInfo. + * Builder of OmKeyLocationInfo. */ public static class Builder { private BlockID blockID; @@ -101,8 +101,8 @@ public Builder setOffset(long off) { return this; } - public KsmKeyLocationInfo build() { - return new KsmKeyLocationInfo(blockID, + public OmKeyLocationInfo build() { + return new OmKeyLocationInfo(blockID, shouldCreateContainer, length, offset); } } @@ -117,8 +117,8 @@ public KeyLocation getProtobuf() { .build(); } - public static KsmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { - KsmKeyLocationInfo info = new KsmKeyLocationInfo( + public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { + OmKeyLocationInfo info = new OmKeyLocationInfo( BlockID.getFromProtobuf(keyLocation.getBlockID()), keyLocation.getShouldCreateContainer(), keyLocation.getLength(), diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java similarity index 70% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index 0facf3ca41..8bdcee3803 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -14,9 +14,9 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocationList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; import java.io.IOException; import java.util.ArrayList; @@ -27,12 +27,12 @@ * A list of key locations. This class represents one single version of the * blocks of a key. 
*/ -public class KsmKeyLocationInfoGroup { +public class OmKeyLocationInfoGroup { private final long version; - private final List locationList; + private final List locationList; - public KsmKeyLocationInfoGroup(long version, - List locations) { + public OmKeyLocationInfoGroup(long version, + List locations) { this.version = version; this.locationList = locations; } @@ -42,8 +42,8 @@ public KsmKeyLocationInfoGroup(long version, * * @return the list of blocks that are created in the latest version. */ - public List getBlocksLatestVersionOnly() { - List list = new ArrayList<>(); + public List getBlocksLatestVersionOnly() { + List list = new ArrayList<>(); locationList.stream().filter(x -> x.getCreateVersion() == version) .forEach(list::add); return list; @@ -53,7 +53,7 @@ public long getVersion() { return version; } - public List getLocationList() { + public List getLocationList() { return locationList; } @@ -61,17 +61,17 @@ public KeyLocationList getProtobuf() { return KeyLocationList.newBuilder() .setVersion(version) .addAllKeyLocations( - locationList.stream().map(KsmKeyLocationInfo::getProtobuf) + locationList.stream().map(OmKeyLocationInfo::getProtobuf) .collect(Collectors.toList())) .build(); } - public static KsmKeyLocationInfoGroup getFromProtobuf( + public static OmKeyLocationInfoGroup getFromProtobuf( KeyLocationList keyLocationList) { - return new KsmKeyLocationInfoGroup( + return new OmKeyLocationInfoGroup( keyLocationList.getVersion(), keyLocationList.getKeyLocationsList().stream() - .map(KsmKeyLocationInfo::getFromProtobuf) + .map(OmKeyLocationInfo::getFromProtobuf) .collect(Collectors.toList())); } @@ -82,25 +82,25 @@ public static KsmKeyLocationInfoGroup getFromProtobuf( * @param newLocationList a list of new location to be added. * @return */ - KsmKeyLocationInfoGroup generateNextVersion( - List newLocationList) throws IOException { + OmKeyLocationInfoGroup generateNextVersion( + List newLocationList) throws IOException { // TODO : revisit if we can do this method more efficiently // one potential inefficiency here is that later version always include // older ones. e.g. 
v1 has B1, then v2, v3...will all have B1 and only add // more - List newList = new ArrayList<>(); + List newList = new ArrayList<>(); newList.addAll(locationList); - for (KsmKeyLocationInfo newInfo : newLocationList) { + for (OmKeyLocationInfo newInfo : newLocationList) { // all these new blocks will have addVersion of current version + 1 newInfo.setCreateVersion(version + 1); newList.add(newInfo); } - return new KsmKeyLocationInfoGroup(version + 1, newList); + return new OmKeyLocationInfoGroup(version + 1, newList); } - void appendNewBlocks(List newLocationList) + void appendNewBlocks(List newLocationList) throws IOException { - for (KsmKeyLocationInfo info : newLocationList) { + for (OmKeyLocationInfo info : newLocationList) { info.setCreateVersion(version); locationList.add(info); } @@ -110,7 +110,7 @@ void appendNewBlocks(List newLocationList) public String toString() { StringBuilder sb = new StringBuilder(); sb.append("version:").append(version).append(" "); - for (KsmKeyLocationInfo kli : locationList) { + for (OmKeyLocationInfo kli : locationList) { sb.append(kli.getLocalID()).append(" || "); } return sb.toString(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java similarity index 89% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index 7d9efad15a..de75a05e9d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -16,14 +16,14 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; + .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType; + .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; import java.util.List; import java.util.LinkedList; @@ -34,11 +34,11 @@ /** * This helper class keeps a map of all user and their permissions. 
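Illustrative sketch (not part of the patch): the renamed key-location helpers keep their versioning behaviour, only the Ksm* prefix becomes Om*. The snippet below composes them using only the methods visible in the hunks above; `blocks` and `moreBlocks` are hypothetical lists of already-allocated blocks, the no-arg `OmKeyInfo.Builder()` constructor is assumed, and the volume/bucket setters are omitted.

```
// Sketch only: exercising the renamed versioning helpers.
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;

public class OmKeyVersioningSketch {
  // "blocks" and "moreBlocks" are hypothetical lists of already-allocated blocks.
  static OmKeyInfo sketch(List<OmKeyLocationInfo> blocks,
      List<OmKeyLocationInfo> moreBlocks) throws IOException {
    // Versions must start at 0 and increase by one; the OmKeyInfo
    // constructor sanity-checks this ordering.
    OmKeyLocationInfoGroup version0 = new OmKeyLocationInfoGroup(0, blocks);

    OmKeyInfo keyInfo = new OmKeyInfo.Builder()
        .setKeyName("key1") // volume/bucket setters omitted in this sketch
        .setOmKeyLocationInfos(
            new ArrayList<>(Collections.singletonList(version0)))
        .build();

    // appendNewBlocks grows the current latest version in place, while
    // addNewVersion stacks a new OmKeyLocationInfoGroup on top of it.
    keyInfo.appendNewBlocks(moreBlocks);
    long latest = keyInfo.addNewVersion(moreBlocks);

    assert keyInfo.getLatestVersionLocations().getVersion() == latest;
    return keyInfo;
  }
}
```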
*/ -public class KsmOzoneAclMap { +public class OmOzoneAclMap { // per Acl Type user:rights map private ArrayList> aclMaps; - KsmOzoneAclMap() { + OmOzoneAclMap() { aclMaps = new ArrayList<>(); for (OzoneAclType aclType : OzoneAclType.values()) { aclMaps.add(aclType.ordinal(), new HashMap<>()); @@ -99,9 +99,9 @@ public List ozoneAclGetProtobuf() { } // Create map from list of OzoneAclInfos - public static KsmOzoneAclMap ozoneAclGetFromProtobuf( + public static OmOzoneAclMap ozoneAclGetFromProtobuf( List aclList) { - KsmOzoneAclMap aclMap = new KsmOzoneAclMap(); + OmOzoneAclMap aclMap = new OmOzoneAclMap(); for (OzoneAclInfo acl : aclList) { aclMap.addAcl(acl); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java similarity index 83% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 6b42c279a0..c8b59b682d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.VolumeInfo; + .OzoneManagerProtocolProtos.VolumeInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import java.io.IOException; @@ -33,16 +33,16 @@ /** - * A class that encapsulates the KsmVolumeArgs Args. + * A class that encapsulates the OmVolumeArgs Args. */ -public final class KsmVolumeArgs { +public final class OmVolumeArgs { private final String adminName; private final String ownerName; private final String volume; private final long creationTime; private final long quotaInBytes; private final Map keyValueMap; - private final KsmOzoneAclMap aclMap; + private final OmOzoneAclMap aclMap; /** * Private constructor, constructed via builder. @@ -54,9 +54,9 @@ public final class KsmVolumeArgs { * @param aclMap - User to access rights map. * @param creationTime - Volume creation time. */ - private KsmVolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map keyValueMap, - KsmOzoneAclMap aclMap, long creationTime) { + private OmVolumeArgs(String adminName, String ownerName, String volume, + long quotaInBytes, Map keyValueMap, + OmOzoneAclMap aclMap, long creationTime) { this.adminName = adminName; this.ownerName = ownerName; this.volume = volume; @@ -110,11 +110,11 @@ public Map getKeyValueMap() { return keyValueMap; } - public KsmOzoneAclMap getAclMap() { + public OmOzoneAclMap getAclMap() { return aclMap; } /** - * Returns new builder class that builds a KsmVolumeArgs. + * Returns new builder class that builds a OmVolumeArgs. * * @return Builder */ @@ -123,7 +123,7 @@ public static Builder newBuilder() { } /** - * Builder for KsmVolumeArgs. + * Builder for OmVolumeArgs. 
*/ public static class Builder { private String adminName; @@ -132,14 +132,14 @@ public static class Builder { private long creationTime; private long quotaInBytes; private Map keyValueMap; - private KsmOzoneAclMap aclMap; + private OmOzoneAclMap aclMap; /** * Constructs a builder. */ Builder() { keyValueMap = new HashMap<>(); - aclMap = new KsmOzoneAclMap(); + aclMap = new OmOzoneAclMap(); } public Builder setAdminName(String admin) { @@ -181,11 +181,11 @@ public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException { * Constructs a CreateVolumeArgument. * @return CreateVolumeArgs. */ - public KsmVolumeArgs build() { + public OmVolumeArgs build() { Preconditions.checkNotNull(adminName); Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); - return new KsmVolumeArgs(adminName, ownerName, volume, quotaInBytes, + return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, keyValueMap, aclMap, creationTime); } } @@ -209,14 +209,14 @@ public VolumeInfo getProtobuf() { .build(); } - public static KsmVolumeArgs getFromProtobuf(VolumeInfo volInfo) { + public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) { Map kvMap = volInfo.getMetadataList().stream() .collect(Collectors.toMap(KeyValue::getKey, KeyValue::getValue)); - KsmOzoneAclMap aclMap = - KsmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList()); + OmOzoneAclMap aclMap = + OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList()); - return new KsmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(), + return new OmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(), volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap, volInfo.getCreationTime()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java similarity index 89% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java index c19c04b1d8..bc364e665f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; /** * This class represents a open key "session". A session here means a key is @@ -24,13 +24,13 @@ */ public class OpenKeySession { private final int id; - private final KsmKeyInfo keyInfo; + private final OmKeyInfo keyInfo; // the version of the key when it is being opened in this session. // a block that has a create version equals to open version means it will // be committed only when this open session is closed. 
private long openVersion; - public OpenKeySession(int id, KsmKeyInfo info, long version) { + public OpenKeySession(int id, OmKeyInfo info, long version) { this.id = id; this.keyInfo = info; this.openVersion = version; @@ -40,7 +40,7 @@ public long getOpenVersion() { return this.openVersion; } - public KsmKeyInfo getKeyInfo() { + public OmKeyInfo getKeyInfo() { return keyInfo; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java similarity index 89% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index e07232d388..9b03aefe1a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import com.fasterxml.jackson.annotation.JsonIgnore; @@ -25,8 +25,8 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.client.rest.response.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; @@ -121,14 +121,14 @@ public int getPort(ServicePort.Type type) { } /** - * Converts {@link ServiceInfo} to KeySpaceManagerProtocolProtos.ServiceInfo. + * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. * - * @return KeySpaceManagerProtocolProtos.ServiceInfo + * @return OzoneManagerProtocolProtos.ServiceInfo */ @JsonIgnore - public KeySpaceManagerProtocolProtos.ServiceInfo getProtobuf() { - KeySpaceManagerProtocolProtos.ServiceInfo.Builder builder = - KeySpaceManagerProtocolProtos.ServiceInfo.newBuilder(); + public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { + OzoneManagerProtocolProtos.ServiceInfo.Builder builder = + OzoneManagerProtocolProtos.ServiceInfo.newBuilder(); builder.setNodeType(nodeType) .setHostname(hostname) .addAllServicePorts( @@ -143,13 +143,13 @@ public KeySpaceManagerProtocolProtos.ServiceInfo getProtobuf() { } /** - * Converts KeySpaceManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}. + * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}. 
* * @return {@link ServiceInfo} */ @JsonIgnore public static ServiceInfo getFromProtobuf( - KeySpaceManagerProtocolProtos.ServiceInfo serviceInfo) { + OzoneManagerProtocolProtos.ServiceInfo serviceInfo) { return new ServiceInfo(serviceInfo.getNodeType(), serviceInfo.getHostname(), serviceInfo.getServicePortsList()); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java similarity index 98% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java index 1a3d486d12..6fc7c8fcc5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; +package org.apache.hadoop.ozone.om.helpers; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java similarity index 94% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java index ce627a5c39..b1211d8cb8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java @@ -15,4 +15,4 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.helpers; \ No newline at end of file +package org.apache.hadoop.ozone.om.helpers; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java similarity index 94% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java index 7698ee191a..1744cffc13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; /** - This package contains client side protocol library to communicate with KSM. + This package contains client side protocol library to communicate with OM. 
*/ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java similarity index 76% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 54862d3241..b7a099d028 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -15,32 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.protocol; +package org.apache.hadoop.ozone.om.protocol; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import java.io.IOException; import java.util.List; /** - * Protocol to talk to KSM. + * Protocol to talk to OM. */ -public interface KeySpaceManagerProtocol { +public interface OzoneManagerProtocol { /** * Creates a volume. * @param args - Arguments to create Volume. * @throws IOException */ - void createVolume(KsmVolumeArgs args) throws IOException; + void createVolume(OmVolumeArgs args) throws IOException; /** * Changes the owner of a volume. @@ -75,7 +75,7 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) * @return VolumeArgs or exception is thrown. * @throws IOException */ - KsmVolumeArgs getVolumeInfo(String volume) throws IOException; + OmVolumeArgs getVolumeInfo(String volume) throws IOException; /** * Deletes an existing empty volume. @@ -93,7 +93,7 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) * @return List of Volumes. * @throws IOException */ - List listVolumeByUser(String userName, String prefix, String + List listVolumeByUser(String userName, String prefix, String prevKey, int maxKeys) throws IOException; /** @@ -104,7 +104,7 @@ List listVolumeByUser(String userName, String prefix, String * @return List of Volumes. * @throws IOException */ - List listAllVolumes(String prefix, String + List listAllVolumes(String prefix, String prevKey, int maxKeys) throws IOException; /** @@ -112,16 +112,16 @@ List listAllVolumes(String prefix, String * @param bucketInfo - BucketInfo to create Bucket. 
* @throws IOException */ - void createBucket(KsmBucketInfo bucketInfo) throws IOException; + void createBucket(OmBucketInfo bucketInfo) throws IOException; /** * Gets the bucket information. * @param volumeName - Volume name. * @param bucketName - Bucket name. - * @return KsmBucketInfo or exception is thrown. + * @return OmBucketInfo or exception is thrown. * @throws IOException */ - KsmBucketInfo getBucketInfo(String volumeName, String bucketName) + OmBucketInfo getBucketInfo(String volumeName, String bucketName) throws IOException; /** @@ -129,7 +129,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @param args - BucketArgs. * @throws IOException */ - void setBucketProperty(KsmBucketArgs args) throws IOException; + void setBucketProperty(OmBucketArgs args) throws IOException; /** * Open the given key and return an open key session. @@ -138,7 +138,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @return OpenKeySession instance that client uses to talk to container. * @throws IOException */ - OpenKeySession openKey(KsmKeyArgs args) throws IOException; + OpenKeySession openKey(OmKeyArgs args) throws IOException; /** * Commit a key. This will make the change from the client visible. The client @@ -148,7 +148,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @param clientID the client identification * @throws IOException */ - void commitKey(KsmKeyArgs args, int clientID) throws IOException; + void commitKey(OmKeyArgs args, int clientID) throws IOException; /** * Allocate a new block, it is assumed that the client is having an open key @@ -159,24 +159,24 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @return an allocated block * @throws IOException */ - KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) + OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID) throws IOException; /** * Look up for the container of an existing key. * * @param args the args of the key. - * @return KsmKeyInfo instance that client uses to talk to container. + * @return OmKeyInfo instance that client uses to talk to container. * @throws IOException */ - KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException; + OmKeyInfo lookupKey(OmKeyArgs args) throws IOException; /** * Rename an existing key within a bucket * @param args the args of the key. * @param toKeyName New name to be used for the Key */ - void renameKey(KsmKeyArgs args, String toKeyName) throws IOException; + void renameKey(OmKeyArgs args, String toKeyName) throws IOException; /** * Deletes an existing key. @@ -184,7 +184,7 @@ KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) * @param args the args of the key. * @throws IOException */ - void deleteKey(KsmKeyArgs args) throws IOException; + void deleteKey(OmKeyArgs args) throws IOException; /** * Deletes an existing empty bucket from volume. @@ -195,7 +195,7 @@ KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) void deleteBucket(String volume, String bucket) throws IOException; /** - * Returns a list of buckets represented by {@link KsmBucketInfo} + * Returns a list of buckets represented by {@link OmBucketInfo} * in the given volume. Argument volumeName is required, others * are optional. * @@ -213,12 +213,12 @@ KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) * @return a list of buckets. 
* @throws IOException */ - List listBuckets(String volumeName, - String startBucketName, String bucketPrefix, int maxNumOfBuckets) + List listBuckets(String volumeName, + String startBucketName, String bucketPrefix, int maxNumOfBuckets) throws IOException; /** - * Returns a list of keys represented by {@link KsmKeyInfo} + * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. Argument volumeName, bucketName is required, * others are optional. * @@ -238,8 +238,8 @@ List listBuckets(String volumeName, * @return a list of keys. * @throws IOException */ - List listKeys(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) + List listKeys(String volumeName, + String bucketName, String startKeyName, String keyPrefix, int maxKeys) throws IOException; /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java similarity index 94% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java index f77e5fd047..9c7f3888d3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java @@ -16,4 +16,4 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.protocol; \ No newline at end of file +package org.apache.hadoop.ozone.om.protocol; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java similarity index 79% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 0f381692af..37151fb659 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.ksm.protocolPB; +package org.apache.hadoop.ozone.om.protocolPB; import com.google.common.base.Strings; import com.google.common.collect.Lists; @@ -24,95 +24,95 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; -import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.AllocateBlockRequest; + .OzoneManagerProtocolProtos.AllocateBlockRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.AllocateBlockResponse; + .OzoneManagerProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CommitKeyRequest; + .OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CommitKeyResponse; + .OzoneManagerProtocolProtos.CommitKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.BucketArgs; + .OzoneManagerProtocolProtos.BucketArgs; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.BucketInfo; + .OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateBucketRequest; + .OzoneManagerProtocolProtos.CreateBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateBucketResponse; + .OzoneManagerProtocolProtos.CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoBucketRequest; + .OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoBucketResponse; + .OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest; + .OzoneManagerProtocolProtos.SetBucketPropertyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse; + .OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteBucketRequest; + .OzoneManagerProtocolProtos.DeleteBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteBucketResponse; + .OzoneManagerProtocolProtos.DeleteBucketResponse; import org.apache.hadoop.ozone.protocol.proto 
- .KeySpaceManagerProtocolProtos.CreateVolumeRequest; + .OzoneManagerProtocolProtos.CreateVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateVolumeResponse; + .OzoneManagerProtocolProtos.CreateVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.LocateKeyRequest; + .OzoneManagerProtocolProtos.LocateKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.LocateKeyResponse; + .OzoneManagerProtocolProtos.LocateKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.RenameKeyRequest; + .OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.RenameKeyResponse; + .OzoneManagerProtocolProtos.RenameKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.KeyArgs; + .OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest; + .OzoneManagerProtocolProtos.SetVolumePropertyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse; + .OzoneManagerProtocolProtos.SetVolumePropertyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteVolumeRequest; + .OzoneManagerProtocolProtos.DeleteVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteVolumeResponse; + .OzoneManagerProtocolProtos.DeleteVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoVolumeRequest; + .OzoneManagerProtocolProtos.InfoVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoVolumeResponse; + .OzoneManagerProtocolProtos.InfoVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest; + .OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse; + .OzoneManagerProtocolProtos.CheckVolumeAccessResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListBucketsRequest; + .OzoneManagerProtocolProtos.ListBucketsRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListBucketsResponse; + .OzoneManagerProtocolProtos.ListBucketsResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListKeysRequest; + .OzoneManagerProtocolProtos.ListKeysRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListKeysResponse; + .OzoneManagerProtocolProtos.ListKeysResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.VolumeInfo; + .OzoneManagerProtocolProtos.VolumeInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.Status; + .OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListVolumeRequest; + .OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListVolumeResponse; + .OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - 
.KeySpaceManagerProtocolProtos.ServiceListRequest; + .OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServiceListResponse; + .OzoneManagerProtocolProtos.ServiceListResponse; import java.io.Closeable; import java.io.IOException; @@ -121,26 +121,26 @@ import java.util.stream.Collectors; /** - * The client side implementation of KeySpaceManagerProtocol. + * The client side implementation of OzoneManagerProtocol. */ @InterfaceAudience.Private -public final class KeySpaceManagerProtocolClientSideTranslatorPB - implements KeySpaceManagerProtocol, ProtocolTranslator, Closeable { +public final class OzoneManagerProtocolClientSideTranslatorPB + implements OzoneManagerProtocol, ProtocolTranslator, Closeable { /** * RpcController is not used and hence is set to null. */ private static final RpcController NULL_RPC_CONTROLLER = null; - private final KeySpaceManagerProtocolPB rpcProxy; + private final OzoneManagerProtocolPB rpcProxy; /** * Constructor for KeySpaceManger Client. * @param rpcProxy */ - public KeySpaceManagerProtocolClientSideTranslatorPB( - KeySpaceManagerProtocolPB rpcProxy) { + public OzoneManagerProtocolClientSideTranslatorPB( + OzoneManagerProtocolPB rpcProxy) { this.rpcProxy = rpcProxy; } @@ -169,7 +169,7 @@ public void close() throws IOException { * @throws IOException */ @Override - public void createVolume(KsmVolumeArgs args) throws IOException { + public void createVolume(OmVolumeArgs args) throws IOException { CreateVolumeRequest.Builder req = CreateVolumeRequest.newBuilder(); VolumeInfo volumeInfo = args.getProtobuf(); @@ -273,11 +273,11 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws * Gets the volume information. * * @param volume - Volume name. - * @return KsmVolumeArgs or exception is thrown. + * @return OmVolumeArgs or exception is thrown. 
* @throws IOException */ @Override - public KsmVolumeArgs getVolumeInfo(String volume) throws IOException { + public OmVolumeArgs getVolumeInfo(String volume) throws IOException { InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder(); req.setVolumeName(volume); final InfoVolumeResponse resp; @@ -290,7 +290,7 @@ public KsmVolumeArgs getVolumeInfo(String volume) throws IOException { throw new IOException("Info Volume failed, error:" + resp.getStatus()); } - return KsmVolumeArgs.getFromProtobuf(resp.getVolumeInfo()); + return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo()); } /** @@ -327,8 +327,8 @@ public void deleteVolume(String volume) throws IOException { * @throws IOException */ @Override - public List listVolumeByUser(String userName, String prefix, - String prevKey, int maxKeys) + public List listVolumeByUser(String userName, String prefix, + String prevKey, int maxKeys) throws IOException { ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); if (!Strings.isNullOrEmpty(prefix)) { @@ -354,8 +354,8 @@ public List listVolumeByUser(String userName, String prefix, * @throws IOException */ @Override - public List listAllVolumes(String prefix, String prevKey, - int maxKeys) throws IOException { + public List listAllVolumes(String prefix, String prevKey, + int maxKeys) throws IOException { ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); if (!Strings.isNullOrEmpty(prefix)) { builder.setPrefix(prefix); @@ -368,7 +368,7 @@ public List listAllVolumes(String prefix, String prevKey, return listVolume(builder.build()); } - private List listVolume(ListVolumeRequest request) + private List listVolume(ListVolumeRequest request) throws IOException { final ListVolumeResponse resp; try { @@ -382,14 +382,14 @@ private List listVolume(ListVolumeRequest request) + resp.getStatus()); } - List result = Lists.newArrayList(); + List result = Lists.newArrayList(); for (VolumeInfo volInfo : resp.getVolumeInfoList()) { - KsmVolumeArgs volArgs = KsmVolumeArgs.getFromProtobuf(volInfo); + OmVolumeArgs volArgs = OmVolumeArgs.getFromProtobuf(volInfo); result.add(volArgs); } return resp.getVolumeInfoList().stream() - .map(item -> KsmVolumeArgs.getFromProtobuf(item)) + .map(item -> OmVolumeArgs.getFromProtobuf(item)) .collect(Collectors.toList()); } @@ -400,7 +400,7 @@ private List listVolume(ListVolumeRequest request) * @throws IOException */ @Override - public void createBucket(KsmBucketInfo bucketInfo) throws IOException { + public void createBucket(OmBucketInfo bucketInfo) throws IOException { CreateBucketRequest.Builder req = CreateBucketRequest.newBuilder(); BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf(); @@ -424,11 +424,11 @@ public void createBucket(KsmBucketInfo bucketInfo) throws IOException { * * @param volume - Volume name. * @param bucket - Bucket name. - * @return KsmBucketInfo or exception is thrown. + * @return OmBucketInfo or exception is thrown. 
* @throws IOException */ @Override - public KsmBucketInfo getBucketInfo(String volume, String bucket) + public OmBucketInfo getBucketInfo(String volume, String bucket) throws IOException { InfoBucketRequest.Builder req = InfoBucketRequest.newBuilder(); @@ -443,7 +443,7 @@ public KsmBucketInfo getBucketInfo(String volume, String bucket) throw ProtobufHelper.getRemoteException(e); } if (resp.getStatus() == Status.OK) { - return KsmBucketInfo.getFromProtobuf(resp.getBucketInfo()); + return OmBucketInfo.getFromProtobuf(resp.getBucketInfo()); } else { throw new IOException("Info Bucket failed, error: " + resp.getStatus()); @@ -456,7 +456,7 @@ public KsmBucketInfo getBucketInfo(String volume, String bucket) * @throws IOException */ @Override - public void setBucketProperty(KsmBucketArgs args) + public void setBucketProperty(OmBucketArgs args) throws IOException { SetBucketPropertyRequest.Builder req = SetBucketPropertyRequest.newBuilder(); @@ -486,9 +486,9 @@ public void setBucketProperty(KsmBucketArgs args) * @throws IOException */ @Override - public List listBuckets(String volumeName, - String startKey, String prefix, int count) throws IOException { - List buckets = new ArrayList<>(); + public List listBuckets(String volumeName, + String startKey, String prefix, int count) throws IOException { + List buckets = new ArrayList<>(); ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder(); reqBuilder.setVolumeName(volumeName); reqBuilder.setCount(count); @@ -509,7 +509,7 @@ public List listBuckets(String volumeName, if (resp.getStatus() == Status.OK) { buckets.addAll( resp.getBucketInfoList().stream() - .map(KsmBucketInfo::getFromProtobuf) + .map(OmBucketInfo::getFromProtobuf) .collect(Collectors.toList())); return buckets; } else { @@ -526,7 +526,7 @@ public List listBuckets(String volumeName, * @throws IOException */ @Override - public OpenKeySession openKey(KsmKeyArgs args) throws IOException { + public OpenKeySession openKey(OmKeyArgs args) throws IOException { LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) @@ -549,11 +549,11 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { throw new IOException("Create key failed, error:" + resp.getStatus()); } return new OpenKeySession(resp.getID(), - KsmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); + OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); } @Override - public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) + public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID) throws IOException { AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder(); KeyArgs keyArgs = KeyArgs.newBuilder() @@ -574,11 +574,11 @@ public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) throw new IOException("Allocate block failed, error:" + resp.getStatus()); } - return KsmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); + return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); } @Override - public void commitKey(KsmKeyArgs args, int clientID) + public void commitKey(OmKeyArgs args, int clientID) throws IOException { CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder(); KeyArgs keyArgs = KeyArgs.newBuilder() @@ -603,7 +603,7 @@ public void commitKey(KsmKeyArgs args, int clientID) @Override - public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { + public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { 
LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) @@ -622,11 +622,11 @@ public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { throw new IOException("Lookup key failed, error:" + resp.getStatus()); } - return KsmKeyInfo.getFromProtobuf(resp.getKeyInfo()); + return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } @Override - public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { + public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder(); KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) @@ -655,7 +655,7 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { * @throws IOException */ @Override - public void deleteKey(KsmKeyArgs args) throws IOException { + public void deleteKey(OmKeyArgs args) throws IOException { LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder(); KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) @@ -701,9 +701,9 @@ public void deleteBucket(String volume, String bucket) throws IOException { * List keys in a bucket. */ @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String prefix, int maxKeys) throws IOException { - List keys = new ArrayList<>(); + public List listKeys(String volumeName, String bucketName, + String startKey, String prefix, int maxKeys) throws IOException { + List keys = new ArrayList<>(); ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder(); reqBuilder.setVolumeName(volumeName); reqBuilder.setBucketName(bucketName); @@ -728,7 +728,7 @@ public List listKeys(String volumeName, String bucketName, if (resp.getStatus() == Status.OK) { keys.addAll( resp.getKeyInfoList().stream() - .map(KsmKeyInfo::getFromProtobuf) + .map(OmKeyInfo::getFromProtobuf) .collect(Collectors.toList())); return keys; } else { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java similarity index 77% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java index 8acca8adb7..e0879d601c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java @@ -15,20 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.protocolPB; +package org.apache.hadoop.ozone.om.protocolPB; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.KeySpaceManagerService; + .OzoneManagerProtocolProtos.OzoneManagerService; /** - * Protocol used to communicate with KSM. + * Protocol used to communicate with OM. 
*/ @ProtocolInfo(protocolName = - "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol", + "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol", protocolVersion = 1) @InterfaceAudience.Private -public interface KeySpaceManagerProtocolPB - extends KeySpaceManagerService.BlockingInterface { +public interface OzoneManagerProtocolPB + extends OzoneManagerService.BlockingInterface { } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java similarity index 94% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java index 67f9f7bc4f..d595edf291 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java @@ -16,4 +16,4 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.protocolPB; \ No newline at end of file +package org.apache.hadoop.ozone.om.protocolPB; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java similarity index 93% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index fdc3ce7a4e..d57d32e0c4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -19,18 +19,18 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType; + .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; + .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights; /** * Utilities for converting protobuf classes. */ -public final class KSMPBHelper { +public final class OMPBHelper { - private KSMPBHelper() { + private OMPBHelper() { /** Hidden constructor */ } diff --git a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto similarity index 96% rename from hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto rename to hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index d3d1de69bd..36b1c83efb 100644 --- a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -23,14 +23,14 @@ */ option java_package = "org.apache.hadoop.ozone.protocol.proto"; -option java_outer_classname = "KeySpaceManagerProtocolProtos"; +option java_outer_classname = "OzoneManagerProtocolProtos"; option java_generic_services = true; option java_generate_equals_and_hash = true; package hadoop.ozone; /** This is file contains the protocol to communicate with -Ozone key space manager. Ozone KSM manages the namespace for ozone. +Ozone Manager. 
Ozone Manager manages the namespace for ozone. This is similar to Namenode for Ozone. */ @@ -53,6 +53,12 @@ enum Status { INVALID_KEY_NAME = 13; ACCESS_DENIED = 14; INTERNAL_ERROR = 15; + KEY_ALLOCATION_ERROR = 16; + KEY_DELETION_ERROR = 17; + KEY_RENAME_ERROR = 18; + METADATA_ERROR = 19; + OM_NOT_INITIALIZED = 20; + SCM_VERSION_MISMATCH_ERROR = 21; } @@ -355,9 +361,9 @@ message ServiceInfo { } /** - The KSM service that takes care of Ozone namespace. + The OM service that takes care of Ozone namespace. */ -service KeySpaceManagerService { +service OzoneManagerService { /** Creates a Volume. diff --git a/hadoop-ozone/docs/content/GettingStarted.md b/hadoop-ozone/docs/content/GettingStarted.md index 531d192412..117a3071c5 100644 --- a/hadoop-ozone/docs/content/GettingStarted.md +++ b/hadoop-ozone/docs/content/GettingStarted.md @@ -194,12 +194,12 @@ This path will be created by datanodes if it doesn't exist already. Here is an ``` -1. **ozone.ksm.address** OM server address. This is used by OzoneClient and +1. **ozone.om.address** OM server address. This is used by OzoneClient and Ozone File System. ``` - ozone.ksm.address - ksm.hadoop.apache.org + ozone.om.address + om.hadoop.apache.org ``` @@ -210,10 +210,10 @@ Ozone File System. | ozone.enabled | True | This enables SCM and containers in HDFS cluster. | | ozone.metadata.dirs | file path | The metadata will be stored here. | | ozone.scm.names | SCM server name | Hostname:port or or IP:port address of SCM. | -| ozone.scm.block.client.address | SCM server name and port | Used by services like OM | +| ozone.scm.block.client.address | SCM server name and port | Used by services like OM | | ozone.scm.client.address | SCM server name and port | Used by client side | | ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM | -| ozone.ksm.address | OM server name | Used by Ozone handler and Ozone file system. | +| ozone.om.address | OM server name | Used by Ozone handler and Ozone file system. | #### Sample ozone-site.xml @@ -253,7 +253,7 @@ Ozone File System. - ozone.ksm.address + ozone.om.address 127.0.0.1:9874 @@ -286,12 +286,12 @@ ozone --daemon start scm Once SCM gets started, OM must be initialized. ``` -ozone ksm -createObjectStore +ozone om -createObjectStore ``` Start OM. ``` -ozone --daemon start ksm +ozone --daemon start om ``` If you would like to start HDFS and Ozone together, you can do that by running @@ -349,7 +349,7 @@ log4j.additivity.org.apache.hadoop.ozone=false ``` On the SCM/OM side, you will be able to see -1. `hadoop-hdfs-ksm-hostname.log` +1. `hadoop-hdfs-om-hostname.log` 1. `hadoop-hdfs-scm-hostname.log` ## Reporting Bugs diff --git a/hadoop-ozone/docs/content/Metrics.md b/hadoop-ozone/docs/content/Metrics.md index dc58460517..64a481fa8c 100644 --- a/hadoop-ozone/docs/content/Metrics.md +++ b/hadoop-ozone/docs/content/Metrics.md @@ -131,10 +131,10 @@ Following are the counters for containers: ### Key Space Metrics -The metrics for various key space manager operations in HDFS Ozone. +The metrics for various Ozone Manager operations in HDFS Ozone. -key space manager (KSM) is a service that similar to the Namenode in HDFS. -In the current design of KSM, it maintains metadata of all volumes, buckets and keys. +The Ozone Manager (OM) is a service that similar to the Namenode in HDFS. +In the current design of OM, it maintains metadata of all volumes, buckets and keys. These metrics are only available when ozone is enabled. 
Following is the set of counters maintained for each key space operation. @@ -142,12 +142,12 @@ Following is the set of counters maintained for each key space operation. *Total number of operation* - We maintain an array which counts how many times a specific operation has been performed. Eg.`NumVolumeCreate` tells us how many times create volume has been -invoked in KSM. +invoked in OM. *Total number of failed operation* - This type operation is opposite to the above operation. Eg.`NumVolumeCreateFails` tells us how many times create volume has been invoked -failed in KSM. +failed in OM. Following are the counters for each of key space operations. diff --git a/hadoop-ozone/docs/content/_index.md b/hadoop-ozone/docs/content/_index.md index ab7eabe644..383b2e0ef0 100644 --- a/hadoop-ozone/docs/content/_index.md +++ b/hadoop-ozone/docs/content/_index.md @@ -56,14 +56,14 @@ This is like DFSClient in HDFS. This acts as the standard client to talk to Ozone. All other components that we have discussed so far rely on Ozone client (TODO: Add Ozone client documentation).
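Illustrative sketch (not part of the patch): how a client would be expected to reach the renamed Ozone Manager. The `OzoneClientFactory`/`ObjectStore` calls are assumed client-side API; the `ozone.om.address` key and the `127.0.0.1:9874` value match the sample ozone-site.xml in this patch.

```
// Sketch only: talking to Ozone through the client API after the rename.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class OzoneClientSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The client now locates the OM via ozone.om.address
    // (previously ozone.ksm.address).
    conf.set("ozone.om.address", "127.0.0.1:9874");

    try (OzoneClient client = OzoneClientFactory.getClient(conf)) {
      ObjectStore store = client.getObjectStore(); // assumed client API
      store.createVolume("vol1");
      store.getVolume("vol1").createBucket("bucket1");
    }
  }
}
```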
 -## Key Space Manager
+## Ozone Manager -Key Space Manager(KSM) takes care of the Ozone's namespace. -All ozone entities like volumes, buckets and keys are managed by KSM -(TODO: Add KSM documentation). In Short, KSM is the metadata manager for Ozone. -KSM talks to blockManager(SCM) to get blocks and passes it on to the Ozone +Ozone Manager (OM) takes care of Ozone's namespace. +All ozone entities like volumes, buckets and keys are managed by OM +(TODO: Add OM documentation). In short, OM is the metadata manager for Ozone. +OM talks to blockManager(SCM) to get blocks and passes them on to the Ozone client. Ozone client writes data to these blocks. -KSM will eventually be replicated via Apache Ratis for High Availability.
 +OM will eventually be replicated via Apache Ratis for High Availability.
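To make the renamed interaction concrete: a client asks OM for a key's metadata and gets back block locations inside SCM-managed containers, which is where the data is actually read and written. A rough sketch using the renamed helper classes from this patch; the `om` handle is assumed to come from something like `MiniOzoneCluster#getOzoneManager()` in the tests below, and the volume/bucket/key names are made up:

```
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

class OmLookupSketch {
  static void printKeyLocations(OzoneManager om) throws IOException {
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()        // was KsmKeyArgs.Builder
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .build();

    OmKeyInfo keyInfo = om.lookupKey(keyArgs);         // was KsmKeyInfo
    List<OmKeyLocationInfo> blocks =
        keyInfo.getLatestVersionLocations().getLocationList();
    for (OmKeyLocationInfo block : blocks) {
      // Each location names an SCM-managed container holding the block data.
      System.out.println("containerID=" + block.getContainerID()
          + " blockID=" + block.getBlockID());
    }
  }
}
```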
 ## Storage Container Manager Storage Container Manager (SCM) is the block and cluster manager for Ozone. diff --git a/hadoop-ozone/docs/static/OzoneOverview.svg b/hadoop-ozone/docs/static/OzoneOverview.svg index 2e14d3fe66..0120a5cc36 100644 --- a/hadoop-ozone/docs/static/OzoneOverview.svg +++ b/hadoop-ozone/docs/static/OzoneOverview.svg @@ -166,7 +166,7 @@ - + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 091d771592..b568672db3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.ksm.KeySpaceManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; @@ -82,12 +82,12 @@ static Builder newBuilder(OzoneConfiguration conf) { StorageContainerManager getStorageContainerManager(); /** - * Returns {@link KeySpaceManager} associated with this + * Returns {@link OzoneManager} associated with this * {@link MiniOzoneCluster} instance. * - * @return {@link KeySpaceManager} instance + * @return {@link OzoneManager} instance */ - KeySpaceManager getKeySpaceManager(); + OzoneManager getOzoneManager(); /** * Returns the list of {@link HddsDatanodeService} which are part of this @@ -141,11 +141,11 @@ static Builder newBuilder(OzoneConfiguration conf) { void restartStorageContainerManager() throws IOException; /** - * Restarts KeySpaceManager instance. + * Restarts OzoneManager instance. * * @throws IOException */ - void restartKeySpaceManager() throws IOException; + void restartOzoneManager() throws IOException; /** * Restart a particular HddsDatanode. @@ -184,13 +184,13 @@ abstract class Builder { protected Optional hbInterval = Optional.empty(); protected Optional hbProcessorInterval = Optional.empty(); protected Optional scmId = Optional.empty(); - protected Optional ksmId = Optional.empty(); + protected Optional omId = Optional.empty(); protected Boolean ozoneEnabled = true; protected Boolean randomContainerPort = true; // Use relative smaller number of handlers for testing - protected int numOfKsmHandlers = 20; + protected int numOfOmHandlers = 20; protected int numOfScmHandlers = 20; protected int numOfDatanodes = 1; @@ -226,14 +226,14 @@ public Builder setScmId(String id) { } /** - * Sets the KSM id. + * Sets the OM id. 
* - * @param id KSM Id + * @param id OM Id * * @return MiniOzoneCluster.Builder */ - public Builder setKsmId(String id) { - ksmId = Optional.of(id); + public Builder setOmId(String id) { + omId = Optional.of(id); return this; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index f0bfef17be..b3137bf3f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -34,10 +34,10 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.ozone.ksm.KeySpaceManager; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.hdds.scm.server.SCMStorage; -import org.apache.hadoop.ozone.ksm.KSMStorage; +import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; @@ -73,7 +73,7 @@ /** * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for - * running tests. The cluster consists of a KeySpaceManager, + * running tests. The cluster consists of a OzoneManager, * StorageContainerManager and multiple DataNodes. */ @InterfaceAudience.Private @@ -84,7 +84,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster { private final OzoneConfiguration conf; private final StorageContainerManager scm; - private final KeySpaceManager ksm; + private final OzoneManager ozoneManager; private final List hddsDatanodes; /** @@ -93,11 +93,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster { * @throws IOException if there is an I/O error */ private MiniOzoneClusterImpl(OzoneConfiguration conf, - KeySpaceManager ksm, + OzoneManager ozoneManager, StorageContainerManager scm, List hddsDatanodes) { this.conf = conf; - this.ksm = ksm; + this.ozoneManager = ozoneManager; this.scm = scm; this.hddsDatanodes = hddsDatanodes; } @@ -147,8 +147,8 @@ public StorageContainerManager getStorageContainerManager() { } @Override - public KeySpaceManager getKeySpaceManager() { - return this.ksm; + public OzoneManager getOzoneManager() { + return this.ozoneManager; } @Override @@ -209,9 +209,9 @@ public void restartStorageContainerManager() throws IOException { } @Override - public void restartKeySpaceManager() throws IOException { - ksm.stop(); - ksm.start(); + public void restartOzoneManager() throws IOException { + ozoneManager.stop(); + ozoneManager.start(); } @Override @@ -247,10 +247,10 @@ public void shutdown() { scm.getClientProtocolServer().getScmInfo().getClusterId())); FileUtils.deleteDirectory(baseDir); - if (ksm != null) { - LOG.info("Shutting down the keySpaceManager"); - ksm.stop(); - ksm.join(); + if (ozoneManager != null) { + LOG.info("Shutting down the OzoneManager"); + ozoneManager.stop(); + ozoneManager.join(); } if (scm != null) { @@ -291,11 +291,11 @@ public MiniOzoneCluster build() throws IOException { initializeConfiguration(); StorageContainerManager scm = createSCM(); scm.start(); - KeySpaceManager ksm = createKSM(); - ksm.start(); + OzoneManager om = createOM(); + om.start(); 
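Downstream integration tests built on `MiniOzoneCluster` only need the mechanical method renames shown in this hunk. A small sketch of the renamed builder and accessors, mirroring how the tests later in this patch use them (the UUID-based ids follow `TestOzoneManager`; nothing here is new API beyond the renames):

```
import java.util.UUID;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.om.OzoneManager;

class MiniOzoneClusterSketch {
  static void runCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setClusterId(UUID.randomUUID().toString())
        .setScmId(UUID.randomUUID().toString())
        .setOmId(UUID.randomUUID().toString())     // was setKsmId(...)
        .build();
    cluster.waitForClusterToBeReady();

    OzoneManager om = cluster.getOzoneManager();   // was getKeySpaceManager()
    cluster.restartOzoneManager();                 // was restartKeySpaceManager()

    cluster.shutdown();
  }
}
```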
List hddsDatanodes = createHddsDatanodes(scm); hddsDatanodes.forEach((datanode) -> datanode.start(null)); - return new MiniOzoneClusterImpl(conf, ksm, scm, hddsDatanodes); + return new MiniOzoneClusterImpl(conf, om, scm, hddsDatanodes); } /** @@ -331,20 +331,20 @@ private StorageContainerManager createSCM() throws IOException { } /** - * Creates a new KeySpaceManager instance. + * Creates a new OzoneManager instance. * - * @return {@link KeySpaceManager} + * @return {@link OzoneManager} * * @throws IOException */ - private KeySpaceManager createKSM() throws IOException { - configureKSM(); - KSMStorage ksmStore = new KSMStorage(conf); - ksmStore.setClusterId(clusterId); - ksmStore.setScmId(scmId.get()); - ksmStore.setKsmId(ksmId.orElse(UUID.randomUUID().toString())); - ksmStore.initialize(); - return KeySpaceManager.createKSM(null, conf); + private OzoneManager createOM() throws IOException { + configureOM(); + OMStorage omStore = new OMStorage(conf); + omStore.setClusterId(clusterId); + omStore.setScmId(scmId.get()); + omStore.setOmId(omId.orElse(UUID.randomUUID().toString())); + omStore.initialize(); + return OzoneManager.createOm(null, conf); } /** @@ -415,10 +415,10 @@ private void configureSCMheartbeat() { } - private void configureKSM() { - conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setInt(KSMConfigKeys.OZONE_KSM_HANDLER_COUNT_KEY, numOfKsmHandlers); + private void configureOM() { + conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); } private void configureHddsDatanodes() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 4898a1b6c2..717bb68534 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.TestConfigurationFieldsBase; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; /** @@ -31,7 +31,7 @@ public void initializeMemberVariables() { xmlFilename = new String("ozone-default.xml"); configurationClasses = new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class, - KSMConfigKeys.class}; + OMConfigKeys.class}; errorIfMissingConfigProps = true; errorIfMissingXmlProps = true; xmlPropsToSkipCompare.add("hadoop.tags.custom"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index dd1a8de890..cc367b3339 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorage; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.ozone.container.ContainerTestHelper; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import 
org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -60,8 +61,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.rules.Timeout; @@ -211,7 +211,7 @@ public void testBlockDeletionTransactions() throws Exception { // Create {numKeys} random names keys. TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = helper.createKeys(numKeys, 4096); Map> containerBlocks = createDeleteTXLog(delLog, keyLocations, helper); @@ -293,7 +293,7 @@ public void testBlockDeletingThrottling() throws Exception { // Create {numKeys} random names keys. TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = helper.createKeys(numKeys, 4096); createDeleteTXLog(delLog, keyLocations, helper); // Verify a few TX gets created in the TX log. @@ -320,13 +320,13 @@ public void testBlockDeletingThrottling() throws Exception { } private Map> createDeleteTXLog(DeletedBlockLog delLog, - Map keyLocations, + Map keyLocations, TestStorageContainerManagerHelper helper) throws IOException { // These keys will be written into a bunch of containers, // gets a set of container names, verify container containerBlocks // on datanodes. Set containerNames = new HashSet<>(); - for (Map.Entry entry : keyLocations.entrySet()) { + for (Map.Entry entry : keyLocations.entrySet()) { entry.getValue().getLatestVersionLocations().getLocationList() .forEach(loc -> containerNames.add(loc.getContainerID())); } @@ -334,7 +334,7 @@ private Map> createDeleteTXLog(DeletedBlockLog delLog, // Total number of containerBlocks of these containers should be equal to // total number of containerBlocks via creation call. int totalCreatedBlocks = 0; - for (KsmKeyInfo info : keyLocations.values()) { + for (OmKeyInfo info : keyLocations.values()) { totalCreatedBlocks += info.getKeyLocationVersions().size(); } Assert.assertTrue(totalCreatedBlocks > 0); @@ -343,8 +343,8 @@ private Map> createDeleteTXLog(DeletedBlockLog delLog, // Create a deletion TX for each key. 
Map> containerBlocks = Maps.newHashMap(); - for (KsmKeyInfo info : keyLocations.values()) { - List list = + for (OmKeyInfo info : keyLocations.values()) { + List list = info.getLatestVersionLocations().getLocationList(); list.forEach(location -> { if (containerBlocks.containsKey(location.getContainerID())) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index 4c2a904ef0..a30c6f4ffe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -30,8 +30,8 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs; @@ -67,9 +67,9 @@ public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); } - public Map createKeys(int numOfKeys, int keySize) + public Map createKeys(int numOfKeys, int keySize) throws Exception { - Map keyLocationMap = Maps.newHashMap(); + Map keyLocationMap = Maps.newHashMap(); String volume = "volume" + RandomStringUtils.randomNumeric(5); String bucket = "bucket" + RandomStringUtils.randomNumeric(5); String userName = "user" + RandomStringUtils.randomNumeric(5); @@ -104,12 +104,12 @@ public Map createKeys(int numOfKeys, int keySize) } for (String key : keyNames) { - KsmKeyArgs arg = new KsmKeyArgs.Builder() + OmKeyArgs arg = new OmKeyArgs.Builder() .setVolumeName(volume) .setBucketName(bucket) .setKeyName(key) .build(); - KsmKeyInfo location = cluster.getKeySpaceManager() + OmKeyInfo location = cluster.getOzoneManager() .lookupKey(arg); keyLocationMap.put(key, location); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java index 9918d63668..0dc039983b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java @@ -77,10 +77,10 @@ public static void init() throws Exception { OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); - InetSocketAddress ksmHttpAddress = cluster.getKeySpaceManager() + InetSocketAddress omHttpAddress = cluster.getOzoneManager() .getHttpServer().getHttpAddress(); - ozClient = OzoneClientFactory.getRestClient(ksmHttpAddress.getHostName(), - ksmHttpAddress.getPort(), conf); + ozClient = OzoneClientFactory.getRestClient(omHttpAddress.getHostName(), + omHttpAddress.getPort(), conf); store = ozClient.getObjectStore(); } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index 214382e5f6..2fbab361a7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -39,10 +39,10 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.ksm.KeySpaceManager; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -73,7 +73,7 @@ public class TestOzoneRpcClient { private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; - private static KeySpaceManager keySpaceManager; + private static OzoneManager ozoneManager; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; @@ -97,7 +97,7 @@ public static void init() throws Exception { store = ozClient.getObjectStore(); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); - keySpaceManager = cluster.getKeySpaceManager(); + ozoneManager = cluster.getOzoneManager(); } @Test @@ -376,7 +376,7 @@ public void testDeleteBucket() private boolean verifyRatisReplication(String volumeName, String bucketName, String keyName, ReplicationType type, ReplicationFactor factor) throws IOException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -385,8 +385,8 @@ private boolean verifyRatisReplication(String volumeName, String bucketName, HddsProtos.ReplicationType.valueOf(type.toString()); HddsProtos.ReplicationFactor replicationFactor = HddsProtos.ReplicationFactor.valueOf(factor.getValue()); - KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs); - for (KsmKeyLocationInfo info: + OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); + for (OmKeyLocationInfo info: keyInfo.getLatestVersionLocations().getLocationList()) { ContainerInfo container = storageContainerLocationClient.getContainer(info.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 43e3f5095f..62059ec85f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -37,10 +37,10 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; -import org.apache.hadoop.ozone.ksm.KeySpaceManager; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.ozShell.TestOzoneShell; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataStore; @@ -61,7 +61,7 @@ public class TestBlockDeletion { private static ObjectStore store; private static ContainerManagerImpl dnContainerManager = null; private static StorageContainerManager scm = null; - private static KeySpaceManager ksm = null; + private static OzoneManager om = null; private static Set containerIdsWithDeletedBlocks; @BeforeClass @@ -88,7 +88,7 @@ public static void init() throws Exception { dnContainerManager = (ContainerManagerImpl) cluster.getHddsDatanodes().get(0) .getDatanodeStateMachine().getContainer().getContainerManager(); - ksm = cluster.getKeySpaceManager(); + om = cluster.getOzoneManager(); scm = cluster.getStorageContainerManager(); containerIdsWithDeletedBlocks = new HashSet<>(); } @@ -112,23 +112,23 @@ public void testBlockDeletion() out.write(value.getBytes()); out.close(); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName(volumeName) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setKeyName(keyName).setDataSize(0) .setType(HddsProtos.ReplicationType.STAND_ALONE) .setFactor(HddsProtos.ReplicationFactor.ONE).build(); - List ksmKeyLocationInfoGroupList = - ksm.lookupKey(keyArgs).getKeyLocationVersions(); + List omKeyLocationInfoGroupList = + om.lookupKey(keyArgs).getKeyLocationVersions(); // verify key blocks were created in DN. - Assert.assertTrue(verifyBlocksCreated(ksmKeyLocationInfoGroupList)); + Assert.assertTrue(verifyBlocksCreated(omKeyLocationInfoGroupList)); // No containers with deleted blocks Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty()); // Delete transactionIds for the containers should be 0 matchContainerTransactionIds(); - ksm.deleteKey(keyArgs); + om.deleteKey(keyArgs); Thread.sleep(5000); // The blocks should be deleted in the DN. 
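Condensing the renamed delete path exercised in this test: the key's block locations are captured through OM before the delete, because OM drops the namespace entry immediately while the datanode-side block deletes are applied asynchronously through SCM's delete transactions. A restated sketch under the same names (`om`, `keyArgs`, `verifyBlocksDeleted`) as the surrounding test:

```
// After the key has been written, as in the test above:
List<OmKeyLocationInfoGroup> locations =
    om.lookupKey(keyArgs).getKeyLocationVersions();  // was KsmKeyLocationInfoGroup

om.deleteKey(keyArgs);      // the OM namespace entry is gone at this point

// Datanode-side block deletion is asynchronous (SCM distributes the delete
// transactions), so wait before checking the containers.
Thread.sleep(5000);
Assert.assertTrue(verifyBlocksDeleted(locations));
```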
- Assert.assertTrue(verifyBlocksDeleted(ksmKeyLocationInfoGroupList)); + Assert.assertTrue(verifyBlocksDeleted(omKeyLocationInfoGroupList)); // Few containers with deleted blocks Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty()); @@ -155,7 +155,7 @@ private void matchContainerTransactionIds() throws IOException { } private boolean verifyBlocksCreated( - List ksmKeyLocationInfoGroups) + List omKeyLocationInfoGroups) throws IOException { return performOperationOnKeyContainers((blockID) -> { try { @@ -166,11 +166,11 @@ private boolean verifyBlocksCreated( } catch (IOException e) { e.printStackTrace(); } - }, ksmKeyLocationInfoGroups); + }, omKeyLocationInfoGroups); } private boolean verifyBlocksDeleted( - List ksmKeyLocationInfoGroups) + List omKeyLocationInfoGroups) throws IOException { return performOperationOnKeyContainers((blockID) -> { try { @@ -186,19 +186,20 @@ private boolean verifyBlocksDeleted( } catch (IOException e) { e.printStackTrace(); } - }, ksmKeyLocationInfoGroups); + }, omKeyLocationInfoGroups); } private boolean performOperationOnKeyContainers(Consumer consumer, - List ksmKeyLocationInfoGroups) + List omKeyLocationInfoGroups) throws IOException { try { - for (KsmKeyLocationInfoGroup ksmKeyLocationInfoGroup : ksmKeyLocationInfoGroups) { - List ksmKeyLocationInfos = - ksmKeyLocationInfoGroup.getLocationList(); - for (KsmKeyLocationInfo ksmKeyLocationInfo : ksmKeyLocationInfos) { - BlockID blockID = ksmKeyLocationInfo.getBlockID(); + for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : + omKeyLocationInfoGroups) { + List omKeyLocationInfos = + omKeyLocationInfoGroup.getLocationList(); + for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) { + BlockID blockID = omKeyLocationInfo.getBlockID(); consumer.accept(blockID); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 3e514e7906..58b831b078 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -34,8 +34,8 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; @@ -45,7 +45,6 @@ import java.io.IOException; import java.util.List; -import java.util.Random; import java.util.concurrent.TimeoutException; public class TestCloseContainerByPipeline { @@ -98,17 +97,17 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { key.close(); //get the name of a valid container - KsmKeyArgs keyArgs = - new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test") + OmKeyArgs keyArgs = + new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") 
.setType(HddsProtos.ReplicationType.STAND_ALONE) .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) .setKeyName("testCloseContainer").build(); - KsmKeyLocationInfo ksmKeyLocationInfo = - cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions() + OmKeyLocationInfo omKeyLocationInfo = + cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - long containerID = ksmKeyLocationInfo.getContainerID(); + long containerID = omKeyLocationInfo.getContainerID(); List datanodes = cluster.getStorageContainerManager() .getScmContainerManager().getContainerWithPipeline(containerID) .getPipeline().getMachines(); @@ -153,17 +152,17 @@ public void testCloseContainerViaStandaAlone() key.close(); //get the name of a valid container - KsmKeyArgs keyArgs = - new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test") + OmKeyArgs keyArgs = + new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") .setType(HddsProtos.ReplicationType.STAND_ALONE) .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) .setKeyName("standalone").build(); - KsmKeyLocationInfo ksmKeyLocationInfo = - cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions() + OmKeyLocationInfo omKeyLocationInfo = + cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - long containerID = ksmKeyLocationInfo.getContainerID(); + long containerID = omKeyLocationInfo.getContainerID(); List datanodes = cluster.getStorageContainerManager() .getScmContainerManager().getContainerWithPipeline(containerID) .getPipeline().getMachines(); @@ -207,16 +206,16 @@ public void testCloseContainerViaRatis() throws IOException, key.close(); //get the name of a valid container - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName("test"). + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName("test"). 
setBucketName("test").setType(HddsProtos.ReplicationType.RATIS) .setFactor(HddsProtos.ReplicationFactor.THREE).setDataSize(1024) .setKeyName("ratis").build(); - KsmKeyLocationInfo ksmKeyLocationInfo = - cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions() + OmKeyLocationInfo omKeyLocationInfo = + cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - long containerID = ksmKeyLocationInfo.getContainerID(); + long containerID = omKeyLocationInfo.getContainerID(); List datanodes = cluster.getStorageContainerManager() .getScmContainerManager().getContainerWithPipeline(containerID) .getPipeline().getMachines(); @@ -232,7 +231,7 @@ public void testCloseContainerViaRatis() throws IOException, .addDatanodeCommand(details.getUuid(), new CloseContainerCommand(containerID, HddsProtos.ReplicationType.RATIS)); - } + } for (DatanodeDetails datanodeDetails : datanodes) { GenericTestUtils.waitFor( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index efb734497f..58a5154037 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -28,8 +28,8 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; @@ -69,17 +69,17 @@ public void test() throws IOException, TimeoutException, InterruptedException, key.close(); //get the name of a valid container - KsmKeyArgs keyArgs = - new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test") + OmKeyArgs keyArgs = + new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") .setType(HddsProtos.ReplicationType.STAND_ALONE) .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) .setKeyName("test").build(); - KsmKeyLocationInfo ksmKeyLocationInfo = - cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions() + OmKeyLocationInfo omKeyLocationInfo = + cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - long containerID = ksmKeyLocationInfo.getContainerID(); + long containerID = omKeyLocationInfo.getContainerID(); Assert.assertFalse(isContainerClosed(cluster, containerID)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java similarity index 94% rename from 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 1cc7ff8f22..54815061c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; @@ -30,8 +30,8 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.junit.AfterClass; @@ -104,7 +104,7 @@ public void testContainerReportKeyWrite() throws Exception { key.write(dataString.getBytes()); key.close(); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -113,8 +113,8 @@ public void testContainerReportKeyWrite() throws Exception { .build(); - KsmKeyLocationInfo keyInfo = - cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions() + OmKeyLocationInfo keyInfo = + cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); ContainerData cd = getContainerData(keyInfo.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java index 1cb6e82c14..1389cbaa98 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java similarity index 81% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 15c3fd3b46..15122b94ce 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -24,11 +24,11 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs; @@ -51,13 +51,13 @@ import static org.junit.Assert.assertTrue; /** - * This class tests the versioning of blocks from KSM side. + * This class tests the versioning of blocks from OM side. 
*/ -public class TestKsmBlockVersioning { +public class TestOmBlockVersioning { private static MiniOzoneCluster cluster = null; private static UserArgs userArgs; private static OzoneConfiguration conf; - private static KeySpaceManager keySpaceManager; + private static OzoneManager ozoneManager; private static StorageHandler storageHandler; @Rule @@ -81,7 +81,7 @@ public static void init() throws Exception { storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); userArgs = new UserArgs(null, OzoneUtils.getRequestID(), null, null, null, null); - keySpaceManager = cluster.getKeySpaceManager(); + ozoneManager = cluster.getOzoneManager(); } /** @@ -113,7 +113,7 @@ public void testAllocateCommit() throws Exception { bucketArgs.setStorageType(StorageType.DISK); storageHandler.createBucket(bucketArgs); - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -121,49 +121,49 @@ public void testAllocateCommit() throws Exception { .build(); // 1st update, version 0 - OpenKeySession openKey = keySpaceManager.openKey(keyArgs); - keySpaceManager.commitKey(keyArgs, openKey.getId()); + OpenKeySession openKey = ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, openKey.getId()); - KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs); - KsmKeyLocationInfoGroup highestVersion = + OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); + OmKeyLocationInfoGroup highestVersion = checkVersions(keyInfo.getKeyLocationVersions()); assertEquals(0, highestVersion.getVersion()); assertEquals(1, highestVersion.getLocationList().size()); // 2nd update, version 1 - openKey = keySpaceManager.openKey(keyArgs); - //KsmKeyLocationInfo locationInfo = - // keySpaceManager.allocateBlock(keyArgs, openKey.getId()); - keySpaceManager.commitKey(keyArgs, openKey.getId()); + openKey = ozoneManager.openKey(keyArgs); + //OmKeyLocationInfo locationInfo = + // ozoneManager.allocateBlock(keyArgs, openKey.getId()); + ozoneManager.commitKey(keyArgs, openKey.getId()); - keyInfo = keySpaceManager.lookupKey(keyArgs); + keyInfo = ozoneManager.lookupKey(keyArgs); highestVersion = checkVersions(keyInfo.getKeyLocationVersions()); assertEquals(1, highestVersion.getVersion()); assertEquals(2, highestVersion.getLocationList().size()); // 3rd update, version 2 - openKey = keySpaceManager.openKey(keyArgs); + openKey = ozoneManager.openKey(keyArgs); // this block will be appended to the latest version of version 2. 
- keySpaceManager.allocateBlock(keyArgs, openKey.getId()); - keySpaceManager.commitKey(keyArgs, openKey.getId()); + ozoneManager.allocateBlock(keyArgs, openKey.getId()); + ozoneManager.commitKey(keyArgs, openKey.getId()); - keyInfo = keySpaceManager.lookupKey(keyArgs); + keyInfo = ozoneManager.lookupKey(keyArgs); highestVersion = checkVersions(keyInfo.getKeyLocationVersions()); assertEquals(2, highestVersion.getVersion()); assertEquals(4, highestVersion.getLocationList().size()); } - private KsmKeyLocationInfoGroup checkVersions( - List versions) { - KsmKeyLocationInfoGroup currentVersion = null; - for (KsmKeyLocationInfoGroup version : versions) { + private OmKeyLocationInfoGroup checkVersions( + List versions) { + OmKeyLocationInfoGroup currentVersion = null; + for (OmKeyLocationInfoGroup version : versions) { if (currentVersion != null) { assertEquals(currentVersion.getVersion() + 1, version.getVersion()); - for (KsmKeyLocationInfo info : currentVersion.getLocationList()) { + for (OmKeyLocationInfo info : currentVersion.getLocationList()) { boolean found = false; // all the blocks from the previous version must present in the next // version - for (KsmKeyLocationInfo info2 : version.getLocationList()) { + for (OmKeyLocationInfo info2 : version.getLocationList()) { if (info.getLocalID() == info2.getLocalID()) { found = true; break; @@ -197,7 +197,7 @@ public void testReadLatestVersion() throws Exception { bucketArgs.setStorageType(StorageType.DISK); storageHandler.createBucket(bucketArgs); - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -214,7 +214,7 @@ public void testReadLatestVersion() throws Exception { try (InputStream in = storageHandler.newKeyReader(keyArgs)) { in.read(data); } - KsmKeyInfo keyInfo = keySpaceManager.lookupKey(ksmKeyArgs); + OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs); assertEquals(dataString, DFSUtil.bytes2String(data)); assertEquals(0, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(1, @@ -230,7 +230,7 @@ public void testReadLatestVersion() throws Exception { try (InputStream in = storageHandler.newKeyReader(keyArgs)) { in.read(data); } - keyInfo = keySpaceManager.lookupKey(ksmKeyArgs); + keyInfo = ozoneManager.lookupKey(omKeyArgs); assertEquals(dataString, DFSUtil.bytes2String(data)); assertEquals(1, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(2, @@ -244,7 +244,7 @@ public void testReadLatestVersion() throws Exception { try (InputStream in = storageHandler.newKeyReader(keyArgs)) { in.read(data); } - keyInfo = keySpaceManager.lookupKey(ksmKeyArgs); + keyInfo = ozoneManager.lookupKey(omKeyArgs); assertEquals(dataString, DFSUtil.bytes2String(data)); assertEquals(2, keyInfo.getLatestVersionLocations().getVersion()); assertEquals(3, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java similarity index 53% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index bf7d870bd4..8d0f4b2129 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -14,7 +14,7 @@ * 
License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; @@ -26,18 +26,18 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.test.Whitebox; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; /** - * Test for KSM metrics. + * Test for OM metrics. */ -public class TestKSMMetrcis { +@SuppressWarnings("deprecation") +public class TestOmMetrics { private MiniOzoneCluster cluster; - private KeySpaceManager ksmManager; + private OzoneManager ozoneManager; /** * The exception used for testing failure metrics. @@ -56,7 +56,7 @@ public void setup() throws Exception { OzoneConsts.OZONE_HANDLER_DISTRIBUTED); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); - ksmManager = cluster.getKeySpaceManager(); + ozoneManager = cluster.getOzoneManager(); } /** @@ -71,8 +71,9 @@ public void shutdown() { @Test public void testVolumeOps() throws IOException { - VolumeManager volumeManager = (VolumeManager) Whitebox - .getInternalState(ksmManager, "volumeManager"); + VolumeManager volumeManager = + (VolumeManager) org.apache.hadoop.test.Whitebox + .getInternalState(ozoneManager, "volumeManager"); VolumeManager mockVm = Mockito.spy(volumeManager); Mockito.doNothing().when(mockVm).createVolume(null); @@ -82,17 +83,18 @@ public void testVolumeOps() throws IOException { Mockito.doNothing().when(mockVm).setOwner(null, null); Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0); - Whitebox.setInternalState(ksmManager, "volumeManager", mockVm); + org.apache.hadoop.test.Whitebox.setInternalState( + ozoneManager, "volumeManager", mockVm); doVolumeOps(); - MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumVolumeOps", 6L, ksmMetrics); - assertCounter("NumVolumeCreates", 1L, ksmMetrics); - assertCounter("NumVolumeUpdates", 1L, ksmMetrics); - assertCounter("NumVolumeInfos", 1L, ksmMetrics); - assertCounter("NumVolumeCheckAccesses", 1L, ksmMetrics); - assertCounter("NumVolumeDeletes", 1L, ksmMetrics); - assertCounter("NumVolumeLists", 1L, ksmMetrics); + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + assertCounter("NumVolumeOps", 6L, omMetrics); + assertCounter("NumVolumeCreates", 1L, omMetrics); + assertCounter("NumVolumeUpdates", 1L, omMetrics); + assertCounter("NumVolumeInfos", 1L, omMetrics); + assertCounter("NumVolumeCheckAccesses", 1L, omMetrics); + assertCounter("NumVolumeDeletes", 1L, omMetrics); + assertCounter("NumVolumeLists", 1L, omMetrics); // inject exception to test for Failure Metrics Mockito.doThrow(exception).when(mockVm).createVolume(null); @@ -102,30 +104,31 @@ public void testVolumeOps() throws IOException { Mockito.doThrow(exception).when(mockVm).setOwner(null, null); Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0); - Whitebox.setInternalState(ksmManager, "volumeManager", mockVm); + org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, "volumeManager", mockVm); doVolumeOps(); - ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumVolumeOps", 12L, ksmMetrics); - assertCounter("NumVolumeCreates", 2L, ksmMetrics); - assertCounter("NumVolumeUpdates", 2L, ksmMetrics); 
- assertCounter("NumVolumeInfos", 2L, ksmMetrics); - assertCounter("NumVolumeCheckAccesses", 2L, ksmMetrics); - assertCounter("NumVolumeDeletes", 2L, ksmMetrics); - assertCounter("NumVolumeLists", 2L, ksmMetrics); + omMetrics = getMetrics("OMMetrics"); + assertCounter("NumVolumeOps", 12L, omMetrics); + assertCounter("NumVolumeCreates", 2L, omMetrics); + assertCounter("NumVolumeUpdates", 2L, omMetrics); + assertCounter("NumVolumeInfos", 2L, omMetrics); + assertCounter("NumVolumeCheckAccesses", 2L, omMetrics); + assertCounter("NumVolumeDeletes", 2L, omMetrics); + assertCounter("NumVolumeLists", 2L, omMetrics); - assertCounter("NumVolumeCreateFails", 1L, ksmMetrics); - assertCounter("NumVolumeUpdateFails", 1L, ksmMetrics); - assertCounter("NumVolumeInfoFails", 1L, ksmMetrics); - assertCounter("NumVolumeCheckAccessFails", 1L, ksmMetrics); - assertCounter("NumVolumeDeleteFails", 1L, ksmMetrics); - assertCounter("NumVolumeListFails", 1L, ksmMetrics); + assertCounter("NumVolumeCreateFails", 1L, omMetrics); + assertCounter("NumVolumeUpdateFails", 1L, omMetrics); + assertCounter("NumVolumeInfoFails", 1L, omMetrics); + assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics); + assertCounter("NumVolumeDeleteFails", 1L, omMetrics); + assertCounter("NumVolumeListFails", 1L, omMetrics); } @Test public void testBucketOps() throws IOException { - BucketManager bucketManager = (BucketManager) Whitebox - .getInternalState(ksmManager, "bucketManager"); + BucketManager bucketManager = + (BucketManager) org.apache.hadoop.test.Whitebox + .getInternalState(ozoneManager, "bucketManager"); BucketManager mockBm = Mockito.spy(bucketManager); Mockito.doNothing().when(mockBm).createBucket(null); @@ -134,16 +137,17 @@ public void testBucketOps() throws IOException { Mockito.doNothing().when(mockBm).setBucketProperty(null); Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0); - Whitebox.setInternalState(ksmManager, "bucketManager", mockBm); + org.apache.hadoop.test.Whitebox.setInternalState( + ozoneManager, "bucketManager", mockBm); doBucketOps(); - MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumBucketOps", 5L, ksmMetrics); - assertCounter("NumBucketCreates", 1L, ksmMetrics); - assertCounter("NumBucketUpdates", 1L, ksmMetrics); - assertCounter("NumBucketInfos", 1L, ksmMetrics); - assertCounter("NumBucketDeletes", 1L, ksmMetrics); - assertCounter("NumBucketLists", 1L, ksmMetrics); + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + assertCounter("NumBucketOps", 5L, omMetrics); + assertCounter("NumBucketCreates", 1L, omMetrics); + assertCounter("NumBucketUpdates", 1L, omMetrics); + assertCounter("NumBucketInfos", 1L, omMetrics); + assertCounter("NumBucketDeletes", 1L, omMetrics); + assertCounter("NumBucketLists", 1L, omMetrics); // inject exception to test for Failure Metrics Mockito.doThrow(exception).when(mockBm).createBucket(null); @@ -152,28 +156,29 @@ public void testBucketOps() throws IOException { Mockito.doThrow(exception).when(mockBm).setBucketProperty(null); Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0); - Whitebox.setInternalState(ksmManager, "bucketManager", mockBm); + org.apache.hadoop.test.Whitebox.setInternalState( + ozoneManager, "bucketManager", mockBm); doBucketOps(); - ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumBucketOps", 10L, ksmMetrics); - assertCounter("NumBucketCreates", 2L, ksmMetrics); - assertCounter("NumBucketUpdates", 2L, ksmMetrics); - assertCounter("NumBucketInfos", 2L, ksmMetrics); - 
assertCounter("NumBucketDeletes", 2L, ksmMetrics); - assertCounter("NumBucketLists", 2L, ksmMetrics); + omMetrics = getMetrics("OMMetrics"); + assertCounter("NumBucketOps", 10L, omMetrics); + assertCounter("NumBucketCreates", 2L, omMetrics); + assertCounter("NumBucketUpdates", 2L, omMetrics); + assertCounter("NumBucketInfos", 2L, omMetrics); + assertCounter("NumBucketDeletes", 2L, omMetrics); + assertCounter("NumBucketLists", 2L, omMetrics); - assertCounter("NumBucketCreateFails", 1L, ksmMetrics); - assertCounter("NumBucketUpdateFails", 1L, ksmMetrics); - assertCounter("NumBucketInfoFails", 1L, ksmMetrics); - assertCounter("NumBucketDeleteFails", 1L, ksmMetrics); - assertCounter("NumBucketListFails", 1L, ksmMetrics); + assertCounter("NumBucketCreateFails", 1L, omMetrics); + assertCounter("NumBucketUpdateFails", 1L, omMetrics); + assertCounter("NumBucketInfoFails", 1L, omMetrics); + assertCounter("NumBucketDeleteFails", 1L, omMetrics); + assertCounter("NumBucketListFails", 1L, omMetrics); } @Test public void testKeyOps() throws IOException { - KeyManager bucketManager = (KeyManager) Whitebox - .getInternalState(ksmManager, "keyManager"); + KeyManager bucketManager = (KeyManager) org.apache.hadoop.test.Whitebox + .getInternalState(ozoneManager, "keyManager"); KeyManager mockKm = Mockito.spy(bucketManager); Mockito.doReturn(null).when(mockKm).openKey(null); @@ -181,15 +186,16 @@ public void testKeyOps() throws IOException { Mockito.doReturn(null).when(mockKm).lookupKey(null); Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0); - Whitebox.setInternalState(ksmManager, "keyManager", mockKm); + org.apache.hadoop.test.Whitebox.setInternalState( + ozoneManager, "keyManager", mockKm); doKeyOps(); - MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumKeyOps", 4L, ksmMetrics); - assertCounter("NumKeyAllocate", 1L, ksmMetrics); - assertCounter("NumKeyLookup", 1L, ksmMetrics); - assertCounter("NumKeyDeletes", 1L, ksmMetrics); - assertCounter("NumKeyLists", 1L, ksmMetrics); + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + assertCounter("NumKeyOps", 4L, omMetrics); + assertCounter("NumKeyAllocate", 1L, omMetrics); + assertCounter("NumKeyLookup", 1L, omMetrics); + assertCounter("NumKeyDeletes", 1L, omMetrics); + assertCounter("NumKeyLists", 1L, omMetrics); // inject exception to test for Failure Metrics Mockito.doThrow(exception).when(mockKm).openKey(null); @@ -198,20 +204,21 @@ public void testKeyOps() throws IOException { Mockito.doThrow(exception).when(mockKm).listKeys( null, null, null, null, 0); - Whitebox.setInternalState(ksmManager, "keyManager", mockKm); + org.apache.hadoop.test.Whitebox.setInternalState( + ozoneManager, "keyManager", mockKm); doKeyOps(); - ksmMetrics = getMetrics("KSMMetrics"); - assertCounter("NumKeyOps", 8L, ksmMetrics); - assertCounter("NumKeyAllocate", 2L, ksmMetrics); - assertCounter("NumKeyLookup", 2L, ksmMetrics); - assertCounter("NumKeyDeletes", 2L, ksmMetrics); - assertCounter("NumKeyLists", 2L, ksmMetrics); + omMetrics = getMetrics("OMMetrics"); + assertCounter("NumKeyOps", 8L, omMetrics); + assertCounter("NumKeyAllocate", 2L, omMetrics); + assertCounter("NumKeyLookup", 2L, omMetrics); + assertCounter("NumKeyDeletes", 2L, omMetrics); + assertCounter("NumKeyLists", 2L, omMetrics); - assertCounter("NumKeyAllocateFails", 1L, ksmMetrics); - assertCounter("NumKeyLookupFails", 1L, ksmMetrics); - assertCounter("NumKeyDeleteFails", 1L, ksmMetrics); - assertCounter("NumKeyListFails", 1L, ksmMetrics); + 
assertCounter("NumKeyAllocateFails", 1L, omMetrics); + assertCounter("NumKeyLookupFails", 1L, omMetrics); + assertCounter("NumKeyDeleteFails", 1L, omMetrics); + assertCounter("NumKeyListFails", 1L, omMetrics); } /** @@ -219,32 +226,32 @@ public void testKeyOps() throws IOException { */ private void doVolumeOps() { try { - ksmManager.createVolume(null); + ozoneManager.createVolume(null); } catch (IOException ignored) { } try { - ksmManager.deleteVolume(null); + ozoneManager.deleteVolume(null); } catch (IOException ignored) { } try { - ksmManager.getVolumeInfo(null); + ozoneManager.getVolumeInfo(null); } catch (IOException ignored) { } try { - ksmManager.checkVolumeAccess(null, null); + ozoneManager.checkVolumeAccess(null, null); } catch (IOException ignored) { } try { - ksmManager.setOwner(null, null); + ozoneManager.setOwner(null, null); } catch (IOException ignored) { } try { - ksmManager.listAllVolumes(null, null, 0); + ozoneManager.listAllVolumes(null, null, 0); } catch (IOException ignored) { } } @@ -254,27 +261,27 @@ private void doVolumeOps() { */ private void doBucketOps() { try { - ksmManager.createBucket(null); + ozoneManager.createBucket(null); } catch (IOException ignored) { } try { - ksmManager.deleteBucket(null, null); + ozoneManager.deleteBucket(null, null); } catch (IOException ignored) { } try { - ksmManager.getBucketInfo(null, null); + ozoneManager.getBucketInfo(null, null); } catch (IOException ignored) { } try { - ksmManager.setBucketProperty(null); + ozoneManager.setBucketProperty(null); } catch (IOException ignored) { } try { - ksmManager.listBuckets(null, null, null, 0); + ozoneManager.listBuckets(null, null, null, 0); } catch (IOException ignored) { } } @@ -284,22 +291,22 @@ private void doBucketOps() { */ private void doKeyOps() { try { - ksmManager.openKey(null); + ozoneManager.openKey(null); } catch (IOException ignored) { } try { - ksmManager.deleteKey(null); + ozoneManager.deleteKey(null); } catch (IOException ignored) { } try { - ksmManager.lookupKey(null); + ozoneManager.lookupKey(null); } catch (IOException ignored) { } try { - ksmManager.listKeys(null, null, null, null, 0); + ozoneManager.listKeys(null, null, null, null, 0); } catch (IOException ignored) { } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java index 7b92ec75bc..005a0124e8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -51,17 +51,17 @@ import java.util.List; import java.util.UUID; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; /** - * This class tests the CLI that transforms ksm.db into SQLite DB files. + * This class tests the CLI that transforms om.db into SQLite DB files. */ @RunWith(Parameterized.class) -public class TestKSMSQLCli { +public class TestOmSQLCli { private MiniOzoneCluster cluster = null; private StorageHandler storageHandler; private UserArgs userArgs; @@ -90,7 +90,7 @@ public static Collection data() { private String metaStoreType; - public TestKSMSQLCli(String type) { + public TestOmSQLCli(String type) { metaStoreType = type; } @@ -152,7 +152,7 @@ public void setup() throws Exception { stream = storageHandler.newKeyWriter(keyArgs3); stream.close(); - cluster.getKeySpaceManager().stop(); + cluster.getOzoneManager().stop(); cluster.getStorageContainerManager().stop(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType); cli = new SQLCLI(conf); @@ -166,12 +166,12 @@ public void shutdown() { } @Test - public void testKSMDB() throws Exception { + public void testOmDB() throws Exception { String dbOutPath = GenericTestUtils.getTempPath( UUID.randomUUID() + "/out_sql.db"); String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + KSM_DB_NAME; + String dbPath = dbRootPath + "/" + OM_DB_NAME; String[] args = {"-p", dbPath, "-o", dbOutPath}; cli.run(args); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java similarity index 90% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java index 8a16bfe86d..7c8595ca02 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; @@ -29,11 +29,11 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.hdds.scm.server.SCMStorage; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServicePort; + .OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; @@ -50,7 +50,7 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.Status; + .OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.web.handlers.ListArgs; import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.ListKeys; @@ -74,7 +74,6 @@ import java.nio.file.Paths; import java.net.InetSocketAddress; import java.text.ParseException; -import java.util.HashSet; import java.util.LinkedList; import java.util.Map; import java.util.Random; @@ -86,22 +85,22 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CLIENT_ADDRESS_KEY; /** - * Test Key Space Manager operation in distributed handler scenario. + * Test Ozone Manager operation in distributed handler scenario. 
*/ -public class TestKeySpaceManager { +public class TestOzoneManager { private static MiniOzoneCluster cluster = null; private static StorageHandler storageHandler; private static UserArgs userArgs; - private static KSMMetrics ksmMetrics; + private static OMMetrics omMetrics; private static OzoneConfiguration conf; private static String clusterId; private static String scmId; - private static String ksmId; + private static String omId; @Rule public ExpectedException exception = ExpectedException.none(); @@ -119,20 +118,20 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); - ksmId = UUID.randomUUID().toString(); + omId = UUID.randomUUID().toString(); conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, OzoneConsts.OZONE_HANDLER_DISTRIBUTED); conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); cluster = MiniOzoneCluster.newBuilder(conf) .setClusterId(clusterId) .setScmId(scmId) - .setKsmId(ksmId) + .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); storageHandler = new ObjectStoreHandler(conf).getStorageHandler(); userArgs = new UserArgs(null, OzoneUtils.getRequestID(), null, null, null, null); - ksmMetrics = cluster.getKeySpaceManager().getMetrics(); + omMetrics = cluster.getOzoneManager().getMetrics(); } /** @@ -148,7 +147,7 @@ public static void shutdown() { // Create a volume and test its attribute after creating them @Test(timeout = 60000) public void testCreateVolume() throws IOException, OzoneException { - long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails(); + long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -163,14 +162,14 @@ public void testCreateVolume() throws IOException, OzoneException { Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName)); Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName)); Assert.assertEquals(volumeCreateFailCount, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); } // Create a volume and modify the volume owner and then test its attributes @Test(timeout = 60000) public void testChangeVolumeOwner() throws IOException, OzoneException { - long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails(); - long volumeInfoFailCount = ksmMetrics.getNumVolumeInfoFails(); + long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails(); + long volumeInfoFailCount = omMetrics.getNumVolumeInfoFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -191,16 +190,16 @@ public void testChangeVolumeOwner() throws IOException, OzoneException { Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName)); Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName)); Assert.assertEquals(volumeCreateFailCount, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); Assert.assertEquals(volumeInfoFailCount, - ksmMetrics.getNumVolumeInfoFails()); + omMetrics.getNumVolumeInfoFails()); } // Create a volume and modify the volume owner and then test its attributes @Test(timeout = 60000) public void testChangeVolumeQuota() throws IOException, OzoneException { - long 
numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails(); - long numVolumeInfoFail = ksmMetrics.getNumVolumeInfoFails(); + long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails(); + long numVolumeInfoFail = omMetrics.getNumVolumeInfoFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -237,15 +236,15 @@ public void testChangeVolumeQuota() throws IOException, OzoneException { Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES, retVolumeInfo.getQuota().sizeInBytes()); Assert.assertEquals(numVolumeCreateFail, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); Assert.assertEquals(numVolumeInfoFail, - ksmMetrics.getNumVolumeInfoFails()); + omMetrics.getNumVolumeInfoFails()); } // Create a volume and then delete it and then check for deletion @Test(timeout = 60000) public void testDeleteVolume() throws IOException, OzoneException { - long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails(); + long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -270,7 +269,7 @@ public void testDeleteVolume() throws IOException, OzoneException { Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1)); Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName)); Assert.assertEquals(volumeCreateFailCount, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); // Volume with _A should be able to delete as it is empty. storageHandler.deleteVolume(volumeArgs); @@ -291,7 +290,7 @@ public void testDeleteVolume() throws IOException, OzoneException { // then delete it and then check for deletion failure @Test(timeout = 60000) public void testFailedDeleteVolume() throws IOException, OzoneException { - long numVolumeCreateFails = ksmMetrics.getNumVolumeCreateFails(); + long numVolumeCreateFails = omMetrics.getNumVolumeCreateFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -307,7 +306,7 @@ public void testFailedDeleteVolume() throws IOException, OzoneException { Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName)); Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName)); Assert.assertEquals(numVolumeCreateFails, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs); storageHandler.createBucket(bucketArgs); @@ -366,15 +365,15 @@ public void testAccessVolume() throws IOException, OzoneException { OzoneAcl.OzoneACLRights.READ); Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl)); - Assert.assertEquals(0, ksmMetrics.getNumVolumeCheckAccessFails()); - Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails()); + Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails()); + Assert.assertEquals(0, omMetrics.getNumVolumeCreateFails()); } @Test(timeout = 60000) public void testCreateBucket() throws IOException, OzoneException { - long numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails(); - long numBucketCreateFail = ksmMetrics.getNumBucketCreateFails(); - long 
numBucketInfoFail = ksmMetrics.getNumBucketInfoFails(); + long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails(); + long numBucketCreateFail = omMetrics.getNumBucketCreateFails(); + long numBucketInfoFail = omMetrics.getNumBucketInfoFails(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); @@ -394,11 +393,11 @@ public void testCreateBucket() throws IOException, OzoneException { Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName)); Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName)); Assert.assertEquals(numVolumeCreateFail, - ksmMetrics.getNumVolumeCreateFails()); + omMetrics.getNumVolumeCreateFails()); Assert.assertEquals(numBucketCreateFail, - ksmMetrics.getNumBucketCreateFails()); + omMetrics.getNumBucketCreateFails()); Assert.assertEquals(numBucketInfoFail, - ksmMetrics.getNumBucketInfoFails()); + omMetrics.getNumBucketInfoFails()); } @Test(timeout = 60000) @@ -479,7 +478,7 @@ public void testDeleteNonEmptyBucket() throws IOException, OzoneException { } /** - * Basic test of both putKey and getKey from KSM, as one can not be tested + * Basic test of both putKey and getKey from OM, as one can not be tested * without the other. * * @throws IOException @@ -492,8 +491,8 @@ public void testGetKeyWriterReader() throws IOException, OzoneException { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - long numKeyAllocates = ksmMetrics.getNumKeyAllocates(); - long numKeyLookups = ksmMetrics.getNumKeyLookups(); + long numKeyAllocates = omMetrics.getNumKeyAllocates(); + long numKeyLookups = omMetrics.getNumKeyLookups(); VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); createVolumeArgs.setUserName(userName); @@ -512,14 +511,14 @@ public void testGetKeyWriterReader() throws IOException, OzoneException { try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) { stream.write(dataString.getBytes()); } - Assert.assertEquals(1 + numKeyAllocates, ksmMetrics.getNumKeyAllocates()); + Assert.assertEquals(1 + numKeyAllocates, omMetrics.getNumKeyAllocates()); byte[] data = new byte[dataString.length()]; try (InputStream in = storageHandler.newKeyReader(keyArgs)) { in.read(data); } Assert.assertEquals(dataString, DFSUtil.bytes2String(data)); - Assert.assertEquals(1 + numKeyLookups, ksmMetrics.getNumKeyLookups()); + Assert.assertEquals(1 + numKeyLookups, omMetrics.getNumKeyLookups()); } /** @@ -536,7 +535,7 @@ public void testKeyOverwrite() throws IOException, OzoneException { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - long numKeyAllocateFails = ksmMetrics.getNumKeyAllocateFails(); + long numKeyAllocateFails = omMetrics.getNumKeyAllocateFails(); VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); createVolumeArgs.setUserName(userName); @@ -558,12 +557,12 @@ public void testKeyOverwrite() throws IOException, OzoneException { // We allow the key overwrite to be successful. Please note : Till // HDFS-11922 is fixed this causes a data block leak on the data node side. - // That is this overwrite only overwrites the keys on KSM. We need to + // That is this overwrite only overwrites the keys on OM. 
We need to // garbage collect those blocks from datanode. KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs); storageHandler.newKeyWriter(keyArgs2); Assert - .assertEquals(numKeyAllocateFails, ksmMetrics.getNumKeyAllocateFails()); + .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails()); } /** @@ -579,7 +578,7 @@ public void testGetNonExistKey() throws IOException, OzoneException { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - long numKeyLookupFails = ksmMetrics.getNumKeyLookupFails(); + long numKeyLookupFails = omMetrics.getNumKeyLookupFails(); VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); createVolumeArgs.setUserName(userName); @@ -598,11 +597,11 @@ public void testGetNonExistKey() throws IOException, OzoneException { exception.expectMessage("KEY_NOT_FOUND"); storageHandler.newKeyReader(keyArgs); Assert.assertEquals(1 + numKeyLookupFails, - ksmMetrics.getNumKeyLookupFails()); + omMetrics.getNumKeyLookupFails()); } /** - * Test delete keys for ksm. + * Test delete keys for om. * * @throws IOException * @throws OzoneException @@ -614,8 +613,8 @@ public void testDeleteKey() throws IOException, OzoneException { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - long numKeyDeletes = ksmMetrics.getNumKeyDeletes(); - long numKeyDeleteFails = ksmMetrics.getNumKeyDeletesFails(); + long numKeyDeletes = omMetrics.getNumKeyDeletes(); + long numKeyDeleteFails = omMetrics.getNumKeyDeletesFails(); VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs); createVolumeArgs.setUserName(userName); @@ -633,10 +632,10 @@ public void testDeleteKey() throws IOException, OzoneException { } storageHandler.deleteKey(keyArgs); - Assert.assertEquals(1 + numKeyDeletes, ksmMetrics.getNumKeyDeletes()); + Assert.assertEquals(1 + numKeyDeletes, omMetrics.getNumKeyDeletes()); // Make sure the deleted key has been renamed. - MetadataStore store = cluster.getKeySpaceManager(). + MetadataStore store = cluster.getOzoneManager(). getMetadataManager().getStore(); List> list = store.getRangeKVs(null, 10, new MetadataKeyFilters.KeyPrefixFilter() @@ -651,11 +650,11 @@ public void testDeleteKey() throws IOException, OzoneException { Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND")); } Assert.assertEquals(1 + numKeyDeleteFails, - ksmMetrics.getNumKeyDeletesFails()); + omMetrics.getNumKeyDeletesFails()); } /** - * Test rename key for ksm. + * Test rename key for om. 
* * @throws IOException * @throws OzoneException @@ -667,8 +666,8 @@ public void testRenameKey() throws IOException, OzoneException { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); - long numKeyRenames = ksmMetrics.getNumKeyRenames(); - long numKeyRenameFails = ksmMetrics.getNumKeyRenameFails(); + long numKeyRenames = omMetrics.getNumKeyRenames(); + long numKeyRenameFails = omMetrics.getNumKeyRenameFails(); int testRenameFails = 0; int testRenames = 0; IOException ioe = null; @@ -706,9 +705,9 @@ public void testRenameKey() throws IOException, OzoneException { testRenames++; storageHandler.renameKey(keyArgs, toKeyName); Assert.assertEquals(numKeyRenames + testRenames, - ksmMetrics.getNumKeyRenames()); + omMetrics.getNumKeyRenames()); Assert.assertEquals(numKeyRenameFails + testRenameFails, - ksmMetrics.getNumKeyRenameFails()); + omMetrics.getNumKeyRenameFails()); // Try to get the key, should fail as it has been renamed try { @@ -764,9 +763,9 @@ public void testRenameKey() throws IOException, OzoneException { Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error")); Assert.assertEquals(numKeyRenames + testRenames, - ksmMetrics.getNumKeyRenames()); + omMetrics.getNumKeyRenames()); Assert.assertEquals(numKeyRenameFails + testRenameFails, - ksmMetrics.getNumKeyRenameFails()); + omMetrics.getNumKeyRenameFails()); } @Test(timeout = 60000) @@ -1183,7 +1182,7 @@ public void testWriteSize() throws IOException, OzoneException { */ @Test public void testGetScmInfo() throws IOException { - ScmInfo info = cluster.getKeySpaceManager().getScmInfo(); + ScmInfo info = cluster.getOzoneManager().getScmInfo(); Assert.assertEquals(clusterId, info.getClusterId()); Assert.assertEquals(scmId, info.getScmId()); } @@ -1192,7 +1191,7 @@ public void testGetScmInfo() throws IOException { @Test public void testExpiredOpenKey() throws Exception { BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster - .getKeySpaceManager().getKeyManager()).getOpenKeyCleanupService(); + .getOzoneManager().getKeyManager()).getOpenKeyCleanupService(); String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); @@ -1228,7 +1227,7 @@ public void testExpiredOpenKey() throws Exception { // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not // contain these values. - openKeys = cluster.getKeySpaceManager() + openKeys = cluster.getOzoneManager() .getMetadataManager().getExpiredOpenKeys(); for (BlockGroup bg : openKeys) { @@ -1239,7 +1238,7 @@ public void testExpiredOpenKey() throws Exception { Thread.sleep(2000); // Now all k1-k4 should be in ExpiredOpenKeys - openKeys = cluster.getKeySpaceManager() + openKeys = cluster.getOzoneManager() .getMetadataManager().getExpiredOpenKeys(); for (BlockGroup bg : openKeys) { String[] subs = bg.getGroupID().split("/"); @@ -1258,7 +1257,7 @@ public void testExpiredOpenKey() throws Exception { // now all k1-k4 should have been removed by the clean-up task, only k5 // should be present in ExpiredOpenKeys. openKeys = - cluster.getKeySpaceManager().getMetadataManager().getExpiredOpenKeys(); + cluster.getOzoneManager().getMetadataManager().getExpiredOpenKeys(); System.out.println(openKeys); boolean key5found = false; Set removed = Stream.of( @@ -1276,68 +1275,68 @@ public void testExpiredOpenKey() throws Exception { } /** - * Tests the KSM Initialization. 
+ * Tests the OM Initialization. * @throws IOException */ @Test - public void testKSMInitialization() throws IOException { - // Read the version file info from KSM version file - KSMStorage ksmStorage = cluster.getKeySpaceManager().getKsmStorage(); + public void testOmInitialization() throws IOException { + // Read the version file info from OM version file + OMStorage omStorage = cluster.getOzoneManager().getOmStorage(); SCMStorage scmStorage = new SCMStorage(conf); // asserts whether cluster Id and SCM ID are properly set in SCM Version // file. Assert.assertEquals(clusterId, scmStorage.getClusterID()); Assert.assertEquals(scmId, scmStorage.getScmId()); - // asserts whether KSM Id is properly set in KSM Version file. - Assert.assertEquals(ksmId, ksmStorage.getKsmId()); - // asserts whether the SCM info is correct in KSM Version file. - Assert.assertEquals(clusterId, ksmStorage.getClusterID()); - Assert.assertEquals(scmId, ksmStorage.getScmId()); + // asserts whether OM Id is properly set in OM Version file. + Assert.assertEquals(omId, omStorage.getOmId()); + // asserts whether the SCM info is correct in OM Version file. + Assert.assertEquals(clusterId, omStorage.getClusterID()); + Assert.assertEquals(scmId, omStorage.getScmId()); } /** - * Tests the KSM Initialization Failure. + * Tests the OM Initialization Failure. * @throws IOException */ @Test - public void testKSMInitializationFailure() throws Exception { + public void testOmInitializationFailure() throws Exception { OzoneConfiguration config = new OzoneConfiguration(); final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path metaDirPath = Paths.get(path, "ksm-meta"); + Path metaDirPath = Paths.get(path, "om-meta"); config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)); - exception.expect(KSMException.class); - exception.expectMessage("KSM not initialized."); - KeySpaceManager.createKSM(null, config); - KSMStorage ksmStore = new KSMStorage(config); - ksmStore.setClusterId("testClusterId"); - ksmStore.setScmId("testScmId"); + exception.expect(OMException.class); + exception.expectMessage("OM not initialized."); + OzoneManager.createOm(null, config); + OMStorage omStore = new OMStorage(config); + omStore.setClusterId("testClusterId"); + omStore.setScmId("testScmId"); // writes the version file properties - ksmStore.initialize(); - exception.expect(KSMException.class); + omStore.initialize(); + exception.expect(OMException.class); exception.expectMessage("SCM version info mismatch."); - KeySpaceManager.createKSM(null, conf); + OzoneManager.createOm(null, conf); } @Test public void testGetServiceList() throws IOException { - long numGetServiceListCalls = ksmMetrics.getNumGetServiceLists(); - List services = cluster.getKeySpaceManager().getServiceList(); + long numGetServiceListCalls = omMetrics.getNumGetServiceLists(); + List services = cluster.getOzoneManager().getServiceList(); Assert.assertEquals(numGetServiceListCalls + 1, - ksmMetrics.getNumGetServiceLists()); + omMetrics.getNumGetServiceLists()); - ServiceInfo ksmInfo = services.stream().filter( - a -> a.getNodeType().equals(HddsProtos.NodeType.KSM)) + ServiceInfo omInfo = services.stream().filter( + a -> a.getNodeType().equals(HddsProtos.NodeType.OM)) .collect(Collectors.toList()).get(0); 
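For context on the initialization tests above, OzoneManager.createOm refuses to start until an OM version file exists, so the version file has to be written first. A minimal sketch of that bootstrap order, with placeholder IDs:
```
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OzoneManager;

final class OmBootstrapSketch {
  static OzoneManager initAndStart(OzoneConfiguration conf) throws Exception {
    // Write the OM version file first; createOm() fails with
    // OMException("OM not initialized.") if this step is skipped.
    OMStorage omStore = new OMStorage(conf);
    omStore.setClusterId("test-cluster-id");  // placeholder; must match the SCM's cluster id
    omStore.setScmId("test-scm-id");          // placeholder; must match the running SCM
    omStore.initialize();
    return OzoneManager.createOm(null, conf);
  }
}
```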
- InetSocketAddress ksmAddress = new InetSocketAddress(ksmInfo.getHostname(), - ksmInfo.getPort(ServicePort.Type.RPC)); + InetSocketAddress omAddress = new InetSocketAddress(omInfo.getHostname(), + omInfo.getPort(ServicePort.Type.RPC)); Assert.assertEquals(NetUtils.createSocketAddr( - conf.get(OZONE_KSM_ADDRESS_KEY)), ksmAddress); + conf.get(OZONE_OM_ADDRESS_KEY)), omAddress); ServiceInfo scmInfo = services.stream().filter( a -> a.getNodeType().equals(HddsProtos.NodeType.SCM)) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java similarity index 83% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java index feb83d3e5a..8168d27a5d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.core.type.TypeReference; @@ -24,9 +24,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServicePort; + .OzoneManagerProtocolProtos.ServicePort; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; @@ -44,12 +44,12 @@ import java.util.Map; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.KsmUtils.getKsmAddressForClients; +import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients; /** - * This class is to test the REST interface exposed by KeySpaceManager. + * This class is to test the REST interface exposed by OzoneManager. 
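For context on the address assertions above, the renamed helpers resolve the OM RPC endpoint either from ozone.om.address directly or through OmUtils. A minimal sketch; it assumes the address key is set in the configuration:
```
import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.net.NetUtils;

import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;

final class OmAddressSketch {
  // Resolve the OM RPC endpoint straight from ozone.om.address;
  // the key is assumed to be present in the configuration.
  static InetSocketAddress fromConfigKey(OzoneConfiguration conf) {
    return NetUtils.createSocketAddr(conf.get(OZONE_OM_ADDRESS_KEY));
  }

  // Or let the renamed OmUtils helper apply the usual defaults.
  static InetSocketAddress fromHelper(OzoneConfiguration conf) {
    return getOmAddressForClients(conf);
  }
}
```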
*/ -public class TestKeySpaceManagerRestInterface { +public class TestOzoneManagerRestInterface { private static MiniOzoneCluster cluster; private static OzoneConfiguration conf; @@ -70,8 +70,8 @@ public static void tearDown() throws Exception { @Test public void testGetServiceList() throws Exception { - KeySpaceManagerHttpServer server = - cluster.getKeySpaceManager().getHttpServer(); + OzoneManagerHttpServer server = + cluster.getOzoneManager().getHttpServer(); HttpClient client = HttpClients.createDefault(); String connectionUri = "http://" + NetUtils.getHostPortString(server.getHttpAddress()); @@ -89,15 +89,15 @@ public void testGetServiceList() throws Exception { serviceMap.put(serviceInfo.getNodeType(), serviceInfo); } - InetSocketAddress ksmAddress = - getKsmAddressForClients(conf); - ServiceInfo ksmInfo = serviceMap.get(HddsProtos.NodeType.KSM); + InetSocketAddress omAddress = + getOmAddressForClients(conf); + ServiceInfo omInfo = serviceMap.get(HddsProtos.NodeType.OM); - Assert.assertEquals(ksmAddress.getHostName(), ksmInfo.getHostname()); - Assert.assertEquals(ksmAddress.getPort(), - ksmInfo.getPort(ServicePort.Type.RPC)); + Assert.assertEquals(omAddress.getHostName(), omInfo.getHostname()); + Assert.assertEquals(omAddress.getPort(), + omInfo.getPort(ServicePort.Type.RPC)); Assert.assertEquals(server.getHttpAddress().getPort(), - ksmInfo.getPort(ServicePort.Type.HTTP)); + omInfo.getPort(ServicePort.Type.HTTP)); InetSocketAddress scmAddress = getScmAddressForClients(conf); @@ -123,7 +123,7 @@ public void testGetServiceList() throws Exception { ports.get(type)); break; default: - // KSM only sends Datanode's info port details + // OM only sends Datanode's info port details // i.e. HTTP or HTTPS // Other ports are not expected as of now. Assert.fail(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index ed8f0d5c57..508287082e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -59,7 +59,7 @@ import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.web.ozShell.Shell; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.response.BucketInfo; @@ -167,23 +167,23 @@ public void setup() { System.setOut(new PrintStream(out)); System.setErr(new PrintStream(err)); if(clientProtocol.equals(RestClient.class)) { - String hostName = cluster.getKeySpaceManager().getHttpServer() + String hostName = cluster.getOzoneManager().getHttpServer() .getHttpAddress().getHostName(); int port = cluster - .getKeySpaceManager().getHttpServer().getHttpAddress().getPort(); + .getOzoneManager().getHttpServer().getHttpAddress().getPort(); url = String.format("http://" + hostName + ":" + port); } else { List services = null; try { - services = cluster.getKeySpaceManager().getServiceList(); + services = cluster.getOzoneManager().getServiceList(); } catch (IOException e) { - LOG.error("Could not get service list from KSM"); + LOG.error("Could not get service list from OM"); } String hostName = 
services.stream().filter( - a -> a.getNodeType().equals(HddsProtos.NodeType.KSM)) + a -> a.getNodeType().equals(HddsProtos.NodeType.OM)) .collect(Collectors.toList()).get(0).getHostname(); - String port = cluster.getKeySpaceManager().getRpcPort(); + String port = cluster.getOzoneManager().getRpcPort(); url = String.format("o3://" + hostName + ":" + port); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index b4ed2b12c2..1a1f37ca37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.scm.cli.SQLCLI; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; @@ -113,7 +112,7 @@ public void setup() throws Exception { cluster.waitForClusterToBeReady(); datanodeIpAddress = cluster.getHddsDatanodes().get(0) .getDatanodeDetails().getIpAddress(); - cluster.getKeySpaceManager().stop(); + cluster.getOzoneManager().stop(); cluster.getStorageContainerManager().stop(); nodeManager = cluster.getStorageContainerManager().getScmNodeManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java index 0e61391c63..e592d560d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java @@ -90,7 +90,7 @@ public static void shutdown() { @Test public void testCreateVolumes() throws IOException { super.testCreateVolumes(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } @@ -102,7 +102,7 @@ public void testCreateVolumes() throws IOException { @Test public void testCreateVolumesWithQuota() throws IOException { super.testCreateVolumesWithQuota(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } @@ -114,7 +114,7 @@ public void testCreateVolumesWithQuota() throws IOException { @Test public void testCreateVolumesWithInvalidQuota() throws IOException { super.testCreateVolumesWithInvalidQuota(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } @@ -128,7 +128,7 @@ public void testCreateVolumesWithInvalidQuota() throws IOException { @Test public void testCreateVolumesWithInvalidUser() throws IOException { super.testCreateVolumesWithInvalidUser(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } @@ -143,7 +143,7 @@ public void testCreateVolumesWithInvalidUser() throws IOException { @Test public void 
testCreateVolumesWithOutAdminRights() throws IOException { super.testCreateVolumesWithOutAdminRights(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } @@ -155,7 +155,7 @@ public void testCreateVolumesWithOutAdminRights() throws IOException { @Test public void testCreateVolumesInLoop() throws IOException { super.testCreateVolumesInLoop(port); - Assert.assertEquals(0, cluster.getKeySpaceManager() + Assert.assertEquals(0, cluster.getOzoneManager() .getMetrics().getNumVolumeCreateFails()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java index b86c57721c..a95bd0e65f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java @@ -48,13 +48,13 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.ksm.KeySpaceManager; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .Status; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.web.utils.OzoneUtils; @@ -644,15 +644,15 @@ int totalNumOfKeys() { } } - private int countKsmKeys(KeySpaceManager ksm) throws IOException { + private int countOmKeys(OzoneManager om) throws IOException { int totalCount = 0; - List volumes = - ksm.listAllVolumes(null, null, Integer.MAX_VALUE); - for (KsmVolumeArgs volume : volumes) { - List buckets = - ksm.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE); - for (KsmBucketInfo bucket : buckets) { - List keys = ksm.listKeys(bucket.getVolumeName(), + List volumes = + om.listAllVolumes(null, null, Integer.MAX_VALUE); + for (OmVolumeArgs volume : volumes) { + List buckets = + om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE); + for (OmBucketInfo bucket : buckets) { + List keys = om.listKeys(bucket.getVolumeName(), bucket.getBucketName(), null, null, Integer.MAX_VALUE); totalCount += keys.size(); } @@ -662,10 +662,10 @@ private int countKsmKeys(KeySpaceManager ksm) throws IOException { @Test public void testDeleteKey() throws Exception { - KeySpaceManager ksm = ozoneCluster.getKeySpaceManager(); + OzoneManager ozoneManager = ozoneCluster.getOzoneManager(); // To avoid interference from other test cases, // we collect number of existing keys at the beginning - int numOfExistedKeys = countKsmKeys(ksm); + int numOfExistedKeys = countOmKeys(ozoneManager); // Keep tracking 
bucket keys info while creating them PutHelper helper = new PutHelper(client, path); @@ -689,15 +689,15 @@ public void testDeleteKey() throws Exception { // count the total number of created keys. Set> buckets = bucketKeys.getAllBuckets(); for (Pair buk : buckets) { - List createdKeys = - ksm.listKeys(buk.getKey(), buk.getValue(), null, null, 20); + List createdKeys = + ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20); // Memorize chunks that has been created, // so we can verify actual deletions at DN side later. - for (KsmKeyInfo keyInfo : createdKeys) { - List locations = + for (OmKeyInfo keyInfo : createdKeys) { + List locations = keyInfo.getLatestVersionLocations().getLocationList(); - for (KsmKeyLocationInfo location : locations) { + for (OmKeyLocationInfo location : locations) { KeyData keyData = new KeyData(location.getBlockID()); KeyData blockInfo = cm.getContainerManager() .getKeyManager().getKey(keyData); @@ -721,9 +721,9 @@ public void testDeleteKey() throws Exception { // Ensure all keys are created. Assert.assertEquals(20, numOfCreatedKeys); - // Ensure all keys are visible from KSM. + // Ensure all keys are visible from OM. // Total number should be numOfCreated + numOfExisted - Assert.assertEquals(20 + numOfExistedKeys, countKsmKeys(ksm)); + Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager)); // Delete 10 keys int delCount = 20; @@ -732,21 +732,21 @@ public void testDeleteKey() throws Exception { List bks = bucketKeys.getBucketKeys(bucketInfo.getValue()); for (String keyName : bks) { if (delCount > 0) { - KsmKeyArgs arg = - new KsmKeyArgs.Builder().setVolumeName(bucketInfo.getKey()) + OmKeyArgs arg = + new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey()) .setBucketName(bucketInfo.getValue()).setKeyName(keyName) .build(); - ksm.deleteKey(arg); + ozoneManager.deleteKey(arg); delCount--; } } } - // It should be pretty quick that keys are removed from KSM namespace, + // It should be pretty quick that keys are removed from OM namespace, // because actual deletion happens in async mode. 
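For context, the key-deletion test above counts keys by walking the whole OM namespace; a compact sketch of that volume-to-bucket-to-key traversal with the renamed Om* helper types spelled out (equivalent to countOmKeys):
```
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;

final class OmNamespaceWalkSketch {
  // Counts every key visible through the Ozone Manager by listing
  // volumes, then buckets, then keys with the renamed helper types.
  static int countOmKeys(OzoneManager om) throws IOException {
    int totalCount = 0;
    List<OmVolumeArgs> volumes = om.listAllVolumes(null, null, Integer.MAX_VALUE);
    for (OmVolumeArgs volume : volumes) {
      List<OmBucketInfo> buckets =
          om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
      for (OmBucketInfo bucket : buckets) {
        List<OmKeyInfo> keys = om.listKeys(bucket.getVolumeName(),
            bucket.getBucketName(), null, null, Integer.MAX_VALUE);
        totalCount += keys.size();
      }
    }
    return totalCount;
  }
}
```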
GenericTestUtils.waitFor(() -> { try { - int num = countKsmKeys(ksm); + int num = countOmKeys(ozoneManager); return num == (numOfExistedKeys); } catch (IOException e) { return false; diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep similarity index 100% rename from hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep rename to hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java index 3128d31e80..2200cd8887 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; +import static org.apache.hadoop.ozone.OmUtils.getOmAddress; import static org.apache.hadoop.ozone.OzoneConfigKeys.*; import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS; import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE; @@ -34,9 +34,8 @@ import com.sun.jersey.api.core.ApplicationAdapter; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.ksm.protocolPB - .KeySpaceManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.ObjectStoreApplication; import org.apache.hadoop.ozone.web.handlers.ServiceFilter; @@ -72,8 +71,8 @@ public final class ObjectStoreHandler implements Closeable { LoggerFactory.getLogger(ObjectStoreHandler.class); private final ObjectStoreJerseyContainer objectStoreJerseyContainer; - private final KeySpaceManagerProtocolClientSideTranslatorPB - keySpaceManagerClient; + private final OzoneManagerProtocolClientSideTranslatorPB + ozoneManagerClient; private final StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; private final ScmBlockLocationProtocolClientSideTranslatorPB @@ -119,28 +118,28 @@ public ObjectStoreHandler(Configuration conf) throws IOException { NetUtils.getDefaultSocketFactory(conf), Client.getRpcTimeout(conf))); - RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class, + RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); - long ksmVersion = - RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class); - InetSocketAddress ksmAddress = getKsmAddress(conf); - this.keySpaceManagerClient = - new KeySpaceManagerProtocolClientSideTranslatorPB( - RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion, - ksmAddress, UserGroupInformation.getCurrentUser(), conf, + long omVersion = + RPC.getProtocolVersion(OzoneManagerProtocolPB.class); + InetSocketAddress omAddress = getOmAddress(conf); + this.ozoneManagerClient = + new OzoneManagerProtocolClientSideTranslatorPB( + 
RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, + omAddress, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), Client.getRpcTimeout(conf))); storageHandler = new DistributedStorageHandler( new OzoneConfiguration(conf), this.storageContainerLocationClient, - this.keySpaceManagerClient); + this.ozoneManagerClient); } else { if (OzoneConsts.OZONE_HANDLER_LOCAL.equalsIgnoreCase(shType)) { storageHandler = new LocalStorageHandler(conf); this.storageContainerLocationClient = null; this.scmBlockLocationClient = null; - this.keySpaceManagerClient = null; + this.ozoneManagerClient = null; } else { throw new IllegalArgumentException( String.format("Unrecognized value for %s: %s," @@ -186,6 +185,6 @@ public void close() { storageHandler.close(); IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); IOUtils.cleanupWithLogger(LOG, scmBlockLocationClient); - IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient); + IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); } } diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java index ef0293e725..ad48787318 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java @@ -21,7 +21,7 @@ import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.ozone.OzoneRestUtils; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.headers.Header; @@ -102,7 +102,7 @@ public Response handleCall(String volume, String bucket, String key, LOG.error("IOException:", fsExp); // Map KEY_NOT_FOUND to INVALID_KEY if (fsExp.getMessage().endsWith( - KeySpaceManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) { + OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) { throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp); } diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java index 1d98400f31..fb95bb9a9f 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ozone.OzoneRestUtils; import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.interfaces.UserAuth; @@ -135,7 +135,7 @@ private void handleIOException(String volume, String reqID, String hostName, OzoneException exp = null; if ((fsExp != null && fsExp.getMessage().endsWith( - 
KeySpaceManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name())) + OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name())) || fsExp instanceof FileAlreadyExistsException) { exp = ErrorTable .newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName); diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index fedc0f007a..ec33990de4 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -22,14 +22,13 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.protocolPB - .KeySpaceManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; @@ -37,9 +36,9 @@ import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos; -import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -77,8 +76,8 @@ public final class DistributedStorageHandler implements StorageHandler { private final StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private final KeySpaceManagerProtocolClientSideTranslatorPB - keySpaceManagerClient; + private final OzoneManagerProtocolClientSideTranslatorPB + ozoneManagerClient; private final XceiverClientManager xceiverClientManager; private final OzoneAcl.OzoneACLRights userRights; private final OzoneAcl.OzoneACLRights groupRights; @@ -92,14 +91,14 @@ public final class DistributedStorageHandler implements StorageHandler { * * @param conf configuration * @param storageContainerLocation StorageContainerLocationProtocol proxy - * @param keySpaceManagerClient KeySpaceManager proxy + * @param ozoneManagerClient OzoneManager proxy */ public 
DistributedStorageHandler(OzoneConfiguration conf, StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocation, - KeySpaceManagerProtocolClientSideTranslatorPB - keySpaceManagerClient) { - this.keySpaceManagerClient = keySpaceManagerClient; + OzoneManagerProtocolClientSideTranslatorPB + ozoneManagerClient) { + this.ozoneManagerClient = ozoneManagerClient; this.storageContainerLocationClient = storageContainerLocation; this.xceiverClientManager = new XceiverClientManager(conf); this.useRatis = conf.getBoolean( @@ -116,10 +115,10 @@ public DistributedStorageHandler(OzoneConfiguration conf, chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT); - userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS, - KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT); - groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS, - KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT); + userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS, + OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT); + groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS, + OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT); if(chunkSize > ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE) { LOG.warn("The chunk size ({}) is not allowed to be more than" + " the maximum size ({})," @@ -136,26 +135,26 @@ public void createVolume(VolumeArgs args) throws IOException, OzoneException { OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, args.getUserName(), userRights); - KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder(); + OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setAdminName(args.getAdminName()) .setOwnerName(args.getUserName()) .setVolume(args.getVolumeName()) .setQuotaInBytes(quota) - .addOzoneAcls(KSMPBHelper.convertOzoneAcl(userAcl)); + .addOzoneAcls(OMPBHelper.convertOzoneAcl(userAcl)); if (args.getGroups() != null) { for (String group : args.getGroups()) { OzoneAcl groupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights); - builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(groupAcl)); + builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(groupAcl)); } } - keySpaceManagerClient.createVolume(builder.build()); + ozoneManagerClient.createVolume(builder.build()); } @Override public void setVolumeOwner(VolumeArgs args) throws IOException, OzoneException { - keySpaceManagerClient.setOwner(args.getVolumeName(), args.getUserName()); + ozoneManagerClient.setOwner(args.getVolumeName(), args.getUserName()); } @Override @@ -163,14 +162,14 @@ public void setVolumeQuota(VolumeArgs args, boolean remove) throws IOException, OzoneException { long quota = remove ? 
OzoneConsts.MAX_QUOTA_IN_BYTES : args.getQuota().sizeInBytes(); - keySpaceManagerClient.setQuota(args.getVolumeName(), quota); + ozoneManagerClient.setQuota(args.getVolumeName(), quota); } @Override public boolean checkVolumeAccess(String volume, OzoneAcl acl) throws IOException, OzoneException { - return keySpaceManagerClient - .checkVolumeAccess(volume, KSMPBHelper.convertOzoneAcl(acl)); + return ozoneManagerClient + .checkVolumeAccess(volume, OMPBHelper.convertOzoneAcl(acl)); } @Override @@ -185,9 +184,9 @@ public ListVolumes listVolumes(ListArgs args) OzoneConsts.MAX_LISTVOLUMES_SIZE, maxNumOfKeys)); } - List listResult; + List listResult; if (args.isRootScan()) { - listResult = keySpaceManagerClient.listAllVolumes(args.getPrefix(), + listResult = ozoneManagerClient.listAllVolumes(args.getPrefix(), args.getPrevKey(), args.getMaxKeys()); } else { UserArgs userArgs = args.getArgs(); @@ -195,16 +194,16 @@ public ListVolumes listVolumes(ListArgs args) throw new IllegalArgumentException("Illegal argument," + " missing user argument."); } - listResult = keySpaceManagerClient.listVolumeByUser( + listResult = ozoneManagerClient.listVolumeByUser( args.getArgs().getUserName(), args.getPrefix(), args.getPrevKey(), args.getMaxKeys()); } // TODO Add missing fields createdBy, bucketCount and bytesUsed ListVolumes result = new ListVolumes(); - for (KsmVolumeArgs volumeArgs : listResult) { + for (OmVolumeArgs volumeArgs : listResult) { VolumeInfo info = new VolumeInfo(); - KeySpaceManagerProtocolProtos.VolumeInfo + OzoneManagerProtocolProtos.VolumeInfo infoProto = volumeArgs.getProtobuf(); info.setOwner(new VolumeOwner(infoProto.getOwnerName())); info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes())); @@ -220,14 +219,14 @@ public ListVolumes listVolumes(ListArgs args) @Override public void deleteVolume(VolumeArgs args) throws IOException, OzoneException { - keySpaceManagerClient.deleteVolume(args.getVolumeName()); + ozoneManagerClient.deleteVolume(args.getVolumeName()); } @Override public VolumeInfo getVolumeInfo(VolumeArgs args) throws IOException, OzoneException { - KsmVolumeArgs volumeArgs = - keySpaceManagerClient.getVolumeInfo(args.getVolumeName()); + OmVolumeArgs volumeArgs = + ozoneManagerClient.getVolumeInfo(args.getVolumeName()); //TODO: add support for createdOn and other fields in getVolumeInfo VolumeInfo volInfo = new VolumeInfo(volumeArgs.getVolume(), null, @@ -242,7 +241,7 @@ public VolumeInfo getVolumeInfo(VolumeArgs args) @Override public void createBucket(final BucketArgs args) throws IOException, OzoneException { - KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder(); + OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); builder.setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()); if(args.getAddAcls() != null) { @@ -255,7 +254,7 @@ public void createBucket(final BucketArgs args) builder.setIsVersionEnabled(getBucketVersioningProtobuf( args.getVersioning())); } - keySpaceManagerClient.createBucket(builder.build()); + ozoneManagerClient.createBucket(builder.build()); } /** @@ -285,7 +284,7 @@ public void setBucketAcls(BucketArgs args) List removeAcls = args.getRemoveAcls(); List addAcls = args.getAddAcls(); if(removeAcls != null || addAcls != null) { - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()); if(removeAcls != null && !removeAcls.isEmpty()) { @@ -294,35 +293,35 @@ public void 
setBucketAcls(BucketArgs args) if(addAcls != null && !addAcls.isEmpty()) { builder.setAddAcls(args.getAddAcls()); } - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } } @Override public void setBucketVersioning(BucketArgs args) throws IOException, OzoneException { - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setIsVersionEnabled(getBucketVersioningProtobuf( args.getVersioning())); - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } @Override public void setBucketStorageClass(BucketArgs args) throws IOException, OzoneException { - KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); builder.setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setStorageType(args.getStorageType()); - keySpaceManagerClient.setBucketProperty(builder.build()); + ozoneManagerClient.setBucketProperty(builder.build()); } @Override public void deleteBucket(BucketArgs args) throws IOException, OzoneException { - keySpaceManagerClient.deleteBucket(args.getVolumeName(), + ozoneManagerClient.deleteBucket(args.getVolumeName(), args.getBucketName()); } @@ -354,12 +353,12 @@ public ListBuckets listBuckets(ListArgs args) OzoneConsts.MAX_LISTBUCKETS_SIZE, maxNumOfKeys)); } - List buckets = - keySpaceManagerClient.listBuckets(va.getVolumeName(), + List buckets = + ozoneManagerClient.listBuckets(va.getVolumeName(), args.getPrevKey(), args.getPrefix(), args.getMaxKeys()); // Convert the result for the web layer. 
- for (KsmBucketInfo bucketInfo : buckets) { + for (OmBucketInfo bucketInfo : buckets) { BucketInfo bk = new BucketInfo(); bk.setVolumeName(bucketInfo.getVolumeName()); bk.setBucketName(bucketInfo.getBucketName()); @@ -382,26 +381,26 @@ public BucketInfo getBucketInfo(BucketArgs args) throws IOException { String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); - KsmBucketInfo ksmBucketInfo = keySpaceManagerClient.getBucketInfo( + OmBucketInfo omBucketInfo = ozoneManagerClient.getBucketInfo( volumeName, bucketName); - BucketInfo bucketInfo = new BucketInfo(ksmBucketInfo.getVolumeName(), - ksmBucketInfo.getBucketName()); - if(ksmBucketInfo.getIsVersionEnabled()) { + BucketInfo bucketInfo = new BucketInfo(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + if(omBucketInfo.getIsVersionEnabled()) { bucketInfo.setVersioning(Versioning.ENABLED); } else { bucketInfo.setVersioning(Versioning.DISABLED); } - bucketInfo.setStorageType(ksmBucketInfo.getStorageType()); - bucketInfo.setAcls(ksmBucketInfo.getAcls()); + bucketInfo.setStorageType(omBucketInfo.getStorageType()); + bucketInfo.setAcls(omBucketInfo.getAcls()); bucketInfo.setCreatedOn( - HddsClientUtils.formatDateTime(ksmBucketInfo.getCreationTime())); + HddsClientUtils.formatDateTime(omBucketInfo.getCreationTime())); return bucketInfo; } @Override public OutputStream newKeyWriter(KeyArgs args) throws IOException, OzoneException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) @@ -409,14 +408,14 @@ public OutputStream newKeyWriter(KeyArgs args) throws IOException, .setType(xceiverClientManager.getType()) .setFactor(xceiverClientManager.getFactor()) .build(); - // contact KSM to allocate a block for key. - OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs); + // contact OM to allocate a block for key. 
+ OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); ChunkGroupOutputStream groupOutputStream = new ChunkGroupOutputStream.Builder() .setHandler(openKey) .setXceiverClientManager(xceiverClientManager) .setScmClient(storageContainerLocationClient) - .setKsmClient(keySpaceManagerClient) + .setOmClient(ozoneManagerClient) .setChunkSize(chunkSize) .setRequestID(args.getRequestID()) .setType(xceiverClientManager.getType()) @@ -437,56 +436,56 @@ public void commitKey(KeyArgs args, OutputStream stream) throws @Override public LengthInputStream newKeyReader(KeyArgs args) throws IOException, OzoneException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .setDataSize(args.getSize()) .build(); - KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs); - return ChunkGroupInputStream.getFromKsmKeyInfo( + OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); + return ChunkGroupInputStream.getFromOmKeyInfo( keyInfo, xceiverClientManager, storageContainerLocationClient, args.getRequestID()); } @Override public void deleteKey(KeyArgs args) throws IOException, OzoneException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .build(); - keySpaceManagerClient.deleteKey(keyArgs); + ozoneManagerClient.deleteKey(keyArgs); } @Override public void renameKey(KeyArgs args, String toKeyName) throws IOException, OzoneException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .build(); - keySpaceManagerClient.renameKey(keyArgs, toKeyName); + ozoneManagerClient.renameKey(keyArgs, toKeyName); } @Override public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException { - KsmKeyArgs keyArgs = new KsmKeyArgs.Builder() + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .build(); - KsmKeyInfo ksmKeyInfo = keySpaceManagerClient.lookupKey(keyArgs); + OmKeyInfo omKeyInfo = ozoneManagerClient.lookupKey(keyArgs); KeyInfo keyInfo = new KeyInfo(); keyInfo.setVersion(0); - keyInfo.setKeyName(ksmKeyInfo.getKeyName()); - keyInfo.setSize(ksmKeyInfo.getDataSize()); + keyInfo.setKeyName(omKeyInfo.getKeyName()); + keyInfo.setSize(omKeyInfo.getDataSize()); keyInfo.setCreatedOn( - HddsClientUtils.formatDateTime(ksmKeyInfo.getCreationTime())); + HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime())); keyInfo.setModifiedOn( - HddsClientUtils.formatDateTime(ksmKeyInfo.getModificationTime())); + HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime())); return keyInfo; } @@ -515,13 +514,13 @@ public ListKeys listKeys(ListArgs args) throws IOException, OzoneException { OzoneConsts.MAX_LISTKEYS_SIZE, maxNumOfKeys)); } - List keys= - keySpaceManagerClient.listKeys(bucketArgs.getVolumeName(), + List keys= + ozoneManagerClient.listKeys(bucketArgs.getVolumeName(), bucketArgs.getBucketName(), args.getPrevKey(), args.getPrefix(), args.getMaxKeys()); // Convert the result for the web layer. 
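A minimal sketch of the renamed read path above: the handler first asks the Ozone Manager for the key's metadata, then turns the returned OmKeyInfo into a data stream. The fragment below isolates the metadata half; KeyLookup is a local stand-in for ozoneManagerClient, and the streaming step (ChunkGroupInputStream.getFromOmKeyInfo, as in the hunk above) is omitted.

```
import java.io.IOException;

import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

/** Illustrative only: the OM lookup half of the read path shown above. */
public final class KeyLookupExample {

  /** Local stand-in for ozoneManagerClient; only the call this sketch needs. */
  interface KeyLookup {
    OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
  }

  static OmKeyInfo lookup(KeyLookup om, String volume, String bucket, String key)
      throws IOException {
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()   // same builder as in newKeyReader above
        .setVolumeName(volume)
        .setBucketName(bucket)
        .setKeyName(key)
        .build();
    return om.lookupKey(keyArgs);                 // OM resolves the key to its block locations
  }

  private KeyLookupExample() {
  }
}
```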
- for (KsmKeyInfo info : keys) { + for (OmKeyInfo info : keys) { KeyInfo tempInfo = new KeyInfo(); tempInfo.setVersion(0); tempInfo.setKeyName(info.getKeyName()); @@ -547,7 +546,7 @@ public ListKeys listKeys(ListArgs args) throws IOException, OzoneException { @Override public void close() { IOUtils.cleanupWithLogger(LOG, xceiverClientManager); - IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient); + IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java similarity index 78% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java index 6c756913d5..ddb2b0e26d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java @@ -14,10 +14,10 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import java.io.IOException; import java.util.List; @@ -28,15 +28,15 @@ public interface BucketManager { /** * Creates a bucket. - * @param bucketInfo - KsmBucketInfo for creating bucket. + * @param bucketInfo - OmBucketInfo for creating bucket. */ - void createBucket(KsmBucketInfo bucketInfo) throws IOException; + void createBucket(OmBucketInfo bucketInfo) throws IOException; /** * Returns Bucket Information. * @param volumeName - Name of the Volume. * @param bucketName - Name of the Bucket. */ - KsmBucketInfo getBucketInfo(String volumeName, String bucketName) + OmBucketInfo getBucketInfo(String volumeName, String bucketName) throws IOException; /** @@ -44,7 +44,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @param args - BucketArgs. * @throws IOException */ - void setBucketProperty(KsmBucketArgs args) throws IOException; + void setBucketProperty(OmBucketArgs args) throws IOException; /** * Deletes an existing empty bucket from volume. @@ -55,7 +55,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) void deleteBucket(String volumeName, String bucketName) throws IOException; /** - * Returns a list of buckets represented by {@link KsmBucketInfo} + * Returns a list of buckets represented by {@link OmBucketInfo} * in the given volume. * * @param volumeName @@ -73,7 +73,7 @@ KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @return a list of buckets. 
* @throws IOException */ - List listBuckets(String volumeName, - String startBucket, String bucketPrefix, int maxNumOfBuckets) + List listBuckets(String volumeName, + String startBucket, String bucketPrefix, int maxNumOfBuckets) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java similarity index 79% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 957a6d9777..4bbce8101b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -14,15 +14,15 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.BucketInfo; + .OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.util.Time; import org.iq80.leveldb.DBException; @@ -33,22 +33,22 @@ import java.util.List; /** - * KSM bucket manager. + * OM bucket manager. */ public class BucketManagerImpl implements BucketManager { private static final Logger LOG = LoggerFactory.getLogger(BucketManagerImpl.class); /** - * KSMMetadataManager is used for accessing KSM MetadataDB and ReadWriteLock. + * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock. */ - private final KSMMetadataManager metadataManager; + private final OMMetadataManager metadataManager; /** * Constructs BucketManager. * @param metadataManager */ - public BucketManagerImpl(KSMMetadataManager metadataManager){ + public BucketManagerImpl(OMMetadataManager metadataManager){ this.metadataManager = metadataManager; } @@ -73,10 +73,10 @@ public BucketManagerImpl(KSMMetadataManager metadataManager){ /** * Creates a bucket. - * @param bucketInfo - KsmBucketInfo. + * @param bucketInfo - OmBucketInfo. 
*/ @Override - public void createBucket(KsmBucketInfo bucketInfo) throws IOException { + public void createBucket(OmBucketInfo bucketInfo) throws IOException { Preconditions.checkNotNull(bucketInfo); metadataManager.writeLock().lock(); String volumeName = bucketInfo.getVolumeName(); @@ -88,17 +88,17 @@ public void createBucket(KsmBucketInfo bucketInfo) throws IOException { //Check if the volume exists if (metadataManager.get(volumeKey) == null) { LOG.debug("volume: {} not found ", volumeName); - throw new KSMException("Volume doesn't exist", - KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException("Volume doesn't exist", + OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); } //Check if bucket already exists if (metadataManager.get(bucketKey) != null) { LOG.debug("bucket: {} already exists ", bucketName); - throw new KSMException("Bucket already exist", - KSMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS); + throw new OMException("Bucket already exist", + OMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS); } - KsmBucketInfo ksmBucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(bucketInfo.getVolumeName()) .setBucketName(bucketInfo.getBucketName()) .setAcls(bucketInfo.getAcls()) @@ -106,11 +106,11 @@ public void createBucket(KsmBucketInfo bucketInfo) throws IOException { .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) .setCreationTime(Time.now()) .build(); - metadataManager.put(bucketKey, ksmBucketInfo.getProtobuf().toByteArray()); + metadataManager.put(bucketKey, omBucketInfo.getProtobuf().toByteArray()); LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); } catch (IOException | DBException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, ex); } @@ -127,7 +127,7 @@ public void createBucket(KsmBucketInfo bucketInfo) throws IOException { * @param bucketName - Name of the Bucket. 
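The createBucket hunk above is a check-then-put: take the write lock, verify the volume exists and the bucket does not, then persist the serialized OmBucketInfo. A self-contained, JDK-only rendering of that pattern, with a plain map standing in for the OM metadata store and IllegalStateException standing in for OMException:

```
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** JDK-only sketch of the check-then-put pattern used by createBucket above. */
public final class CheckThenPutSketch {

  private final Map<String, byte[]> store = new ConcurrentHashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  void createBucket(String volume, String bucket, byte[] serializedInfo) {
    lock.writeLock().lock();
    try {
      String volumeKey = "/" + volume;                  // placeholder key scheme
      String bucketKey = "/" + volume + "/" + bucket;
      if (!store.containsKey(volumeKey)) {
        throw new IllegalStateException("Volume doesn't exist");   // FAILED_VOLUME_NOT_FOUND in the patch
      }
      if (store.containsKey(bucketKey)) {
        throw new IllegalStateException("Bucket already exist");   // FAILED_BUCKET_ALREADY_EXISTS
      }
      store.put(bucketKey, serializedInfo);             // the patch stores the OmBucketInfo protobuf bytes
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```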
*/ @Override - public KsmBucketInfo getBucketInfo(String volumeName, String bucketName) + public OmBucketInfo getBucketInfo(String volumeName, String bucketName) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -138,12 +138,12 @@ public KsmBucketInfo getBucketInfo(String volumeName, String bucketName) if (value == null) { LOG.debug("bucket: {} not found in volume: {}.", bucketName, volumeName); - throw new KSMException("Bucket not found", - KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); + throw new OMException("Bucket not found", + OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); } - return KsmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value)); + return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value)); } catch (IOException | DBException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Exception while getting bucket info for bucket: {}", bucketName, ex); } @@ -159,7 +159,7 @@ public KsmBucketInfo getBucketInfo(String volumeName, String bucketName) * @throws IOException */ @Override - public void setBucketProperty(KsmBucketArgs args) throws IOException { + public void setBucketProperty(OmBucketArgs args) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); String volumeName = args.getVolumeName(); @@ -170,19 +170,19 @@ public void setBucketProperty(KsmBucketArgs args) throws IOException { if(metadataManager.get(metadataManager.getVolumeKey(volumeName)) == null) { LOG.debug("volume: {} not found ", volumeName); - throw new KSMException("Volume doesn't exist", - KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException("Volume doesn't exist", + OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); } byte[] value = metadataManager.get(bucketKey); //Check if bucket exist if(value == null) { LOG.debug("bucket: {} not found ", bucketName); - throw new KSMException("Bucket doesn't exist", - KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); + throw new OMException("Bucket doesn't exist", + OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); } - KsmBucketInfo oldBucketInfo = KsmBucketInfo.getFromProtobuf( + OmBucketInfo oldBucketInfo = OmBucketInfo.getFromProtobuf( BucketInfo.parseFrom(value)); - KsmBucketInfo.Builder bucketInfoBuilder = KsmBucketInfo.newBuilder(); + OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName()) .setBucketName(oldBucketInfo.getBucketName()); @@ -221,7 +221,7 @@ public void setBucketProperty(KsmBucketArgs args) throws IOException { metadataManager.put(bucketKey, bucketInfoBuilder.build().getProtobuf().toByteArray()); } catch (IOException | DBException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Setting bucket property failed for bucket:{} in volume:{}", bucketName, volumeName, ex); } @@ -269,24 +269,24 @@ public void deleteBucket(String volumeName, String bucketName) if (metadataManager.get(metadataManager.getVolumeKey(volumeName)) == null) { LOG.debug("volume: {} not found ", volumeName); - throw new KSMException("Volume doesn't exist", - KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException("Volume doesn't exist", + OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); } //Check if bucket exist if (metadataManager.get(bucketKey) == null) { LOG.debug("bucket: {} not found ", bucketName); - throw new KSMException("Bucket doesn't exist", - 
KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); + throw new OMException("Bucket doesn't exist", + OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); } //Check if bucket is empty if (!metadataManager.isBucketEmpty(volumeName, bucketName)) { LOG.debug("bucket: {} is not empty ", bucketName); - throw new KSMException("Bucket is not empty", - KSMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY); + throw new OMException("Bucket is not empty", + OMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY); } metadataManager.delete(bucketKey); } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, volumeName, ex); } @@ -300,8 +300,8 @@ public void deleteBucket(String volumeName, String bucketName) * {@inheritDoc} */ @Override - public List listBuckets(String volumeName, - String startBucket, String bucketPrefix, int maxNumOfBuckets) + public List listBuckets(String volumeName, + String startBucket, String bucketPrefix, int maxNumOfBuckets) throws IOException { Preconditions.checkNotNull(volumeName); metadataManager.readLock().lock(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java similarity index 88% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java index e51ab28d02..ee23fe06fa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -35,12 +35,12 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; /** * This is the background service to delete keys. - * Scan the metadata of ksm periodically to get + * Scan the metadata of om periodically to get * the keys with prefix "#deleting" and ask scm to * delete metadata accordingly, if scm returns * success for keys, then clean up those keys. @@ -76,7 +76,7 @@ public BackgroundTaskQueue getTasks() { } /** - * A key deleting task scans KSM DB and looking for a certain number + * A key deleting task scans OM DB and looking for a certain number * of pending-deletion keys, sends these keys along with their associated * blocks to SCM for deletion. 
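The KeyDeletingService javadoc above describes the loop: periodically scan OM metadata for entries under the deleting prefix, hand them to SCM, and purge only the ones SCM confirms. A JDK-only rendering of that loop follows; the map, the confirmation callback and the interval are stand-ins for the OM DB, the SCM round trip and the configured service interval.

```
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

/** JDK-only sketch of the background deletion loop described above. */
public final class DeletingKeyScanSketch {

  private static final String DELETING_PREFIX = "#deleting#";   // prefix named in the javadocs

  private final Map<String, byte[]> omDb = new ConcurrentHashMap<>();
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();

  /** 'scmConfirmsDeletion' stands in for the SCM deleteKeyBlocks round trip. */
  void start(Predicate<String> scmConfirmsDeletion, long intervalSeconds) {
    scheduler.scheduleWithFixedDelay(() -> {
      List<String> pending = new ArrayList<>();
      for (String key : omDb.keySet()) {
        if (key.startsWith(DELETING_PREFIX)) {
          pending.add(key);
        }
      }
      for (String key : pending) {
        if (scmConfirmsDeletion.test(key)) {   // purge only what SCM confirmed
          omDb.remove(key);
        }                                      // otherwise retain and retry on the next run
      }
    }, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
  }
}
```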
Once SCM confirms keys are deleted (once * SCM persisted the blocks info in its deletedBlockLog), it removes @@ -97,15 +97,15 @@ public BackgroundTaskResult call() throws Exception { List keyBlocksList = manager .getPendingDeletionKeys(keyLimitPerTask); if (keyBlocksList.size() > 0) { - LOG.info("Found {} to-delete keys in KSM", keyBlocksList.size()); + LOG.info("Found {} to-delete keys in OM", keyBlocksList.size()); List results = scmClient.deleteKeyBlocks(keyBlocksList); for (DeleteBlockGroupResult result : results) { if (result.isSuccess()) { try { - // Purge key from KSM DB. + // Purge key from OM DB. manager.deletePendingDeletionKey(result.getObjectKey()); - LOG.debug("Key {} deleted from KSM DB", result.getObjectKey()); + LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); } catch (IOException e) { // if a pending deletion key is failed to delete, // print a warning here and retain it in this state, @@ -123,14 +123,14 @@ public BackgroundTaskResult call() throws Exception { } if (!results.isEmpty()) { - LOG.info("Number of key deleted from KSM DB: {}," + LOG.info("Number of key deleted from OM DB: {}," + " task elapsed time: {}ms", results.size(), Time.monotonicNow() - startTime); } return results::size; } else { - LOG.debug("No pending deletion key found in KSM"); + LOG.debug("No pending deletion key found in OM"); } } catch (IOException e) { LOG.error("Unable to get pending deletion keys, retry in" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java similarity index 82% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 5ec1db8c5a..226c07d6fe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -14,13 +14,13 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import java.io.IOException; import java.util.List; @@ -49,7 +49,7 @@ public interface KeyManager { * @param clientID the client that is committing. * @throws IOException */ - void commitKey(KsmKeyArgs args, int clientID) throws IOException; + void commitKey(OmKeyArgs args, int clientID) throws IOException; /** * A client calls this on an open key, to request to allocate a new block, @@ -60,30 +60,30 @@ public interface KeyManager { * @return the reference to the new block. * @throws IOException */ - KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) + OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID) throws IOException; /** * Given the args of a key to put, write an open key entry to meta data. 
* * In case that the container creation or key write failed on - * DistributedStorageHandler, this key's metadata will still stay in KSM. + * DistributedStorageHandler, this key's metadata will still stay in OM. * TODO garbage collect the open keys that never get closed * * @param args the args of the key provided by client. * @return a OpenKeySession instance client uses to talk to container. * @throws Exception */ - OpenKeySession openKey(KsmKeyArgs args) throws IOException; + OpenKeySession openKey(OmKeyArgs args) throws IOException; /** * Look up an existing key. Return the info of the key to client side, which * DistributedStorageHandler will use to access the data on datanode. * * @param args the args of the key provided by client. - * @return a KsmKeyInfo instance client uses to talk to container. + * @return a OmKeyInfo instance client uses to talk to container. * @throws IOException */ - KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException; + OmKeyInfo lookupKey(OmKeyArgs args) throws IOException; /** * Renames an existing key within a bucket. @@ -93,21 +93,21 @@ KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) * @throws IOException if specified key doesn't exist or * some other I/O errors while renaming the key. */ - void renameKey(KsmKeyArgs args, String toKeyName) throws IOException; + void renameKey(OmKeyArgs args, String toKeyName) throws IOException; /** * Deletes an object by an object key. The key will be immediately removed - * from KSM namespace and become invisible to clients. The object data + * from OM namespace and become invisible to clients. The object data * will be removed in async manner that might retain for some time. * * @param args the args of the key provided by client. * @throws IOException if specified key doesn't exist or * some other I/O errors while deleting an object. */ - void deleteKey(KsmKeyArgs args) throws IOException; + void deleteKey(OmKeyArgs args) throws IOException; /** - * Returns a list of keys represented by {@link KsmKeyInfo} + * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. * * @param volumeName @@ -127,15 +127,15 @@ KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) * @return a list of keys. * @throws IOException */ - List listKeys(String volumeName, - String bucketName, String startKey, String keyPrefix, int maxKeys) + List listKeys(String volumeName, + String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException; /** * Returns a list of pending deletion key info that ups to the given count. * Each entry is a {@link BlockGroup}, which contains the info about the * key name and all its associated block IDs. A pending deletion key is - * stored with #deleting# prefix in KSM DB. + * stored with #deleting# prefix in OM DB. * * @param count max number of keys to return. * @return a list of {@link BlockGroup} representing keys and blocks. @@ -146,7 +146,7 @@ List listKeys(String volumeName, /** * Deletes a pending deletion key by its name. This is often called when * key can be safely deleted from this layer. Once called, all footprints - * of the key will be purged from KSM DB. + * of the key will be purged from OM DB. * * @param objectKeyName object key name with #deleting# prefix. * @throws IOException if specified key doesn't exist or other I/O errors. @@ -156,7 +156,7 @@ List listKeys(String volumeName, /** * Returns a list of all still open key info. Which contains the info about * the key name and all its associated block IDs. 
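The KeyManager javadocs above imply a three-step write lifecycle: openKey records an #open# entry (and may preallocate blocks), allocateBlock fetches more blocks as the client keeps writing, and commitKey makes the key visible. A JDK-only rendering with simplified stand-in signatures; the real methods take OmKeyArgs and return OpenKeySession / OmKeyLocationInfo.

```
/** Illustrative only: the write lifecycle implied by the KeyManager javadocs above. */
public final class OpenKeyLifecycleSketch {

  /** Local stand-in mirroring the KeyManager calls; not the real interface. */
  interface KeyWriter {
    int openKey(String volume, String bucket, String key, long size);          // records an #open# entry
    void allocateBlock(String volume, String bucket, String key, int clientId);
    void commitKey(String volume, String bucket, String key, int clientId);
  }

  static void writeKey(KeyWriter km, String volume, String bucket, String key,
      long size, int extraBlocks) {
    int clientId = km.openKey(volume, bucket, key, size);   // may preallocate blocks up front
    for (int i = 0; i < extraBlocks; i++) {
      km.allocateBlock(volume, bucket, key, clientId);      // fetch more blocks as the data grows
    }
    km.commitKey(volume, bucket, key, clientId);            // key becomes visible only after commit
  }

  private OpenKeyLifecycleSketch() {
  }
}
```

If the client never reaches commitKey, the entry stays under the #open# prefix until the expired-open-key cleanup described above removes it.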
A pending open key has - * prefix #open# in KSM DB. + * prefix #open# in OM DB. * * @return a list of {@link BlockGroup} representing keys and blocks. * @throws IOException @@ -166,7 +166,7 @@ List listKeys(String volumeName, /** * Deletes a expired open key by its name. Called when a hanging key has been * lingering for too long. Once called, the open key entries gets removed - * from KSM mdata data. + * from OM mdata data. * * @param objectKeyName object key name with #open# prefix. * @throws IOException if specified key doesn't exist or other I/O errors. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java similarity index 83% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 0d4cfda45c..ba92a29e81 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -14,23 +14,23 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.KeyInfo; + .OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.util.Time; @@ -88,7 +88,7 @@ public class KeyManagerImpl implements KeyManager { * A SCM block client, used to talk to SCM to allocate block during putKey. 
*/ private final ScmBlockLocationProtocol scmBlockClient; - private final KSMMetadataManager metadataManager; + private final OMMetadataManager metadataManager; private final long scmBlockSize; private final boolean useRatis; private final BackgroundService keyDeletingService; @@ -96,11 +96,11 @@ public class KeyManagerImpl implements KeyManager { private final long preallocateMax; private final Random random; - private final String ksmId; + private final String omId; public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient, - KSMMetadataManager metadataManager, OzoneConfiguration conf, - String ksmId) { + OMMetadataManager metadataManager, OzoneConfiguration conf, + String omId) { this.scmBlockClient = scmBlockClient; this.metadataManager = metadataManager; this.scmBlockSize = conf.getLong(OZONE_SCM_BLOCK_SIZE_IN_MB, @@ -126,7 +126,7 @@ public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient, openKeyCleanupService = new OpenKeyCleanupService( scmBlockClient, this, openkeyCheckInterval, serviceTimeout); random = new Random(); - this.ksmId = ksmId; + this.omId = omId; } @VisibleForTesting @@ -154,19 +154,19 @@ private void validateBucket(String volumeName, String bucketName) //Check if the volume exists if(metadataManager.get(volumeKey) == null) { LOG.error("volume not found: {}", volumeName); - throw new KSMException("Volume not found", - KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException("Volume not found", + OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND); } //Check if bucket already exists if(metadataManager.get(bucketKey) == null) { LOG.error("bucket not found: {}/{} ", volumeName, bucketName); - throw new KSMException("Bucket not found", - KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); + throw new OMException("Bucket not found", + OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND); } } @Override - public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) + public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); @@ -183,15 +183,15 @@ public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) if (keyData == null) { LOG.error("Allocate block for a key not in open status in meta store " + objectKey + " with ID " + clientID); - throw new KSMException("Open Key not found", - KSMException.ResultCodes.FAILED_KEY_NOT_FOUND); + throw new OMException("Open Key not found", + OMException.ResultCodes.FAILED_KEY_NOT_FOUND); } - KsmKeyInfo keyInfo = - KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData)); + OmKeyInfo keyInfo = + OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData)); AllocatedBlock allocatedBlock = scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(), - keyInfo.getFactor(), ksmId); - KsmKeyLocationInfo info = new KsmKeyLocationInfo.Builder() + keyInfo.getFactor(), omId); + OmKeyLocationInfo info = new OmKeyLocationInfo.Builder() .setBlockID(allocatedBlock.getBlockID()) .setShouldCreateContainer(allocatedBlock.getCreateContainer()) .setLength(scmBlockSize) @@ -209,7 +209,7 @@ public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) } @Override - public OpenKeySession openKey(KsmKeyArgs args) throws IOException { + public OpenKeySession openKey(OmKeyArgs args) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); String volumeName = args.getVolumeName(); @@ -219,7 +219,7 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { ReplicationType type = 
args.getType(); // If user does not specify a replication strategy or - // replication factor, KSM will use defaults. + // replication factor, OM will use defaults. if(factor == null) { factor = useRatis ? ReplicationFactor.THREE: ReplicationFactor.ONE; } @@ -231,19 +231,19 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { try { validateBucket(volumeName, bucketName); long requestedSize = Math.min(preallocateMax, args.getDataSize()); - List locations = new ArrayList<>(); + List locations = new ArrayList<>(); String objectKey = metadataManager.getKeyWithDBPrefix( volumeName, bucketName, keyName); // requested size is not required but more like a optimization: // SCM looks at the requested, if it 0, no block will be allocated at // the point, if client needs more blocks, client can always call - // allocateBlock. But if requested size is not 0, KSM will preallocate + // allocateBlock. But if requested size is not 0, OM will preallocate // some blocks and piggyback to client, to save RPC calls. while (requestedSize > 0) { long allocateSize = Math.min(scmBlockSize, requestedSize); AllocatedBlock allocatedBlock = - scmBlockClient.allocateBlock(allocateSize, type, factor, ksmId); - KsmKeyLocationInfo subKeyInfo = new KsmKeyLocationInfo.Builder() + scmBlockClient.allocateBlock(allocateSize, type, factor, omId); + OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder() .setBlockID(allocatedBlock.getBlockID()) .setShouldCreateContainer(allocatedBlock.getCreateContainer()) .setLength(allocateSize) @@ -260,11 +260,11 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { byte[] keyKey = metadataManager.getDBKeyBytes( volumeName, bucketName, keyName); byte[] value = metadataManager.get(keyKey); - KsmKeyInfo keyInfo; + OmKeyInfo keyInfo; long openVersion; if (value != null) { // the key already exist, the new blocks will be added as new version - keyInfo = KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value)); + keyInfo = OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value)); // when locations.size = 0, the new version will have identical blocks // as its previous version openVersion = keyInfo.addNewVersion(locations); @@ -273,12 +273,12 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { // the key does not exist, create a new object, the new blocks are the // version 0 long currentTime = Time.now(); - keyInfo = new KsmKeyInfo.Builder() + keyInfo = new OmKeyInfo.Builder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) - .setKsmKeyLocationInfos(Collections.singletonList( - new KsmKeyLocationInfoGroup(0, locations))) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, locations))) .setCreationTime(currentTime) .setModificationTime(currentTime) .setDataSize(size) @@ -305,22 +305,22 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { LOG.debug("Key {} allocated in volume {} bucket {}", keyName, volumeName, bucketName); return new OpenKeySession(id, keyInfo, openVersion); - } catch (KSMException e) { + } catch (OMException e) { throw e; } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Key open failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex); } - throw new KSMException(ex.getMessage(), - KSMException.ResultCodes.FAILED_KEY_ALLOCATION); + throw new OMException(ex.getMessage(), + OMException.ResultCodes.FAILED_KEY_ALLOCATION); } finally { 
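The openKey hunk above caps preallocation at preallocateMax and then carves the request into SCM-block-sized pieces. A JDK-only helper reproducing that arithmetic; the decrement of the remaining size is implied by the loop but not visible in this excerpt.

```
import java.util.ArrayList;
import java.util.List;

/** JDK-only sketch of the openKey preallocation arithmetic above. */
public final class PreallocationSketch {

  /** Sizes of the blocks OM would ask SCM for up front. */
  static List<Long> preallocate(long requestedDataSize, long scmBlockSize, long preallocateMax) {
    List<Long> blockSizes = new ArrayList<>();
    long remaining = Math.min(preallocateMax, requestedDataSize); // cap, as in the hunk above
    while (remaining > 0) {
      long allocateSize = Math.min(scmBlockSize, remaining);      // one SCM block at a time
      blockSizes.add(allocateSize);
      remaining -= allocateSize;                                  // decrement implied by the loop
    }
    return blockSizes;                                            // empty for a zero-size request
  }

  private PreallocationSketch() {
  }
}
```

For example, a 600 MB request with a 256 MB block size and an 800 MB cap yields blocks of 256 MB, 256 MB and 88 MB; anything past the cap is obtained later through allocateBlock.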
metadataManager.writeLock().unlock(); } } @Override - public void commitKey(KsmKeyArgs args, int clientID) throws IOException { + public void commitKey(OmKeyArgs args, int clientID) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); String volumeName = args.getVolumeName(); @@ -335,31 +335,31 @@ public void commitKey(KsmKeyArgs args, int clientID) throws IOException { byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID); byte[] openKeyData = metadataManager.get(openKey); if (openKeyData == null) { - throw new KSMException("Commit a key without corresponding entry " + + throw new OMException("Commit a key without corresponding entry " + DFSUtil.bytes2String(openKey), ResultCodes.FAILED_KEY_NOT_FOUND); } - KsmKeyInfo keyInfo = - KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData)); + OmKeyInfo keyInfo = + OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData)); keyInfo.setDataSize(args.getDataSize()); keyInfo.setModificationTime(Time.now()); BatchOperation batch = new BatchOperation(); batch.delete(openKey); batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray()); metadataManager.writeBatch(batch); - } catch (KSMException e) { + } catch (OMException e) { throw e; } catch (IOException ex) { LOG.error("Key commit failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex); - throw new KSMException(ex.getMessage(), - KSMException.ResultCodes.FAILED_KEY_ALLOCATION); + throw new OMException(ex.getMessage(), + OMException.ResultCodes.FAILED_KEY_ALLOCATION); } finally { metadataManager.writeLock().unlock(); } } @Override - public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { + public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); String volumeName = args.getVolumeName(); @@ -372,22 +372,22 @@ public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { if (value == null) { LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, bucketName, keyName); - throw new KSMException("Key not found", - KSMException.ResultCodes.FAILED_KEY_NOT_FOUND); + throw new OMException("Key not found", + OMException.ResultCodes.FAILED_KEY_NOT_FOUND); } - return KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value)); + return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value)); } catch (DBException ex) { LOG.error("Get key failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex); - throw new KSMException(ex.getMessage(), - KSMException.ResultCodes.FAILED_KEY_NOT_FOUND); + throw new OMException(ex.getMessage(), + OMException.ResultCodes.FAILED_KEY_NOT_FOUND); } finally { metadataManager.writeLock().unlock(); } } @Override - public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { + public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { Preconditions.checkNotNull(args); Preconditions.checkNotNull(toKeyName); String volumeName = args.getVolumeName(); @@ -396,7 +396,7 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { if (toKeyName.length() == 0 || fromKeyName.length() == 0) { LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.", volumeName, bucketName, fromKeyName, toKeyName); - throw new KSMException("Key name is empty", + throw new OMException("Key name is empty", ResultCodes.FAILED_INVALID_KEY_NAME); } @@ -412,8 +412,8 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { "Rename key failed 
for volume:{} bucket:{} fromKey:{} toKey:{}. " + "Key: {} not found.", volumeName, bucketName, fromKeyName, toKeyName, fromKeyName); - throw new KSMException("Key not found", - KSMException.ResultCodes.FAILED_KEY_NOT_FOUND); + throw new OMException("Key not found", + OMException.ResultCodes.FAILED_KEY_NOT_FOUND); } // toKeyName should not exist @@ -425,16 +425,16 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. " + "Key: {} already exists.", volumeName, bucketName, fromKeyName, toKeyName, toKeyName); - throw new KSMException("Key not found", - KSMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS); + throw new OMException("Key not found", + OMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS); } if (fromKeyName.equals(toKeyName)) { return; } - KsmKeyInfo newKeyInfo = - KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue)); + OmKeyInfo newKeyInfo = + OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue)); newKeyInfo.setKeyName(toKeyName); newKeyInfo.updateModifcationTime(); BatchOperation batch = new BatchOperation(); @@ -444,7 +444,7 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { } catch (DBException ex) { LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.", volumeName, bucketName, fromKeyName, toKeyName, ex); - throw new KSMException(ex.getMessage(), + throw new OMException(ex.getMessage(), ResultCodes.FAILED_KEY_RENAME); } finally { metadataManager.writeLock().unlock(); @@ -452,7 +452,7 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { } @Override - public void deleteKey(KsmKeyArgs args) throws IOException { + public void deleteKey(OmKeyArgs args) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); String volumeName = args.getVolumeName(); @@ -463,8 +463,8 @@ public void deleteKey(KsmKeyArgs args) throws IOException { volumeName, bucketName, keyName); byte[] objectValue = metadataManager.get(objectKey); if (objectValue == null) { - throw new KSMException("Key not found", - KSMException.ResultCodes.FAILED_KEY_NOT_FOUND); + throw new OMException("Key not found", + OMException.ResultCodes.FAILED_KEY_NOT_FOUND); } byte[] deletingKey = metadataManager.getDeletedKeyName(objectKey); BatchOperation batch = new BatchOperation(); @@ -474,7 +474,7 @@ public void deleteKey(KsmKeyArgs args) throws IOException { } catch (DBException ex) { LOG.error(String.format("Delete key failed for volume:%s " + "bucket:%s key:%s", volumeName, bucketName, keyName), ex); - throw new KSMException(ex.getMessage(), ex, + throw new OMException(ex.getMessage(), ex, ResultCodes.FAILED_KEY_DELETION); } finally { metadataManager.writeLock().unlock(); @@ -482,8 +482,8 @@ public void deleteKey(KsmKeyArgs args) throws IOException { } @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, int maxKeys) throws IOException { + public List listKeys(String volumeName, String bucketName, + String startKey, String keyPrefix, int maxKeys) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); @@ -516,7 +516,7 @@ public void deletePendingDeletionKey(String objectKeyName) + " the name should be the key name with deleting prefix"); } - // Simply removes the entry from KSM DB. + // Simply removes the entry from OM DB. 
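The deleteKey hunk above does not drop the entry outright; it re-files it under the deleted-key name (the #deleting# prefix) in one batch, so the key disappears from the namespace immediately while block reclamation stays asynchronous. A JDK-only rendering of that move, with a map under a write lock standing in for the batched DB write:

```
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** JDK-only sketch of deleteKey above: re-file the entry for asynchronous cleanup. */
public final class DeleteKeySketch {

  private static final String DELETING_PREFIX = "#deleting#";

  private final Map<String, byte[]> omDb = new ConcurrentHashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void deleteKey(String keyName) {
    lock.writeLock().lock();
    try {
      byte[] value = omDb.get(keyName);
      if (value == null) {
        throw new IllegalStateException("Key not found");    // FAILED_KEY_NOT_FOUND in the patch
      }
      omDb.put(DELETING_PREFIX + keyName, value);             // becomes invisible to clients
      omDb.remove(keyName);                                   // actual block deletion happens later
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```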
metadataManager.writeLock().lock(); try { byte[] pendingDelKey = DFSUtil.string2Bytes(objectKeyName); @@ -549,7 +549,7 @@ public void deleteExpiredOpenKey(String objectKeyName) throws IOException { + " the name should be the key name with open key prefix"); } - // Simply removes the entry from KSM DB. + // Simply removes the entry from OM DB. metadataManager.writeLock().lock(); try { byte[] openKey = DFSUtil.string2Bytes(objectKeyName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java similarity index 86% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java index bf223324ae..3ab9f47568 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java @@ -16,16 +16,16 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; /** - * This is the JMX management interface for ksm information. + * This is the JMX management interface for OM information. */ @InterfaceAudience.Private -public interface KSMMXBean extends ServiceRuntimeInfo { +public interface OMMXBean extends ServiceRuntimeInfo { String getRpcPort(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java similarity index 85% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index f5a2d5bb0d..f2e78e661c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -14,13 +14,13 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataStore; @@ -29,9 +29,9 @@ import java.util.concurrent.locks.Lock; /** - * KSM metadata manager interface. + * OM metadata manager interface. */ -public interface KSMMetadataManager { +public interface OMMetadataManager { /** * Start metadata manager. */ @@ -117,7 +117,7 @@ public interface KSMMetadataManager { byte[] getDBKeyBytes(String volume, String bucket, String key); /** - * Returns the DB key name of a deleted key in KSM metadata store. + * Returns the DB key name of a deleted key in OM metadata store. 
* The name for a deleted key has prefix #deleting# followed by * the actual key name. * @param keyName - key name @@ -126,7 +126,7 @@ public interface KSMMetadataManager { byte[] getDeletedKeyName(byte[] keyName); /** - * Returns the DB key name of a open key in KSM metadata store. + * Returns the DB key name of a open key in OM metadata store. * Should be #open# prefix followed by actual key name. * @param keyName - key name * @param id - the id for this open @@ -163,7 +163,7 @@ String getKeyWithDBPrefix(String volumeName, String bucketName, boolean isBucketEmpty(String volume, String bucket) throws IOException; /** - * Returns a list of buckets represented by {@link KsmBucketInfo} + * Returns a list of buckets represented by {@link OmBucketInfo} * in the given volume. * * @param volumeName @@ -182,11 +182,11 @@ String getKeyWithDBPrefix(String volumeName, String bucketName, * @return a list of buckets. * @throws IOException */ - List listBuckets(String volumeName, String startBucket, - String bucketPrefix, int maxNumOfBuckets) throws IOException; + List listBuckets(String volumeName, String startBucket, + String bucketPrefix, int maxNumOfBuckets) throws IOException; /** - * Returns a list of keys represented by {@link KsmKeyInfo} + * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. * * @param volumeName @@ -206,8 +206,8 @@ List listBuckets(String volumeName, String startBucket, * @return a list of keys. * @throws IOException */ - List listKeys(String volumeName, - String bucketName, String startKey, String keyPrefix, int maxKeys) + List listKeys(String volumeName, + String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException; /** @@ -223,17 +223,17 @@ List listKeys(String volumeName, * this key is excluded from the result. * @param maxKeys * the maximum number of volumes to return. - * @return a list of {@link KsmVolumeArgs} + * @return a list of {@link OmVolumeArgs} * @throws IOException */ - List listVolumes(String userName, String prefix, - String startKey, int maxKeys) throws IOException; + List listVolumes(String userName, String prefix, + String startKey, int maxKeys) throws IOException; /** * Returns a list of pending deletion key info that ups to the given count. * Each entry is a {@link BlockGroup}, which contains the info about the * key name and all its associated block IDs. A pending deletion key is - * stored with #deleting# prefix in KSM DB. + * stored with #deleting# prefix in OM DB. * * @param count max number of keys to return. * @return a list of {@link BlockGroup} represent keys and blocks. @@ -244,7 +244,7 @@ List listVolumes(String userName, String prefix, /** * Returns a list of all still open key info. Which contains the info about * the key name and all its associated block IDs. A pending open key has - * prefix #open# in KSM DB. + * prefix #open# in OM DB. * * @return a list of {@link BlockGroup} representing keys and blocks. 
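The OMMetadataManager javadocs above spell out the internal naming: deleted keys carry the #deleting# prefix, and open keys carry the #open# prefix plus an id. A minimal rendering follows; the "#" id delimiter and the ordering of name and id are assumptions (the patch imports OPEN_KEY_ID_DELIMINATOR but does not show its value).

```
/** Illustrative only: the internal key names documented above. */
public final class InternalKeyNames {

  private static final String DELETING_PREFIX = "#deleting#";
  private static final String OPEN_PREFIX = "#open#";
  private static final String OPEN_ID_DELIMITER = "#";   // assumed value, not shown in the patch

  /** Name under which a deleted key waits for block reclamation. */
  static String deletedKeyName(String keyName) {
    return DELETING_PREFIX + keyName;
  }

  /** Name under which an uncommitted (open) key is tracked, per open id. */
  static String openKeyName(String keyName, int id) {
    return OPEN_PREFIX + keyName + OPEN_ID_DELIMITER + id;   // ordering is illustrative
  }

  private InternalKeyNames() {
  }
}
```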
* @throws IOException diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java similarity index 96% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 8ee67c3e27..2d044521ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.annotations.VisibleForTesting; @@ -27,20 +27,20 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; /** - * This class is for maintaining KeySpaceManager statistics. + * This class is for maintaining Ozone Manager statistics. */ @InterfaceAudience.Private -@Metrics(about="Key Space Manager Metrics", context="dfs") -public class KSMMetrics { +@Metrics(about="Ozone Manager Metrics", context="dfs") +public class OMMetrics { private static final String SOURCE_NAME = - KSMMetrics.class.getSimpleName(); + OMMetrics.class.getSimpleName(); - // KSM request type op metrics + // OM request type op metrics private @Metric MutableCounterLong numVolumeOps; private @Metric MutableCounterLong numBucketOps; private @Metric MutableCounterLong numKeyOps; - // KSM op metrics + // OM op metrics private @Metric MutableCounterLong numVolumeCreates; private @Metric MutableCounterLong numVolumeUpdates; private @Metric MutableCounterLong numVolumeInfos; @@ -82,14 +82,14 @@ public class KSMMetrics { private @Metric MutableCounterLong numBlockAllocateCallFails; private @Metric MutableCounterLong numGetServiceListFails; - public KSMMetrics() { + public OMMetrics() { } - public static KSMMetrics create() { + public static OMMetrics create() { MetricsSystem ms = DefaultMetricsSystem.instance(); return ms.register(SOURCE_NAME, - "Key Space Manager Metrics", - new KSMMetrics()); + "Oozne Manager Metrics", + new OMMetrics()); } public void incNumVolumeCreates() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java similarity index 63% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java index 015bed6986..3820aed76a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import java.io.IOException; import java.util.Properties; @@ -29,35 +29,35 @@ import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; /** - * KSMStorage is responsible for management of the StorageDirectories used by - * the KSM. + * OMStorage is responsible for management of the StorageDirectories used by + * the Ozone Manager. 
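OMMetrics above is a standard Hadoop metrics2 source: @Metric counters on a @Metrics-annotated class, registered through DefaultMetricsSystem. A small illustrative source using the same calls; the class name, counter name and description string below are samples, not part of this patch.

```
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

/** Illustrative metrics source mirroring the OMMetrics registration above. */
@Metrics(about = "Sample Ozone Manager metrics", context = "dfs")
public final class SampleOmMetrics {

  private @Metric MutableCounterLong numSampleOps;

  public static SampleOmMetrics create() {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    // Same register(name, description, source) call used by OMMetrics.create().
    return ms.register(SampleOmMetrics.class.getSimpleName(),
        "Sample Ozone Manager metrics", new SampleOmMetrics());
  }

  public void incNumSampleOps() {
    numSampleOps.incr();   // counters are injected by the metrics system on registration
  }
}
```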
*/ -public class KSMStorage extends Storage { +public class OMStorage extends Storage { - public static final String STORAGE_DIR = "ksm"; - public static final String KSM_ID = "ksmUuid"; + public static final String STORAGE_DIR = "om"; + public static final String OM_ID = "omUuid"; /** - * Construct KSMStorage. + * Construct OMStorage. * @throws IOException if any directories are inaccessible. */ - public KSMStorage(OzoneConfiguration conf) throws IOException { - super(NodeType.KSM, getOzoneMetaDirPath(conf), STORAGE_DIR); + public OMStorage(OzoneConfiguration conf) throws IOException { + super(NodeType.OM, getOzoneMetaDirPath(conf), STORAGE_DIR); } public void setScmId(String scmId) throws IOException { if (getState() == StorageState.INITIALIZED) { - throw new IOException("KSM is already initialized."); + throw new IOException("OM is already initialized."); } else { getStorageInfo().setProperty(SCM_ID, scmId); } } - public void setKsmId(String ksmId) throws IOException { + public void setOmId(String omId) throws IOException { if (getState() == StorageState.INITIALIZED) { - throw new IOException("KSM is already initialized."); + throw new IOException("OM is already initialized."); } else { - getStorageInfo().setProperty(KSM_ID, ksmId); + getStorageInfo().setProperty(OM_ID, omId); } } @@ -70,21 +70,21 @@ public String getScmId() { } /** - * Retrieves the KSM ID from the version file. - * @return KSM_ID + * Retrieves the OM ID from the version file. + * @return OM_ID */ - public String getKsmId() { - return getStorageInfo().getProperty(KSM_ID); + public String getOmId() { + return getStorageInfo().getProperty(OM_ID); } @Override protected Properties getNodeProperties() { - String ksmId = getKsmId(); - if (ksmId == null) { - ksmId = UUID.randomUUID().toString(); + String omId = getOmId(); + if (omId == null) { + omId = UUID.randomUUID().toString(); } - Properties ksmProperties = new Properties(); - ksmProperties.setProperty(KSM_ID, ksmId); - return ksmProperties; + Properties omProperties = new Properties(); + omProperties.setProperty(OM_ID, omId); + return omProperties; } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java similarity index 79% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6664a32462..21d24114c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
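OMStorage.getNodeProperties() above reuses the stored omUuid if one exists and mints a new one otherwise, so the VERSION file always carries an OM identity. A JDK-only rendering of that step; the method shape is simplified to a pure function for illustration.

```
import java.util.Properties;
import java.util.UUID;

/** JDK-only sketch of the omUuid handling in OMStorage.getNodeProperties() above. */
public final class OmIdPropertiesSketch {

  static final String OM_ID = "omUuid";   // property key from the hunk above

  static Properties nodeProperties(String existingOmId) {
    String omId = existingOmId != null ? existingOmId : UUID.randomUUID().toString();
    Properties omProperties = new Properties();
    omProperties.setProperty(OM_ID, omId);
    return omProperties;
  }

  private OmIdPropertiesSketch() {
  }
}
```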
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; @@ -22,19 +22,19 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; @@ -58,32 +58,32 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_DB_CACHE_SIZE_MB; import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; /** - * KSM metadata manager interface. + * Ozone metadata manager interface. 
*/ -public class KSMMetadataManagerImpl implements KSMMetadataManager { +public class OmMetadataManagerImpl implements OMMetadataManager { private final MetadataStore store; private final ReadWriteLock lock; private final long openKeyExpireThresholdMS; - public KSMMetadataManagerImpl(OzoneConfiguration conf) throws IOException { + public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { File metaDir = getOzoneMetaDirPath(conf); - final int cacheSize = conf.getInt(OZONE_KSM_DB_CACHE_SIZE_MB, - OZONE_KSM_DB_CACHE_SIZE_DEFAULT); - File ksmDBFile = new File(metaDir.getPath(), KSM_DB_NAME); + final int cacheSize = conf.getInt(OZONE_OM_DB_CACHE_SIZE_MB, + OZONE_OM_DB_CACHE_SIZE_DEFAULT); + File omDBFile = new File(metaDir.getPath(), OM_DB_NAME); this.store = MetadataStoreBuilder.newBuilder() .setConf(conf) - .setDbFile(ksmDBFile) + .setDbFile(omDBFile) .setCacheSize(cacheSize * OzoneConsts.MB) .build(); this.lock = new ReentrantReadWriteLock(); @@ -125,7 +125,7 @@ public MetadataStore getStore() { * @param volume - Volume name */ public byte[] getVolumeKey(String volume) { - String dbVolumeName = OzoneConsts.KSM_VOLUME_PREFIX + volume; + String dbVolumeName = OzoneConsts.OM_VOLUME_PREFIX + volume; return DFSUtil.string2Bytes(dbVolumeName); } @@ -134,7 +134,7 @@ public byte[] getVolumeKey(String volume) { * @param user - User name */ public byte[] getUserKey(String user) { - String dbUserName = OzoneConsts.KSM_USER_PREFIX + user; + String dbUserName = OzoneConsts.OM_USER_PREFIX + user; return DFSUtil.string2Bytes(dbUserName); } @@ -144,8 +144,8 @@ public byte[] getUserKey(String user) { * @param bucket - Bucket name */ public byte[] getBucketKey(String volume, String bucket) { - String bucketKeyString = OzoneConsts.KSM_VOLUME_PREFIX + volume - + OzoneConsts.KSM_BUCKET_PREFIX + bucket; + String bucketKeyString = OzoneConsts.OM_VOLUME_PREFIX + volume + + OzoneConsts.OM_BUCKET_PREFIX + bucket; return DFSUtil.string2Bytes(bucketKeyString); } @@ -156,9 +156,9 @@ public byte[] getBucketKey(String volume, String bucket) { */ private String getBucketWithDBPrefix(String volume, String bucket) { StringBuffer sb = new StringBuffer(); - sb.append(OzoneConsts.KSM_VOLUME_PREFIX) + sb.append(OzoneConsts.OM_VOLUME_PREFIX) .append(volume) - .append(OzoneConsts.KSM_BUCKET_PREFIX); + .append(OzoneConsts.OM_BUCKET_PREFIX); if (!Strings.isNullOrEmpty(bucket)) { sb.append(bucket); } @@ -167,9 +167,9 @@ private String getBucketWithDBPrefix(String volume, String bucket) { @Override public String getKeyWithDBPrefix(String volume, String bucket, String key) { - String keyVB = OzoneConsts.KSM_KEY_PREFIX + volume - + OzoneConsts.KSM_KEY_PREFIX + bucket - + OzoneConsts.KSM_KEY_PREFIX; + String keyVB = OzoneConsts.OM_KEY_PREFIX + volume + + OzoneConsts.OM_KEY_PREFIX + bucket + + OzoneConsts.OM_KEY_PREFIX; return Strings.isNullOrEmpty(key) ? 
keyVB : keyVB + key; } @@ -247,8 +247,8 @@ public void writeBatch(BatchOperation batch) throws IOException { * @return true if the volume is empty */ public boolean isVolumeEmpty(String volume) throws IOException { - String dbVolumeRootName = OzoneConsts.KSM_VOLUME_PREFIX + volume - + OzoneConsts.KSM_BUCKET_PREFIX; + String dbVolumeRootName = OzoneConsts.OM_VOLUME_PREFIX + volume + + OzoneConsts.OM_BUCKET_PREFIX; byte[] dbVolumeRootKey = DFSUtil.string2Bytes(dbVolumeRootName); ImmutablePair volumeRoot = store.peekAround(0, dbVolumeRootKey); @@ -282,18 +282,18 @@ public boolean isBucketEmpty(String volume, String bucket) * {@inheritDoc} */ @Override - public List listBuckets(final String volumeName, - final String startBucket, final String bucketPrefix, - final int maxNumOfBuckets) throws IOException { - List result = new ArrayList<>(); + public List listBuckets(final String volumeName, + final String startBucket, final String bucketPrefix, + final int maxNumOfBuckets) throws IOException { + List result = new ArrayList<>(); if (Strings.isNullOrEmpty(volumeName)) { - throw new KSMException("Volume name is required.", + throw new OMException("Volume name is required.", ResultCodes.FAILED_VOLUME_NOT_FOUND); } byte[] volumeNameBytes = getVolumeKey(volumeName); if (store.get(volumeNameBytes) == null) { - throw new KSMException("Volume " + volumeName + " not found.", + throw new OMException("Volume " + volumeName + " not found.", ResultCodes.FAILED_VOLUME_NOT_FOUND); } @@ -325,7 +325,7 @@ public List listBuckets(final String volumeName, } for (Map.Entry entry : rangeResult) { - KsmBucketInfo info = KsmBucketInfo.getFromProtobuf( + OmBucketInfo info = OmBucketInfo.getFromProtobuf( BucketInfo.parseFrom(entry.getValue())); result.add(info); } @@ -333,22 +333,22 @@ public List listBuckets(final String volumeName, } @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, int maxKeys) throws IOException { - List result = new ArrayList<>(); + public List listKeys(String volumeName, String bucketName, + String startKey, String keyPrefix, int maxKeys) throws IOException { + List result = new ArrayList<>(); if (Strings.isNullOrEmpty(volumeName)) { - throw new KSMException("Volume name is required.", + throw new OMException("Volume name is required.", ResultCodes.FAILED_VOLUME_NOT_FOUND); } if (Strings.isNullOrEmpty(bucketName)) { - throw new KSMException("Bucket name is required.", + throw new OMException("Bucket name is required.", ResultCodes.FAILED_BUCKET_NOT_FOUND); } byte[] bucketNameBytes = getBucketKey(volumeName, bucketName); if (store.get(bucketNameBytes) == null) { - throw new KSMException("Bucket " + bucketName + " not found.", + throw new OMException("Bucket " + bucketName + " not found.", ResultCodes.FAILED_BUCKET_NOT_FOUND); } @@ -371,7 +371,7 @@ public List listKeys(String volumeName, String bucketName, } for (Map.Entry entry : rangeResult) { - KsmKeyInfo info = KsmKeyInfo.getFromProtobuf( + OmKeyInfo info = OmKeyInfo.getFromProtobuf( KeyInfo.parseFrom(entry.getValue())); result.add(info); } @@ -379,9 +379,9 @@ public List listKeys(String volumeName, String bucketName, } @Override - public List listVolumes(String userName, - String prefix, String startKey, int maxKeys) throws IOException { - List result = Lists.newArrayList(); + public List listVolumes(String userName, + String prefix, String startKey, int maxKeys) throws IOException { + List result = Lists.newArrayList(); VolumeList volumes; if (Strings.isNullOrEmpty(userName)) { volumes = 
getAllVolumes(); @@ -410,13 +410,13 @@ public List listVolumes(String userName, if (volumeInfo == null) { // Could not get volume info by given volume name, // since the volume name is loaded from db, - // this probably means ksm db is corrupted or some entries are + // this probably means om db is corrupted or some entries are // accidentally removed. - throw new KSMException("Volume info not found for " + volumeName, + throw new OMException("Volume info not found for " + volumeName, ResultCodes.FAILED_VOLUME_NOT_FOUND); } VolumeInfo info = VolumeInfo.parseFrom(volumeInfo); - KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(info); + OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(info); result.add(volumeArgs); } } @@ -425,12 +425,12 @@ public List listVolumes(String userName, } private VolumeList getVolumesByUser(String userName) - throws KSMException { + throws OMException { return getVolumesByUser(getUserKey(userName)); } private VolumeList getVolumesByUser(byte[] userNameKey) - throws KSMException { + throws OMException { VolumeList volumes = null; try { byte[] volumesInBytes = store.get(userNameKey); @@ -440,7 +440,7 @@ private VolumeList getVolumesByUser(byte[] userNameKey) } volumes = VolumeList.parseFrom(volumesInBytes); } catch (IOException e) { - throw new KSMException("Unable to get volumes info by the given user, " + throw new OMException("Unable to get volumes info by the given user, " + "metadata might be corrupted", e, ResultCodes.FAILED_METADATA_ERROR); } @@ -450,7 +450,7 @@ private VolumeList getVolumesByUser(byte[] userNameKey) private VolumeList getAllVolumes() throws IOException { // Scan all users in database KeyPrefixFilter filter = - new KeyPrefixFilter().addFilter(OzoneConsts.KSM_USER_PREFIX); + new KeyPrefixFilter().addFilter(OzoneConsts.OM_USER_PREFIX); // We are not expecting a huge number of users per cluster, // it should be fine to scan all users in db and return us a // list of volume names in string per user. @@ -474,10 +474,10 @@ public List getPendingDeletionKeys(final int count) store.getRangeKVs(null, count, MetadataKeyFilters.getDeletingKeyFilter()); for (Map.Entry entry : rangeResult) { - KsmKeyInfo info = - KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue())); + OmKeyInfo info = + OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue())); // Get block keys as a list. - KsmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); + OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); if (latest == null) { return Collections.emptyList(); } @@ -503,8 +503,8 @@ public List getExpiredOpenKeys() throws IOException { store.getSequentialRangeKVs(null, Integer.MAX_VALUE, openKeyFilter); for (Map.Entry entry : rangeResult) { - KsmKeyInfo info = - KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue())); + OmKeyInfo info = + OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue())); long lastModify = info.getModificationTime(); if (now - lastModify < this.openKeyExpireThresholdMS) { // consider as may still be active, not hanging. 
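(Editor's note, not part of the patch: the hunks above rename the DB-key helpers in `OmMetadataManagerImpl` — `getVolumeKey`, `getUserKey`, `getBucketKey`, `getKeyWithDBPrefix` — from the old `KSM_*` prefixes to `OM_*` prefixes. The sketch below is a minimal, self-contained illustration of the key layout those helpers compose. The class name `OmKeyLayoutSketch` and the literal prefix values ("/" and "$") are assumptions about `OzoneConsts` and are not shown in this diff; only the composition logic mirrors the code above, and the real implementation encodes strings with `DFSUtil.string2Bytes` rather than `getBytes(UTF_8)`.)

```java
import java.nio.charset.StandardCharsets;

/** Illustrative sketch only; prefix values are assumed, not taken from this patch. */
public final class OmKeyLayoutSketch {

  private static final String OM_VOLUME_PREFIX = "/";  // assumed value of OzoneConsts.OM_VOLUME_PREFIX
  private static final String OM_BUCKET_PREFIX = "/";  // assumed value of OzoneConsts.OM_BUCKET_PREFIX
  private static final String OM_KEY_PREFIX    = "/";  // assumed value of OzoneConsts.OM_KEY_PREFIX
  private static final String OM_USER_PREFIX   = "$";  // assumed value of OzoneConsts.OM_USER_PREFIX

  // Mirrors getVolumeKey(volume): "<volume prefix><volume>"
  static byte[] volumeKey(String volume) {
    return (OM_VOLUME_PREFIX + volume).getBytes(StandardCharsets.UTF_8);
  }

  // Mirrors getUserKey(user): "<user prefix><user>"
  static byte[] userKey(String user) {
    return (OM_USER_PREFIX + user).getBytes(StandardCharsets.UTF_8);
  }

  // Mirrors getBucketKey(volume, bucket): "<volume prefix><volume><bucket prefix><bucket>"
  static byte[] bucketKey(String volume, String bucket) {
    return (OM_VOLUME_PREFIX + volume + OM_BUCKET_PREFIX + bucket)
        .getBytes(StandardCharsets.UTF_8);
  }

  // Mirrors getKeyWithDBPrefix(volume, bucket, key); an empty key yields the
  // scan prefix used by listKeys to iterate everything under a bucket.
  static String keyWithDBPrefix(String volume, String bucket, String key) {
    String prefix = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX;
    return (key == null || key.isEmpty()) ? prefix : prefix + key;
  }

  public static void main(String[] args) {
    // Example: listKeys("vol1", "bucket1", ...) scans keys starting with "/vol1/bucket1/"
    System.out.println(keyWithDBPrefix("vol1", "bucket1", "key1"));
  }
}
```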
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java similarity index 93% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java index 8e2540a627..8d94f5ab77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -35,7 +35,7 @@ /** * This is the background service to delete hanging open keys. - * Scan the metadata of ksm periodically to get + * Scan the metadata of om periodically to get * the keys with prefix "#open#" and ask scm to * delete metadata accordingly, if scm returns * success for keys, then clean up those keys. @@ -80,7 +80,7 @@ public BackgroundTaskResult call() throws Exception { List keyBlocksList = keyManager.getExpiredOpenKeys(); if (keyBlocksList.size() > 0) { int toDeleteSize = keyBlocksList.size(); - LOG.debug("Found {} to-delete open keys in KSM", toDeleteSize); + LOG.debug("Found {} to-delete open keys in OM", toDeleteSize); List results = scmClient.deleteKeyBlocks(keyBlocksList); int deletedSize = 0; @@ -88,7 +88,7 @@ public BackgroundTaskResult call() throws Exception { if (result.isSuccess()) { try { keyManager.deleteExpiredOpenKey(result.getObjectKey()); - LOG.debug("Key {} deleted from KSM DB", result.getObjectKey()); + LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); deletedSize += 1; } catch (IOException e) { LOG.warn("Failed to delete hanging-open key {}", @@ -105,7 +105,7 @@ public BackgroundTaskResult call() throws Exception { "cleaned up {} entries", toDeleteSize, deletedSize); return results::size; } else { - LOG.debug("No hanging open key fond in KSM"); + LOG.debug("No hanging open key found in OM"); } } catch (IOException e) { LOG.error("Unable to get hanging open keys, retry in" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java similarity index 74% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 5fa313bbd3..71fa921cc5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -28,29 +28,28 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; -import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol; -import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .ServicePort; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocolPB - .KeySpaceManagerProtocolServerSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; @@ -67,7 +66,7 @@ import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled; -import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; +import static org.apache.hadoop.ozone.OmUtils.getOmAddress; import static org.apache.hadoop.hdds.server.ServerUtils .updateRPCListenAddress; import org.slf4j.Logger; @@ -83,31 +82,31 @@ import java.util.Map; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_HANDLER_COUNT_DEFAULT; -import static 
org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_HANDLER_COUNT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_HANDLER_COUNT_KEY; import static org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.KeySpaceManagerService + .OzoneManagerProtocolProtos.OzoneManagerService .newReflectiveBlockingService; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos .NodeState.HEALTHY; import static org.apache.hadoop.util.ExitUtil.terminate; /** - * Ozone Keyspace manager is the metadata manager of ozone. + * Ozone Manager is the metadata manager of ozone. */ @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"}) -public final class KeySpaceManager extends ServiceRuntimeInfoImpl - implements KeySpaceManagerProtocol, KSMMXBean { +public final class OzoneManager extends ServiceRuntimeInfoImpl + implements OzoneManagerProtocol, OMMXBean { private static final Logger LOG = - LoggerFactory.getLogger(KeySpaceManager.class); + LoggerFactory.getLogger(OzoneManager.class); private static final String USAGE = - "Usage: \n ozone ksm [genericOptions] " + "[ " - + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ " + "Usage: \n ozone om [genericOptions] " + "[ " + + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone om [ " + StartupOption.HELP.getName() + " ]\n"; /** Startup options. */ @@ -137,60 +136,60 @@ public static StartupOption parse(String value) { } private final OzoneConfiguration configuration; - private final RPC.Server ksmRpcServer; - private final InetSocketAddress ksmRpcAddress; - private final KSMMetadataManager metadataManager; + private final RPC.Server omRpcServer; + private final InetSocketAddress omRpcAddress; + private final OMMetadataManager metadataManager; private final VolumeManager volumeManager; private final BucketManager bucketManager; private final KeyManager keyManager; - private final KSMMetrics metrics; - private final KeySpaceManagerHttpServer httpServer; - private final KSMStorage ksmStorage; + private final OMMetrics metrics; + private final OzoneManagerHttpServer httpServer; + private final OMStorage omStorage; private final ScmBlockLocationProtocol scmBlockClient; private final StorageContainerLocationProtocol scmContainerClient; - private ObjectName ksmInfoBeanName; + private ObjectName omInfoBeanName; - private KeySpaceManager(OzoneConfiguration conf) throws IOException { + private OzoneManager(OzoneConfiguration conf) throws IOException { Preconditions.checkNotNull(conf); configuration = conf; - ksmStorage = new KSMStorage(conf); + omStorage = new OMStorage(conf); scmBlockClient = getScmBlockClient(configuration); scmContainerClient = getScmContainerClient(configuration); - if (ksmStorage.getState() != StorageState.INITIALIZED) { - throw new KSMException("KSM not initialized.", - ResultCodes.KSM_NOT_INITIALIZED); + if (omStorage.getState() != StorageState.INITIALIZED) { + throw new OMException("OM not initialized.", + ResultCodes.OM_NOT_INITIALIZED); } - // verifies that the SCM info in the KSM Version file is correct. + // verifies that the SCM info in the OM Version file is correct. 
ScmInfo scmInfo = scmBlockClient.getScmInfo(); - if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo - .getScmId().equals(ksmStorage.getScmId()))) { - throw new KSMException("SCM version info mismatch.", + if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo + .getScmId().equals(omStorage.getScmId()))) { + throw new OMException("SCM version info mismatch.", ResultCodes.SCM_VERSION_MISMATCH_ERROR); } - final int handlerCount = conf.getInt(OZONE_KSM_HANDLER_COUNT_KEY, - OZONE_KSM_HANDLER_COUNT_DEFAULT); + final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY, + OZONE_OM_HANDLER_COUNT_DEFAULT); - RPC.setProtocolEngine(configuration, KeySpaceManagerProtocolPB.class, + RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); - BlockingService ksmService = newReflectiveBlockingService( - new KeySpaceManagerProtocolServerSideTranslatorPB(this)); - final InetSocketAddress ksmNodeRpcAddr = - getKsmAddress(configuration); - ksmRpcServer = startRpcServer(configuration, ksmNodeRpcAddr, - KeySpaceManagerProtocolPB.class, ksmService, + BlockingService omService = newReflectiveBlockingService( + new OzoneManagerProtocolServerSideTranslatorPB(this)); + final InetSocketAddress omNodeRpcAddr = + getOmAddress(configuration); + omRpcServer = startRpcServer(configuration, omNodeRpcAddr, + OzoneManagerProtocolPB.class, omService, handlerCount); - ksmRpcAddress = updateRPCListenAddress(configuration, - OZONE_KSM_ADDRESS_KEY, ksmNodeRpcAddr, ksmRpcServer); - metadataManager = new KSMMetadataManagerImpl(configuration); + omRpcAddress = updateRPCListenAddress(configuration, + OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer); + metadataManager = new OmMetadataManagerImpl(configuration); volumeManager = new VolumeManagerImpl(metadataManager, configuration); bucketManager = new BucketManagerImpl(metadataManager); - metrics = KSMMetrics.create(); + metrics = OMMetrics.create(); keyManager = new KeyManagerImpl(scmBlockClient, metadataManager, configuration, - ksmStorage.getKsmId()); - httpServer = new KeySpaceManagerHttpServer(configuration, this); + omStorage.getOmId()); + httpServer = new OzoneManagerHttpServer(configuration, this); } /** @@ -250,8 +249,8 @@ public ScmInfo getScmInfo() throws IOException { } @VisibleForTesting - public KSMStorage getKsmStorage() { - return ksmStorage; + public OMStorage getOmStorage() { + return omStorage; } /** * Starts an RPC server, if configured. @@ -286,16 +285,16 @@ private static RPC.Server startRpcServer(OzoneConfiguration conf, * Get metadata manager. * @return metadata manager. */ - public KSMMetadataManager getMetadataManager() { + public OMMetadataManager getMetadataManager() { return metadataManager; } - public KSMMetrics getMetrics() { + public OMMetrics getMetrics() { return metrics; } /** - * Main entry point for starting KeySpaceManager. + * Main entry point for starting OzoneManager. 
* * @param argv arguments * @throws IOException if startup fails due to I/O error @@ -312,14 +311,14 @@ public static void main(String[] argv) throws IOException { hParser.printGenericCommandUsage(System.err); System.exit(1); } - StringUtils.startupShutdownMessage(KeySpaceManager.class, argv, LOG); - KeySpaceManager ksm = createKSM(hParser.getRemainingArgs(), conf); - if (ksm != null) { - ksm.start(); - ksm.join(); + StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG); + OzoneManager om = createOm(hParser.getRemainingArgs(), conf); + if (om != null) { + om.start(); + om.join(); } } catch (Throwable t) { - LOG.error("Failed to start the KeyspaceManager.", t); + LOG.error("Failed to start the OzoneManager.", t); terminate(1, t); } } @@ -329,17 +328,17 @@ private static void printUsage(PrintStream out) { } /** - * Constructs KSM instance based on command line arguments. + * Constructs OM instance based on command line arguments. * @param argv Command line arguments * @param conf OzoneConfiguration - * @return KSM instance - * @throws IOException in case KSM instance creation fails. + * @return OM instance + * @throws IOException in case OM instance creation fails. */ - public static KeySpaceManager createKSM(String[] argv, - OzoneConfiguration conf) throws IOException { + public static OzoneManager createOm(String[] argv, + OzoneConfiguration conf) throws IOException { if (!isHddsEnabled(conf)) { - System.err.println("KSM cannot be started in secure mode or when " + + System.err.println("OM cannot be started in secure mode or when " + OZONE_ENABLED + " is set to false"); System.exit(1); } @@ -351,27 +350,27 @@ public static KeySpaceManager createKSM(String[] argv, } switch (startOpt) { case CREATEOBJECTSTORE: - terminate(ksmInit(conf) ? 0 : 1); + terminate(omInit(conf) ? 0 : 1); return null; case HELP: printUsage(System.err); terminate(0); return null; default: - return new KeySpaceManager(conf); + return new OzoneManager(conf); } } /** - * Initializes the KSM instance. + * Initializes the OM instance. 
* @param conf OzoneConfiguration - * @return true if KSM initialization succeeds , false otherwise + * @return true if OM initialization succeeds, false otherwise * @throws IOException in case ozone metadata directory path is not accessible */ - private static boolean ksmInit(OzoneConfiguration conf) throws IOException { - KSMStorage ksmStorage = new KSMStorage(conf); - StorageState state = ksmStorage.getState(); + private static boolean omInit(OzoneConfiguration conf) throws IOException { + OMStorage omStorage = new OMStorage(conf); + StorageState state = omStorage.getState(); if (state != StorageState.INITIALIZED) { try { ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(conf); @@ -384,29 +383,29 @@ private static boolean ksmInit(OzoneConfiguration conf) throws IOException { if (scmId == null || scmId.isEmpty()) { throw new IOException("Invalid SCM ID"); } - ksmStorage.setClusterId(clusterId); - ksmStorage.setScmId(scmId); - ksmStorage.initialize(); + omStorage.setClusterId(clusterId); + omStorage.setScmId(scmId); + omStorage.initialize(); System.out.println( - "KSM initialization succeeded.Current cluster id for sd=" - + ksmStorage.getStorageDir() + ";cid=" + ksmStorage + "OM initialization succeeded.Current cluster id for sd=" + + omStorage.getStorageDir() + ";cid=" + omStorage .getClusterID()); return true; } catch (IOException ioe) { - LOG.error("Could not initialize KSM version file", ioe); + LOG.error("Could not initialize OM version file", ioe); return false; } } else { System.out.println( - "KSM already initialized.Reusing existing cluster id for sd=" - + ksmStorage.getStorageDir() + ";cid=" + ksmStorage + "OM already initialized.Reusing existing cluster id for sd=" + + omStorage.getStorageDir() + ";cid=" + omStorage .getClusterID()); return true; } } /** - * Parses the command line options for KSM initialization. + * Parses the command line options for OM initialization. * @param args command line arguments * @return StartupOption if options are valid, null otherwise */ @@ -437,12 +436,12 @@ private static String buildRpcServerStartMessage(String description, * Start service. 
*/ public void start() throws IOException { - LOG.info(buildRpcServerStartMessage("KeyspaceManager RPC server", - ksmRpcAddress)); - DefaultMetricsSystem.initialize("KeySpaceManager"); + LOG.info(buildRpcServerStartMessage("OzoneManager RPC server", + omRpcAddress)); + DefaultMetricsSystem.initialize("OzoneManager"); metadataManager.start(); keyManager.start(); - ksmRpcServer.start(); + omRpcServer.start(); httpServer.start(); registerMXBean(); setStartTime(); @@ -454,13 +453,13 @@ public void start() throws IOException { public void stop() { try { metadataManager.stop(); - ksmRpcServer.stop(); + omRpcServer.stop(); keyManager.stop(); httpServer.stop(); metrics.unRegister(); unregisterMXBean(); } catch (Exception e) { - LOG.error("Key Space Manager stop failed.", e); + LOG.error("OzoneManager stop failed.", e); } } @@ -469,10 +468,10 @@ public void stop() { */ public void join() { try { - ksmRpcServer.join(); + omRpcServer.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - LOG.info("Interrupted during KeyspaceManager join.", e); + LOG.info("Interrupted during OzoneManager join.", e); } } @@ -483,7 +482,7 @@ public void join() { * @throws IOException */ @Override - public void createVolume(KsmVolumeArgs args) throws IOException { + public void createVolume(OmVolumeArgs args) throws IOException { try { metrics.incNumVolumeCreates(); volumeManager.createVolume(args); @@ -558,7 +557,7 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) * @throws IOException */ @Override - public KsmVolumeArgs getVolumeInfo(String volume) throws IOException { + public OmVolumeArgs getVolumeInfo(String volume) throws IOException { try { metrics.incNumVolumeInfos(); return volumeManager.getVolumeInfo(volume); @@ -597,8 +596,8 @@ public void deleteVolume(String volume) throws IOException { * @throws IOException */ @Override - public List listVolumeByUser(String userName, String prefix, - String prevKey, int maxKeys) throws IOException { + public List listVolumeByUser(String userName, String prefix, + String prevKey, int maxKeys) throws IOException { try { metrics.incNumVolumeLists(); return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys); @@ -619,7 +618,7 @@ public List listVolumeByUser(String userName, String prefix, * @throws IOException */ @Override - public List listAllVolumes(String prefix, String prevKey, int + public List listAllVolumes(String prefix, String prevKey, int maxKeys) throws IOException { try { metrics.incNumVolumeLists(); @@ -637,7 +636,7 @@ public List listAllVolumes(String prefix, String prevKey, int * @throws IOException */ @Override - public void createBucket(KsmBucketInfo bucketInfo) throws IOException { + public void createBucket(OmBucketInfo bucketInfo) throws IOException { try { metrics.incNumBucketCreates(); bucketManager.createBucket(bucketInfo); @@ -651,8 +650,8 @@ public void createBucket(KsmBucketInfo bucketInfo) throws IOException { * {@inheritDoc} */ @Override - public List listBuckets(String volumeName, - String startKey, String prefix, int maxNumOfBuckets) + public List listBuckets(String volumeName, + String startKey, String prefix, int maxNumOfBuckets) throws IOException { try { metrics.incNumBucketLists(); @@ -669,11 +668,11 @@ public List listBuckets(String volumeName, * * @param volume - Volume name. * @param bucket - Bucket name. - * @return KsmBucketInfo or exception is thrown. + * @return OmBucketInfo or exception is thrown. 
* @throws IOException */ @Override - public KsmBucketInfo getBucketInfo(String volume, String bucket) + public OmBucketInfo getBucketInfo(String volume, String bucket) throws IOException { try { metrics.incNumBucketInfos(); @@ -688,11 +687,11 @@ public KsmBucketInfo getBucketInfo(String volume, String bucket) * Allocate a key. * * @param args - attributes of the key. - * @return KsmKeyInfo - the info about the allocated key. + * @return OmKeyInfo - the info about the allocated key. * @throws IOException */ @Override - public OpenKeySession openKey(KsmKeyArgs args) throws IOException { + public OpenKeySession openKey(OmKeyArgs args) throws IOException { try { metrics.incNumKeyAllocates(); return keyManager.openKey(args); @@ -703,7 +702,7 @@ public OpenKeySession openKey(KsmKeyArgs args) throws IOException { } @Override - public void commitKey(KsmKeyArgs args, int clientID) + public void commitKey(OmKeyArgs args, int clientID) throws IOException { try { metrics.incNumKeyCommits(); @@ -715,7 +714,7 @@ public void commitKey(KsmKeyArgs args, int clientID) } @Override - public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) + public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID) throws IOException { try { metrics.incNumBlockAllocateCalls(); @@ -730,11 +729,11 @@ public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID) * Lookup a key. * * @param args - attributes of the key. - * @return KsmKeyInfo - the info about the requested key. + * @return OmKeyInfo - the info about the requested key. * @throws IOException */ @Override - public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { + public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { try { metrics.incNumKeyLookups(); return keyManager.lookupKey(args); @@ -745,7 +744,7 @@ public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException { } @Override - public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { + public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { try { metrics.incNumKeyRenames(); keyManager.renameKey(args, toKeyName); @@ -762,7 +761,7 @@ public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException { * @throws IOException */ @Override - public void deleteKey(KsmKeyArgs args) throws IOException { + public void deleteKey(OmKeyArgs args) throws IOException { try { metrics.incNumKeyDeletes(); keyManager.deleteKey(args); @@ -773,8 +772,8 @@ public void deleteKey(KsmKeyArgs args) throws IOException { } @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, int maxKeys) throws IOException { + public List listKeys(String volumeName, String bucketName, + String startKey, String keyPrefix, int maxKeys) throws IOException { try { metrics.incNumKeyLists(); return keyManager.listKeys(volumeName, bucketName, @@ -791,7 +790,7 @@ public List listKeys(String volumeName, String bucketName, * @throws IOException */ @Override - public void setBucketProperty(KsmBucketArgs args) + public void setBucketProperty(OmBucketArgs args) throws IOException { try { metrics.incNumBucketUpdates(); @@ -822,27 +821,27 @@ public void deleteBucket(String volume, String bucket) throws IOException { private void registerMXBean() { Map jmxProperties = new HashMap(); jmxProperties.put("component", "ServerRuntime"); - this.ksmInfoBeanName = - MBeans.register("KeySpaceManager", - "KeySpaceManagerInfo", + this.omInfoBeanName = + MBeans.register("OzoneManager", + "OzoneManagerInfo", jmxProperties, 
this); } private void unregisterMXBean() { - if (this.ksmInfoBeanName != null) { - MBeans.unregister(this.ksmInfoBeanName); - this.ksmInfoBeanName = null; + if (this.omInfoBeanName != null) { + MBeans.unregister(this.omInfoBeanName); + this.omInfoBeanName = null; } } @Override public String getRpcPort() { - return "" + ksmRpcAddress.getPort(); + return "" + omRpcAddress.getPort(); } @VisibleForTesting - public KeySpaceManagerHttpServer getHttpServer() { + public OzoneManagerHttpServer getHttpServer() { return httpServer; } @@ -850,26 +849,26 @@ public KeySpaceManagerHttpServer getHttpServer() { public List getServiceList() throws IOException { // When we implement multi-home this call has to be handled properly. List services = new ArrayList<>(); - ServiceInfo.Builder ksmServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HddsProtos.NodeType.KSM) - .setHostname(ksmRpcAddress.getHostName()) + ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder() + .setNodeType(HddsProtos.NodeType.OM) + .setHostname(omRpcAddress.getHostName()) .addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.RPC) - .setValue(ksmRpcAddress.getPort()) + .setValue(omRpcAddress.getPort()) .build()); if (httpServer.getHttpAddress() != null) { - ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder() + omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTP) .setValue(httpServer.getHttpAddress().getPort()) .build()); } if (httpServer.getHttpsAddress() != null) { - ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder() + omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTPS) .setValue(httpServer.getHttpsAddress().getPort()) .build()); } - services.add(ksmServiceInfoBuilder.build()); + services.add(omServiceInfoBuilder.build()); // For client we have to return SCM with container protocol port, // not block protocol. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java similarity index 68% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java index 478804b32c..bd6ab6910a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -25,47 +25,47 @@ import java.io.IOException; /** - * HttpServer wrapper for the KeySpaceManager. + * HttpServer wrapper for the OzoneManager. 
*/ -public class KeySpaceManagerHttpServer extends BaseHttpServer { +public class OzoneManagerHttpServer extends BaseHttpServer { - public KeySpaceManagerHttpServer(Configuration conf, KeySpaceManager ksm) + public OzoneManagerHttpServer(Configuration conf, OzoneManager om) throws IOException { - super(conf, "ksm"); + super(conf, "ozoneManager"); addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class); - getWebAppContext().setAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE, ksm); + getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om); } @Override protected String getHttpAddressKey() { - return KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY; + return OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; } @Override protected String getHttpBindHostKey() { - return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_KEY; + return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; } @Override protected String getHttpsAddressKey() { - return KSMConfigKeys.OZONE_KSM_HTTPS_ADDRESS_KEY; + return OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY; } @Override protected String getHttpsBindHostKey() { - return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_HOST_KEY; + return OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY; } @Override protected String getBindHostDefault() { - return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_DEFAULT; + return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_DEFAULT; } @Override protected int getHttpBindPortDefault() { - return KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT; + return OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; } @Override protected int getHttpsBindPortDefault() { - return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_PORT_DEFAULT; + return OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT; } @Override protected String getKeytabFile() { - return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE; + return OMConfigKeys.OZONE_OM_KEYTAB_FILE; } @Override protected String getSpnegoPrincipal() { @@ -73,6 +73,6 @@ public KeySpaceManagerHttpServer(Configuration conf, KeySpaceManager ksm) } @Override protected String getEnabledKey() { - return KSMConfigKeys.OZONE_KSM_HTTP_ENABLED_KEY; + return OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java similarity index 89% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java index 34a80ce104..47713e2010 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; @@ -37,7 +37,7 @@ * Provides REST access to Ozone Service List. *

* This servlet generally will be placed under the /serviceList URL of - * KeySpaceManager HttpServer. + * OzoneManager HttpServer. * * The return format is of JSON and in the form *

@@ -45,7 +45,7 @@ * { * "services" : [ * { - * "NodeType":"KSM", + * "NodeType":"OM", * "Hostname" "$hostname", * "ports" : { * "$PortType" : "$port", @@ -64,11 +64,11 @@ public class ServiceListJSONServlet extends HttpServlet { LoggerFactory.getLogger(ServiceListJSONServlet.class); private static final long serialVersionUID = 1L; - private KeySpaceManager ksm; + private transient OzoneManager om; public void init() throws ServletException { - this.ksm = (KeySpaceManager) getServletContext() - .getAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE); + this.om = (OzoneManager) getServletContext() + .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); } /** @@ -87,7 +87,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { response.setContentType("application/json; charset=utf8"); PrintWriter writer = response.getWriter(); try { - writer.write(objectMapper.writeValueAsString(ksm.getServiceList())); + writer.write(objectMapper.writeValueAsString(om.getServiceList())); } finally { if (writer != null) { writer.close(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java similarity index 84% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java index 6ac78d6eae..8475dd9e4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java @@ -14,17 +14,17 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import java.io.IOException; import java.util.List; /** - * KSM volume manager interface. + * OM volume manager interface. */ public interface VolumeManager { @@ -32,7 +32,7 @@ public interface VolumeManager { * Create a new volume. * @param args - Volume args to create a volume */ - void createVolume(KsmVolumeArgs args) throws IOException; + void createVolume(OmVolumeArgs args) throws IOException; /** * Changes the owner of a volume. @@ -58,7 +58,7 @@ public interface VolumeManager { * @return VolumeArgs or exception is thrown. * @throws IOException */ - KsmVolumeArgs getVolumeInfo(String volume) throws IOException; + OmVolumeArgs getVolumeInfo(String volume) throws IOException; /** * Deletes an existing empty volume. @@ -92,9 +92,9 @@ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) * this key is excluded from the result. * @param maxKeys * the maximum number of volumes to return. 
- * @return a list of {@link KsmVolumeArgs} + * @return a list of {@link OmVolumeArgs} * @throws IOException */ - List listVolumes(String userName, String prefix, - String startKey, int maxKeys) throws IOException; + List listVolumes(String userName, String prefix, + String startKey, int maxKeys) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java similarity index 82% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index cc2f78aa34..e50145debd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -14,18 +14,18 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.OzoneAclInfo; + .OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.VolumeList; + .OzoneManagerProtocolProtos.VolumeList; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.VolumeInfo; + .OzoneManagerProtocolProtos.VolumeInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; @@ -37,21 +37,20 @@ import java.util.List; import java.util.Map; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_USER_MAX_VOLUME_DEFAULT; -import static org.apache.hadoop.ozone.ksm.KSMConfigKeys - .OZONE_KSM_USER_MAX_VOLUME; -import static org.apache.hadoop.ozone.ksm.exceptions - .KSMException.ResultCodes; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_USER_MAX_VOLUME_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys + .OZONE_OM_USER_MAX_VOLUME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; /** - * KSM volume management code. + * OM volume management code. */ public class VolumeManagerImpl implements VolumeManager { private static final Logger LOG = LoggerFactory.getLogger(VolumeManagerImpl.class); - private final KSMMetadataManager metadataManager; + private final OMMetadataManager metadataManager; private final int maxUserVolumeCount; /** @@ -59,11 +58,11 @@ public class VolumeManagerImpl implements VolumeManager { * @param conf - Ozone configuration. 
* @throws IOException */ - public VolumeManagerImpl(KSMMetadataManager metadataManager, + public VolumeManagerImpl(OMMetadataManager metadataManager, OzoneConfiguration conf) throws IOException { this.metadataManager = metadataManager; - this.maxUserVolumeCount = conf.getInt(OZONE_KSM_USER_MAX_VOLUME, - OZONE_KSM_USER_MAX_VOLUME_DEFAULT); + this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME, + OZONE_OM_USER_MAX_VOLUME_DEFAULT); } // Helpers to add and delete volume from user list @@ -81,7 +80,7 @@ private void addVolumeToOwnerList(String volume, String owner, // Check the volume count if (prevVolList.size() >= maxUserVolumeCount) { LOG.debug("Too many volumes for user:{}", owner); - throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES); + throw new OMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES); } // Add the new volume to the list @@ -103,7 +102,7 @@ private void delVolumeFromOwnerList(String volume, String owner, prevVolList.addAll(vlist.getVolumeNamesList()); } else { LOG.debug("volume:{} not found for user:{}"); - throw new KSMException(ResultCodes.FAILED_USER_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_USER_NOT_FOUND); } // Remove the volume from the list @@ -119,10 +118,10 @@ private void delVolumeFromOwnerList(String volume, String owner, /** * Creates a volume. - * @param args - KsmVolumeArgs. + * @param args - OmVolumeArgs. */ @Override - public void createVolume(KsmVolumeArgs args) throws IOException { + public void createVolume(OmVolumeArgs args) throws IOException { Preconditions.checkNotNull(args); metadataManager.writeLock().lock(); try { @@ -132,7 +131,7 @@ public void createVolume(KsmVolumeArgs args) throws IOException { // Check of the volume already exists if (volumeInfo != null) { LOG.debug("volume:{} already exists", args.getVolume()); - throw new KSMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS); + throw new OMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS); } BatchOperation batch = new BatchOperation(); @@ -161,7 +160,7 @@ public void createVolume(KsmVolumeArgs args) throws IOException { LOG.debug("created volume:{} user:{}", args.getVolume(), args.getOwnerName()); } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Volume creation failed for user:{} volume:{}", args.getOwnerName(), args.getVolume(), ex); } @@ -189,19 +188,19 @@ public void setOwner(String volume, String owner) throws IOException { if (volInfo == null) { LOG.debug("Changing volume ownership failed for user:{} volume:{}", owner, volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); } VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo); - KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo); + OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); Preconditions.checkState(volume.equals(volumeInfo.getVolume())); BatchOperation batch = new BatchOperation(); delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch); addVolumeToOwnerList(volume, owner, batch); - KsmVolumeArgs newVolumeArgs = - KsmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume()) + OmVolumeArgs newVolumeArgs = + OmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume()) .setAdminName(volumeArgs.getAdminName()) .setOwnerName(owner) .setQuotaInBytes(volumeArgs.getQuotaInBytes()) @@ -213,7 +212,7 @@ public void setOwner(String volume, String owner) throws IOException { metadataManager.writeBatch(batch); } 
catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Changing volume ownership failed for user:{} volume:{}", owner, volume, ex); } @@ -238,15 +237,15 @@ public void setQuota(String volume, long quota) throws IOException { byte[] volInfo = metadataManager.get(dbVolumeKey); if (volInfo == null) { LOG.debug("volume:{} does not exist", volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); } VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo); - KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo); + OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); Preconditions.checkState(volume.equals(volumeInfo.getVolume())); - KsmVolumeArgs newVolumeArgs = - KsmVolumeArgs.newBuilder() + OmVolumeArgs newVolumeArgs = + OmVolumeArgs.newBuilder() .setVolume(volumeArgs.getVolume()) .setAdminName(volumeArgs.getAdminName()) .setOwnerName(volumeArgs.getOwnerName()) @@ -256,7 +255,7 @@ public void setQuota(String volume, long quota) throws IOException { VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf(); metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray()); } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Changing volume quota failed for volume:{} quota:{}", volume, quota, ex); } @@ -272,7 +271,7 @@ public void setQuota(String volume, long quota) throws IOException { * @return VolumeArgs or exception is thrown. * @throws IOException */ - public KsmVolumeArgs getVolumeInfo(String volume) throws IOException { + public OmVolumeArgs getVolumeInfo(String volume) throws IOException { Preconditions.checkNotNull(volume); metadataManager.readLock().lock(); try { @@ -280,15 +279,15 @@ public KsmVolumeArgs getVolumeInfo(String volume) throws IOException { byte[] volInfo = metadataManager.get(dbVolumeKey); if (volInfo == null) { LOG.debug("volume:{} does not exist", volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); } VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo); - KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo); + OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); Preconditions.checkState(volume.equals(volumeInfo.getVolume())); return volumeArgs; } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.warn("Info volume failed for volume:{}", volume, ex); } throw ex; @@ -313,12 +312,12 @@ public void deleteVolume(String volume) throws IOException { byte[] volInfo = metadataManager.get(dbVolumeKey); if (volInfo == null) { LOG.debug("volume:{} does not exist", volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); } if (!metadataManager.isVolumeEmpty(volume)) { LOG.debug("volume:{} is not empty", volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY); } VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo); @@ -329,7 +328,7 @@ public void deleteVolume(String volume) throws IOException { batch.delete(dbVolumeKey); metadataManager.writeBatch(batch); } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Delete volume failed for volume:{}", volume, ex); } throw ex; @@ -356,15 +355,15 @@ 
public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) byte[] volInfo = metadataManager.get(dbVolumeKey); if (volInfo == null) { LOG.debug("volume:{} does not exist", volume); - throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); + throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND); } VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo); - KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo); + OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); Preconditions.checkState(volume.equals(volumeInfo.getVolume())); return volumeArgs.getAclMap().hasAccess(userAcl); } catch (IOException ex) { - if (!(ex instanceof KSMException)) { + if (!(ex instanceof OMException)) { LOG.error("Check volume access failed for volume:{} user:{} rights:{}", volume, userAcl.getName(), userAcl.getRights(), ex); } @@ -378,8 +377,8 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) * {@inheritDoc} */ @Override - public List listVolumes(String userName, - String prefix, String startKey, int maxKeys) throws IOException { + public List listVolumes(String userName, + String prefix, String startKey, int maxKeys) throws IOException { metadataManager.readLock().lock(); try { return metadataManager.listVolumes( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java similarity index 85% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index b902eab7ce..55cef97ed5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -15,21 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm.exceptions; +package org.apache.hadoop.ozone.om.exceptions; import java.io.IOException; /** - * Exception thrown by KSM. + * Exception thrown by Ozone Manager. */ -public class KSMException extends IOException { - private final KSMException.ResultCodes result; +public class OMException extends IOException { + private final OMException.ResultCodes result; /** * Constructs an {@code IOException} with {@code null} * as its error detail message. */ - public KSMException(KSMException.ResultCodes result) { + public OMException(OMException.ResultCodes result) { this.result = result; } @@ -40,7 +40,7 @@ public KSMException(KSMException.ResultCodes result) { * the * {@link #getMessage()} method) */ - public KSMException(String message, KSMException.ResultCodes result) { + public OMException(String message, OMException.ResultCodes result) { super(message); this.result = result; } @@ -61,8 +61,8 @@ public KSMException(String message, KSMException.ResultCodes result) { * cause is nonexistent or unknown.) * @since 1.6 */ - public KSMException(String message, Throwable cause, - KSMException.ResultCodes result) { + public OMException(String message, Throwable cause, + OMException.ResultCodes result) { super(message, cause); this.result = result; } @@ -79,7 +79,7 @@ public KSMException(String message, Throwable cause, * cause is nonexistent or unknown.) 
* @since 1.6 */ - public KSMException(Throwable cause, KSMException.ResultCodes result) { + public OMException(Throwable cause, OMException.ResultCodes result) { super(cause); this.result = result; } @@ -88,7 +88,7 @@ public KSMException(Throwable cause, KSMException.ResultCodes result) { * Returns resultCode. * @return ResultCode */ - public KSMException.ResultCodes getResult() { + public OMException.ResultCodes getResult() { return result; } @@ -112,7 +112,7 @@ public enum ResultCodes { FAILED_INVALID_KEY_NAME, FAILED_METADATA_ERROR, FAILED_INTERNAL_ERROR, - KSM_NOT_INITIALIZED, + OM_NOT_INITIALIZED, SCM_VERSION_MISMATCH_ERROR } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java similarity index 91% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java index 09fd87f22c..5091545433 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java @@ -15,5 +15,5 @@ * the License. */ -package org.apache.hadoop.ozone.ksm.exceptions; -// Exception thrown by KSM. +package org.apache.hadoop.ozone.om.exceptions; +// Exception thrown by OM. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java similarity index 89% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java index 09d9f32635..7904d5da08 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; /* - This package contains the keyspace manager classes. + This package contains the Ozone Manager classes. 
*/ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java similarity index 76% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 38e7797ff0..40a88b698a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -19,89 +19,89 @@ import com.google.common.collect.Lists; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; -import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; -import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol; -import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; +import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.AllocateBlockRequest; + .OzoneManagerProtocolProtos.AllocateBlockRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.AllocateBlockResponse; + .OzoneManagerProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CommitKeyRequest; + .OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CommitKeyResponse; + .OzoneManagerProtocolProtos.CommitKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateBucketRequest; + .OzoneManagerProtocolProtos.CreateBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateBucketResponse; + .OzoneManagerProtocolProtos.CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoBucketRequest; + .OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoBucketResponse; + 
.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest; + .OzoneManagerProtocolProtos.SetBucketPropertyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse; + .OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteBucketRequest; + .OzoneManagerProtocolProtos.DeleteBucketRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteBucketResponse; + .OzoneManagerProtocolProtos.DeleteBucketResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateVolumeRequest; + .OzoneManagerProtocolProtos.CreateVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CreateVolumeResponse; + .OzoneManagerProtocolProtos.CreateVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.LocateKeyRequest; + .OzoneManagerProtocolProtos.LocateKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.LocateKeyResponse; + .OzoneManagerProtocolProtos.LocateKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.RenameKeyRequest; + .OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.RenameKeyResponse; + .OzoneManagerProtocolProtos.RenameKeyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.KeyArgs; + .OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest; + .OzoneManagerProtocolProtos.SetVolumePropertyRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse; + .OzoneManagerProtocolProtos.SetVolumePropertyResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest; + .OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse; + .OzoneManagerProtocolProtos.CheckVolumeAccessResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoVolumeRequest; + .OzoneManagerProtocolProtos.InfoVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.InfoVolumeResponse; + .OzoneManagerProtocolProtos.InfoVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteVolumeRequest; + .OzoneManagerProtocolProtos.DeleteVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.DeleteVolumeResponse; + .OzoneManagerProtocolProtos.DeleteVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListVolumeRequest; + .OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListVolumeResponse; + .OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListBucketsRequest; + .OzoneManagerProtocolProtos.ListBucketsRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListBucketsResponse; + .OzoneManagerProtocolProtos.ListBucketsResponse; import 
org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListKeysRequest; + .OzoneManagerProtocolProtos.ListKeysRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ListKeysResponse; + .OzoneManagerProtocolProtos.ListKeysResponse; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.Status; + .OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServiceListRequest; + .OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto - .KeySpaceManagerProtocolProtos.ServiceListResponse; + .OzoneManagerProtocolProtos.ServiceListResponse; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -112,30 +112,30 @@ /** * This class is the server-side translator that forwards requests received on - * {@link org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB} - * to the KeySpaceManagerService server implementation. + * {@link OzoneManagerProtocolPB} + * to the OzoneManagerService server implementation. */ -public class KeySpaceManagerProtocolServerSideTranslatorPB implements - KeySpaceManagerProtocolPB { +public class OzoneManagerProtocolServerSideTranslatorPB implements + OzoneManagerProtocolPB { private static final Logger LOG = LoggerFactory - .getLogger(KeySpaceManagerProtocolServerSideTranslatorPB.class); - private final KeySpaceManagerProtocol impl; + .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); + private final OzoneManagerProtocol impl; /** * Constructs an instance of the server handler. * - * @param impl KeySpaceManagerProtocolPB + * @param impl OzoneManagerProtocolPB */ - public KeySpaceManagerProtocolServerSideTranslatorPB( - KeySpaceManagerProtocol impl) { + public OzoneManagerProtocolServerSideTranslatorPB( + OzoneManagerProtocol impl) { this.impl = impl; } // Convert and exception to corresponding status code private Status exceptionToResponseStatus(IOException ex) { - if (ex instanceof KSMException) { - KSMException ksmException = (KSMException)ex; - switch (ksmException.getResult()) { + if (ex instanceof OMException) { + OMException omException = (OMException)ex; + switch (omException.getResult()) { case FAILED_VOLUME_ALREADY_EXISTS: return Status.VOLUME_ALREADY_EXISTS; case FAILED_TOO_MANY_USER_VOLUMES: @@ -158,6 +158,18 @@ private Status exceptionToResponseStatus(IOException ex) { return Status.KEY_NOT_FOUND; case FAILED_INVALID_KEY_NAME: return Status.INVALID_KEY_NAME; + case FAILED_KEY_ALLOCATION: + return Status.KEY_ALLOCATION_ERROR; + case FAILED_KEY_DELETION: + return Status.KEY_DELETION_ERROR; + case FAILED_KEY_RENAME: + return Status.KEY_RENAME_ERROR; + case FAILED_METADATA_ERROR: + return Status.METADATA_ERROR; + case OM_NOT_INITIALIZED: + return Status.OM_NOT_INITIALIZED; + case SCM_VERSION_MISMATCH_ERROR: + return Status.SCM_VERSION_MISMATCH_ERROR; default: return Status.INTERNAL_ERROR; } @@ -176,7 +188,7 @@ public CreateVolumeResponse createVolume( CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder(); resp.setStatus(Status.OK); try { - impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo())); + impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo())); } catch (IOException e) { resp.setStatus(exceptionToResponseStatus(e)); } @@ -235,7 +247,7 @@ public InfoVolumeResponse infoVolume( resp.setStatus(Status.OK); String volume = request.getVolumeName(); try { - KsmVolumeArgs 
ret = impl.getVolumeInfo(volume); + OmVolumeArgs ret = impl.getVolumeInfo(volume); resp.setVolumeInfo(ret.getProtobuf()); } catch (IOException e) { resp.setStatus(exceptionToResponseStatus(e)); @@ -262,7 +274,7 @@ public ListVolumeResponse listVolumes( RpcController controller, ListVolumeRequest request) throws ServiceException { ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder(); - List result = Lists.newArrayList(); + List result = Lists.newArrayList(); try { if (request.getScope() == ListVolumeRequest.Scope.VOLUMES_BY_USER) { @@ -294,7 +306,7 @@ public CreateBucketResponse createBucket( CreateBucketResponse.Builder resp = CreateBucketResponse.newBuilder(); try { - impl.createBucket(KsmBucketInfo.getFromProtobuf( + impl.createBucket(OmBucketInfo.getFromProtobuf( request.getBucketInfo())); resp.setStatus(Status.OK); } catch (IOException e) { @@ -310,10 +322,10 @@ public InfoBucketResponse infoBucket( InfoBucketResponse.Builder resp = InfoBucketResponse.newBuilder(); try { - KsmBucketInfo ksmBucketInfo = impl.getBucketInfo( + OmBucketInfo omBucketInfo = impl.getBucketInfo( request.getVolumeName(), request.getBucketName()); resp.setStatus(Status.OK); - resp.setBucketInfo(ksmBucketInfo.getProtobuf()); + resp.setBucketInfo(omBucketInfo.getProtobuf()); } catch(IOException e) { resp.setStatus(exceptionToResponseStatus(e)); } @@ -332,7 +344,7 @@ public LocateKeyResponse createKey( keyArgs.hasType()? keyArgs.getType() : null; HddsProtos.ReplicationFactor factor = keyArgs.hasFactor()? keyArgs.getFactor() : null; - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) @@ -341,11 +353,11 @@ public LocateKeyResponse createKey( .setFactor(factor) .build(); if (keyArgs.hasDataSize()) { - ksmKeyArgs.setDataSize(keyArgs.getDataSize()); + omKeyArgs.setDataSize(keyArgs.getDataSize()); } else { - ksmKeyArgs.setDataSize(0); + omKeyArgs.setDataSize(0); } - OpenKeySession openKey = impl.openKey(ksmKeyArgs); + OpenKeySession openKey = impl.openKey(omKeyArgs); resp.setKeyInfo(openKey.getKeyInfo().getProtobuf()); resp.setID(openKey.getId()); resp.setOpenVersion(openKey.getOpenVersion()); @@ -364,12 +376,12 @@ public LocateKeyResponse lookupKey( LocateKeyResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .build(); - KsmKeyInfo keyInfo = impl.lookupKey(ksmKeyArgs); + OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs); resp.setKeyInfo(keyInfo.getProtobuf()); resp.setStatus(Status.OK); } catch (IOException e) { @@ -385,12 +397,12 @@ public RenameKeyResponse renameKey( RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .build(); - impl.renameKey(ksmKeyArgs, request.getToKeyName()); + impl.renameKey(omKeyArgs, request.getToKeyName()); resp.setStatus(Status.OK); } catch (IOException e){ resp.setStatus(exceptionToResponseStatus(e)); @@ -405,7 +417,7 @@ public SetBucketPropertyResponse setBucketProperty( SetBucketPropertyResponse.Builder resp = 
SetBucketPropertyResponse.newBuilder(); try { - impl.setBucketProperty(KsmBucketArgs.getFromProtobuf( + impl.setBucketProperty(OmBucketArgs.getFromProtobuf( request.getBucketArgs())); resp.setStatus(Status.OK); } catch(IOException e) { @@ -421,12 +433,12 @@ public LocateKeyResponse deleteKey(RpcController controller, LocateKeyResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .build(); - impl.deleteKey(ksmKeyArgs); + impl.deleteKey(omKeyArgs); resp.setStatus(Status.OK); } catch (IOException e) { resp.setStatus(exceptionToResponseStatus(e)); @@ -455,12 +467,12 @@ public ListBucketsResponse listBuckets( ListBucketsResponse.Builder resp = ListBucketsResponse.newBuilder(); try { - List buckets = impl.listBuckets( + List buckets = impl.listBuckets( request.getVolumeName(), request.getStartKey(), request.getPrefix(), request.getCount()); - for(KsmBucketInfo bucket : buckets) { + for(OmBucketInfo bucket : buckets) { resp.addBucketInfo(bucket.getProtobuf()); } resp.setStatus(Status.OK); @@ -476,13 +488,13 @@ public ListKeysResponse listKeys(RpcController controller, ListKeysResponse.Builder resp = ListKeysResponse.newBuilder(); try { - List keys = impl.listKeys( + List keys = impl.listKeys( request.getVolumeName(), request.getBucketName(), request.getStartKey(), request.getPrefix(), request.getCount()); - for(KsmKeyInfo key : keys) { + for(OmKeyInfo key : keys) { resp.addKeyInfo(key.getProtobuf()); } resp.setStatus(Status.OK); @@ -503,7 +515,7 @@ public CommitKeyResponse commitKey(RpcController controller, keyArgs.hasType()? keyArgs.getType() : null; HddsProtos.ReplicationFactor factor = keyArgs.hasFactor()? 
keyArgs.getFactor() : null; - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) @@ -512,7 +524,7 @@ public CommitKeyResponse commitKey(RpcController controller, .setFactor(factor) .build(); int id = request.getClientID(); - impl.commitKey(ksmKeyArgs, id); + impl.commitKey(omKeyArgs, id); resp.setStatus(Status.OK); } catch (IOException e) { resp.setStatus(exceptionToResponseStatus(e)); @@ -527,13 +539,13 @@ public AllocateBlockResponse allocateBlock(RpcController controller, AllocateBlockResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .build(); int id = request.getClientID(); - KsmKeyLocationInfo newLocation = impl.allocateBlock(ksmKeyArgs, id); + OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs, id); resp.setKeyLocation(newLocation.getProtobuf()); resp.setStatus(Status.OK); } catch (IOException e) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java index e9c24306ee..9bc393dd18 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java @@ -18,5 +18,5 @@ package org.apache.hadoop.ozone.protocolPB; /** - * KSM protocol buffer translators. + * OM protocol buffer translators. */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html similarity index 88% rename from hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html rename to hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html index 7f18028f2b..ba54cb2cf4 100644 --- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html +++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html @@ -22,9 +22,9 @@ - + - HDFS Key Space Manager + Ozone Manager @@ -34,7 +34,7 @@ - +

@@ -64,7 +64,7 @@ - + diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css similarity index 100% rename from hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css rename to hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html similarity index 100% rename from hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html rename to hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html similarity index 98% rename from hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html rename to hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html index e63fb000cd..15fba2fcb3 100644 --- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html +++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. --> -

KSM Metrics

+

OzoneManager Metrics

{{type}}

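The renamed metrics page above is served against the renamed JMX bean, which the ozoneManager.js hunk below queries as "Hadoop:service=OzoneManager,name=OMMetrics". As a hedged illustration only (not part of this patch), an in-process read of that bean could look like the following sketch; the NumKeyCommits attribute name is an assumption:

```
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class OmMetricsJmxSketch {
  public static void main(String[] args) throws Exception {
    // Only meaningful inside (or attached to) a JVM that runs the Ozone Manager.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName omMetrics =
        new ObjectName("Hadoop:service=OzoneManager,name=OMMetrics");
    // Attribute name is assumed for illustration; any numeric OMMetrics counter works.
    Object numKeyCommits = mbs.getAttribute(omMetrics, "NumKeyCommits");
    System.out.println("NumKeyCommits = " + numKeyCommits);
  }
}
```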
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js similarity index 90% rename from hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js rename to hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js index ab6f73bfc9..ca035548cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js +++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js @@ -22,15 +22,15 @@ return key == 'name' || key == 'modelerType' || key.match(/tag.*/); }; - angular.module('ksm', ['ozone', 'nvd3']); - angular.module('ksm').config(function ($routeProvider) { + angular.module('ozoneManager', ['ozone', 'nvd3']); + angular.module('ozoneManager').config(function ($routeProvider) { $routeProvider - .when("/metrics/ksm", { - template: "" + .when("/metrics/ozoneManager", { + template: "" }); }); - angular.module('ksm').component('ksmMetrics', { - templateUrl: 'ksm-metrics.html', + angular.module('ozoneManager').component('omMetrics', { + templateUrl: 'om-metrics.html', controller: function ($http) { var ctrl = this; @@ -63,7 +63,7 @@ }; - $http.get("jmx?qry=Hadoop:service=KeySpaceManager,name=KSMMetrics") + $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics") .then(function (result) { var groupedMetrics = {others: [], nums: {}}; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java similarity index 77% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index 0b43bf9b70..1ecac7fdac 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -14,16 +14,15 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; -import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.ksm.exceptions.KSMException; -import org.apache.hadoop.ozone.ksm.exceptions - .KSMException.ResultCodes; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.OzoneAcl; import org.junit.Assert; import org.junit.Rule; @@ -47,16 +46,16 @@ import static org.mockito.Mockito.any; /** - * Tests BucketManagerImpl, mocks KSMMetadataManager for testing. + * Tests BucketManagerImpl, mocks OMMetadataManager for testing. */ @RunWith(MockitoJUnitRunner.class) public class TestBucketManagerImpl { @Rule public ExpectedException thrown = ExpectedException.none(); - private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate) + private OMMetadataManager getMetadataManagerMock(String... 
volumesToCreate) throws IOException { - KSMMetadataManager metadataManager = Mockito.mock(KSMMetadataManager.class); + OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); Map metadataDB = new HashMap<>(); ReadWriteLock lock = new ReentrantReadWriteLock(); @@ -65,14 +64,14 @@ private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate) Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer( (InvocationOnMock invocation) -> DFSUtil.string2Bytes( - OzoneConsts.KSM_VOLUME_PREFIX + invocation.getArguments()[0])); + OzoneConsts.OM_VOLUME_PREFIX + invocation.getArguments()[0])); Mockito.when(metadataManager .getBucketKey(any(String.class), any(String.class))).thenAnswer( (InvocationOnMock invocation) -> DFSUtil.string2Bytes( - OzoneConsts.KSM_VOLUME_PREFIX + OzoneConsts.OM_VOLUME_PREFIX + invocation.getArguments()[0] - + OzoneConsts.KSM_BUCKET_PREFIX + + OzoneConsts.OM_BUCKET_PREFIX + invocation.getArguments()[1])); Mockito.doAnswer( @@ -80,11 +79,11 @@ private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate) @Override public Boolean answer(InvocationOnMock invocation) throws Throwable { - String keyRootName = OzoneConsts.KSM_KEY_PREFIX + String keyRootName = OzoneConsts.OM_KEY_PREFIX + invocation.getArguments()[0] - + OzoneConsts.KSM_KEY_PREFIX + + OzoneConsts.OM_KEY_PREFIX + invocation.getArguments()[1] - + OzoneConsts.KSM_KEY_PREFIX; + + OzoneConsts.OM_KEY_PREFIX; Iterator keyIterator = metadataDB.keySet().iterator(); while(keyIterator.hasNext()) { if(keyIterator.next().startsWith(keyRootName)) { @@ -124,7 +123,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { for(String volumeName : volumesToCreate) { byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName); - metadataDB.put(OzoneConsts.KSM_VOLUME_PREFIX + volumeName, + metadataDB.put(OzoneConsts.OM_VOLUME_PREFIX + volumeName, dummyVolumeInfo); } return metadataManager; @@ -133,26 +132,26 @@ public Void answer(InvocationOnMock invocation) throws Throwable { @Test public void testCreateBucketWithoutVolume() throws IOException { thrown.expectMessage("Volume doesn't exist"); - KSMMetadataManager metaMgr = getMetadataManagerMock(); + OMMetadataManager metaMgr = getMetadataManagerMock(); try { BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .build(); bucketManager.createBucket(bucketInfo); - } catch(KSMException ksmEx) { + } catch(OMException omEx) { Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND, - ksmEx.getResult()); - throw ksmEx; + omEx.getResult()); + throw omEx; } } @Test public void testCreateBucket() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .build(); @@ -163,19 +162,19 @@ public void testCreateBucket() throws IOException { @Test public void testCreateAlreadyExistingBucket() throws IOException { thrown.expectMessage("Bucket already exist"); - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); try { BucketManager bucketManager = new 
BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .build(); bucketManager.createBucket(bucketInfo); bucketManager.createBucket(bucketInfo); - } catch(KSMException ksmEx) { + } catch(OMException omEx) { Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS, - ksmEx.getResult()); - throw ksmEx; + omEx.getResult()); + throw omEx; } } @@ -183,28 +182,28 @@ public void testCreateAlreadyExistingBucket() throws IOException { public void testGetBucketInfoForInvalidBucket() throws IOException { thrown.expectMessage("Bucket not found"); try { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); bucketManager.getBucketInfo("sampleVol", "bucketOne"); - } catch(KSMException ksmEx) { + } catch(OMException omEx) { Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND, - ksmEx.getResult()); - throw ksmEx; + omEx.getResult()); + throw omEx; } } @Test public void testGetBucketInfo() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setStorageType(StorageType.DISK) .setIsVersionEnabled(false) .build(); bucketManager.createBucket(bucketInfo); - KsmBucketInfo result = bucketManager.getBucketInfo( + OmBucketInfo result = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals("sampleVol", result.getVolumeName()); Assert.assertEquals("bucketOne", result.getBucketName()); @@ -215,13 +214,13 @@ public void testGetBucketInfo() throws IOException { @Test public void testSetBucketPropertyAddACL() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); List acls = new LinkedList<>(); OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "root", OzoneAcl.OzoneACLRights.READ); acls.add(ozoneAcl); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setAcls(acls) @@ -229,7 +228,7 @@ public void testSetBucketPropertyAddACL() throws IOException { .setIsVersionEnabled(false) .build(); bucketManager.createBucket(bucketInfo); - KsmBucketInfo result = bucketManager.getBucketInfo( + OmBucketInfo result = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals("sampleVol", result.getVolumeName()); Assert.assertEquals("bucketOne", result.getBucketName()); @@ -238,13 +237,13 @@ public void testSetBucketPropertyAddACL() throws IOException { OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "ozone", OzoneAcl.OzoneACLRights.READ); addAcls.add(newAcl); - KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder() + OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setAddAcls(addAcls) .build(); bucketManager.setBucketProperty(bucketArgs); - KsmBucketInfo updatedResult = bucketManager.getBucketInfo( + OmBucketInfo updatedResult = 
bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals(2, updatedResult.getAcls().size()); Assert.assertTrue(updatedResult.getAcls().contains(newAcl)); @@ -252,7 +251,7 @@ public void testSetBucketPropertyAddACL() throws IOException { @Test public void testSetBucketPropertyRemoveACL() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); List acls = new LinkedList<>(); OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "root", OzoneAcl.OzoneACLRights.READ); @@ -261,7 +260,7 @@ public void testSetBucketPropertyRemoveACL() throws IOException { acls.add(aclOne); acls.add(aclTwo); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setAcls(acls) @@ -269,18 +268,18 @@ public void testSetBucketPropertyRemoveACL() throws IOException { .setIsVersionEnabled(false) .build(); bucketManager.createBucket(bucketInfo); - KsmBucketInfo result = bucketManager.getBucketInfo( + OmBucketInfo result = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals(2, result.getAcls().size()); List removeAcls = new LinkedList<>(); removeAcls.add(aclTwo); - KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder() + OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setRemoveAcls(removeAcls) .build(); bucketManager.setBucketProperty(bucketArgs); - KsmBucketInfo updatedResult = bucketManager.getBucketInfo( + OmBucketInfo updatedResult = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals(1, updatedResult.getAcls().size()); Assert.assertFalse(updatedResult.getAcls().contains(aclTwo)); @@ -288,25 +287,25 @@ public void testSetBucketPropertyRemoveACL() throws IOException { @Test public void testSetBucketPropertyChangeStorageType() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setStorageType(StorageType.DISK) .build(); bucketManager.createBucket(bucketInfo); - KsmBucketInfo result = bucketManager.getBucketInfo( + OmBucketInfo result = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals(StorageType.DISK, result.getStorageType()); - KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder() + OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setStorageType(StorageType.SSD) .build(); bucketManager.setBucketProperty(bucketArgs); - KsmBucketInfo updatedResult = bucketManager.getBucketInfo( + OmBucketInfo updatedResult = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertEquals(StorageType.SSD, updatedResult.getStorageType()); @@ -314,24 +313,24 @@ public void testSetBucketPropertyChangeStorageType() throws IOException { @Test public void testSetBucketPropertyChangeVersioning() throws IOException { - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - 
KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setIsVersionEnabled(false) .build(); bucketManager.createBucket(bucketInfo); - KsmBucketInfo result = bucketManager.getBucketInfo( + OmBucketInfo result = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertFalse(result.getIsVersionEnabled()); - KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder() + OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .setIsVersionEnabled(true) .build(); bucketManager.setBucketProperty(bucketArgs); - KsmBucketInfo updatedResult = bucketManager.getBucketInfo( + OmBucketInfo updatedResult = bucketManager.getBucketInfo( "sampleVol", "bucketOne"); Assert.assertTrue(updatedResult.getIsVersionEnabled()); } @@ -339,10 +338,10 @@ public void testSetBucketPropertyChangeVersioning() throws IOException { @Test public void testDeleteBucket() throws IOException { thrown.expectMessage("Bucket not found"); - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); for(int i = 0; i < 5; i++) { - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucket_" + i) .build(); @@ -362,19 +361,19 @@ public void testDeleteBucket() throws IOException { } try { bucketManager.getBucketInfo("sampleVol", "bucket_1"); - } catch(KSMException ksmEx) { + } catch(OMException omEx) { Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND, - ksmEx.getResult()); - throw ksmEx; + omEx.getResult()); + throw omEx; } } @Test public void testDeleteNonEmptyBucket() throws IOException { thrown.expectMessage("Bucket is not empty"); - KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); + OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol"); BucketManager bucketManager = new BucketManagerImpl(metaMgr); - KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder() + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("sampleVol") .setBucketName("bucketOne") .build(); @@ -386,10 +385,10 @@ public void testDeleteNonEmptyBucket() throws IOException { DFSUtil.string2Bytes("value_two")); try { bucketManager.deleteBucket("sampleVol", "bucketOne"); - } catch(KSMException ksmEx) { + } catch(OMException omEx) { Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY, - ksmEx.getResult()); - throw ksmEx; + omEx.getResult()); + throw omEx; } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java similarity index 99% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index e6158bddd0..7ce916a9cc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java similarity index 91% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java index b263df56a1..3e11a13b14 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -44,12 +44,12 @@ import java.util.Collection; /** - * Test http server os KSM with various HTTP option. + * Test http server of OM with various HTTP option. */ @RunWith(value = Parameterized.class) -public class TestKeySpaceManagerHttpServer { +public class TestOzoneManagerHttpServer { private static final String BASEDIR = GenericTestUtils - .getTempPath(TestKeySpaceManagerHttpServer.class.getSimpleName()); + .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName()); private static String keystoresDir; private static String sslConfDir; private static Configuration conf; @@ -65,7 +65,7 @@ public class TestKeySpaceManagerHttpServer { private final HttpConfig.Policy policy; - public TestKeySpaceManagerHttpServer(Policy policy) { + public TestOzoneManagerHttpServer(Policy policy) { super(); this.policy = policy; } @@ -77,7 +77,7 @@ public TestKeySpaceManagerHttpServer(Policy policy) { conf = new Configuration(); keystoresDir = new File(BASEDIR).getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir( - TestKeySpaceManagerHttpServer.class); + TestOzoneManagerHttpServer.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(conf); @@ -97,9 +97,9 @@ public TestKeySpaceManagerHttpServer(Policy policy) { conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0"); InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0); - KeySpaceManagerHttpServer server = null; + OzoneManagerHttpServer server = null; try { - server = new KeySpaceManagerHttpServer(conf, null); + server = new OzoneManagerHttpServer(conf, null); server.start(); Assert.assertTrue(implies(policy.isHttpEnabled(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java similarity index 94% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java index 089ff4be31..12fcf7c272 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing 
permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.ksm; +package org.apache.hadoop.ozone.om; /** - * KSM tests + * OM tests */ diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 8417e463f9..b63e182a1e 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.ozone.ksm.KSMConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.Assert; @@ -109,7 +109,7 @@ public FileSystem getTestFileSystem() throws IOException { String uri = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); getConf().set("fs.defaultFS", uri); - copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY); + copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); return FileSystem.get(getConf()); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index 26776c5c35..3884eddfc0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.util.Tool; @@ -60,10 +60,10 @@ import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OM_BUCKET_PREFIX; +import 
static org.apache.hadoop.ozone.OzoneConsts.OM_VOLUME_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB; /** @@ -120,7 +120,7 @@ public class SQLCLI extends Configured implements Tool { "INSERT INTO openContainer (containerName, containerUsed) " + "VALUES (\"%s\", \"%s\")"; - // for ksm.db + // for om.db private static final String CREATE_VOLUME_LIST = "CREATE TABLE volumeList (" + "userName TEXT NOT NULL," + @@ -278,9 +278,9 @@ public int run(String[] args) throws Exception { } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) { LOG.info("Converting open container DB"); convertOpenContainerDB(dbPath, outPath); - } else if (dbName.toString().equals(KSM_DB_NAME)) { - LOG.info("Converting ksm DB"); - convertKSMDB(dbPath, outPath); + } else if (dbName.toString().equals(OM_DB_NAME)) { + LOG.info("Converting om DB"); + convertOMDB(dbPath, outPath); } else { LOG.error("Unrecognized db name {}", dbName); } @@ -301,7 +301,7 @@ private void executeSQL(Connection conn, String sql) throws SQLException { } /** - * Convert ksm.db to sqlite db file. With following schema. + * Convert om.db to sqlite db file. With following schema. * (* for primary key) * * 1. for key type USER, it contains a username and a list volumes @@ -341,8 +341,8 @@ private void executeSQL(Connection conn, String sql) throws SQLException { * @param outPath * @throws Exception */ - private void convertKSMDB(Path dbPath, Path outPath) throws Exception { - LOG.info("Create tables for sql ksm db."); + private void convertOMDB(Path dbPath, Path outPath) throws Exception { + LOG.info("Create tables for sql om db."); File dbFile = dbPath.toFile(); try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder() .setConf(conf).setDbFile(dbFile).build(); @@ -357,7 +357,7 @@ private void convertKSMDB(Path dbPath, Path outPath) throws Exception { String keyString = DFSUtilClient.bytes2String(key); KeyType type = getKeyType(keyString); try { - insertKSMDB(conn, type, keyString, value); + insertOMDB(conn, type, keyString, value); } catch (IOException | SQLException ex) { LOG.error("Exception inserting key {} type {}", keyString, type, ex); } @@ -366,8 +366,8 @@ private void convertKSMDB(Path dbPath, Path outPath) throws Exception { } } - private void insertKSMDB(Connection conn, KeyType type, String keyName, - byte[] value) throws IOException, SQLException { + private void insertOMDB(Connection conn, KeyType type, String keyName, + byte[] value) throws IOException, SQLException { switch (type) { case USER: VolumeList volumeList = VolumeList.parseFrom(value); @@ -412,16 +412,16 @@ private void insertKSMDB(Connection conn, KeyType type, String keyName, executeSQL(conn, insertKeyInfo); break; default: - throw new IOException("Unknown key from ksm.db"); + throw new IOException("Unknown key from om.db"); } } private KeyType getKeyType(String key) { - if (key.startsWith(KSM_USER_PREFIX)) { + if (key.startsWith(OM_USER_PREFIX)) { return KeyType.USER; - } else if (key.startsWith(KSM_VOLUME_PREFIX)) { - return key.replaceFirst(KSM_VOLUME_PREFIX, "") - .contains(KSM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME; + } else if (key.startsWith(OM_VOLUME_PREFIX)) { + return key.replaceFirst(OM_VOLUME_PREFIX, "") + .contains(OM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME; }else { return KeyType.KEY; }
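To make the renamed prefix handling above concrete, here is a hedged, self-contained sketch of the same getKeyType classification. The prefix values are illustrative stand-ins, not guaranteed to match the renamed OM_* constants in OzoneConsts:

```
/** Hedged sketch of SQLCLI's key-type classification for om.db entries. */
public class OmDbKeyTypeSketch {
  enum KeyType { USER, VOLUME, BUCKET, KEY }

  // Assumed, illustrative prefix values; the real ones are the OM_* constants.
  static final String OM_USER_PREFIX = "$";
  static final String OM_VOLUME_PREFIX = "/";
  static final String OM_BUCKET_PREFIX = "/";

  static KeyType getKeyType(String key) {
    if (key.startsWith(OM_USER_PREFIX)) {
      return KeyType.USER;
    } else if (key.startsWith(OM_VOLUME_PREFIX)) {
      // "/volume" is a volume key; "/volume/bucket" is a bucket key.
      return key.replaceFirst(OM_VOLUME_PREFIX, "")
          .contains(OM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
    } else {
      return KeyType.KEY;
    }
  }

  public static void main(String[] args) {
    System.out.println(getKeyType("$hadoop"));       // USER
    System.out.println(getKeyType("/vol1"));         // VOLUME
    System.out.println(getKeyType("/vol1/bucket1")); // BUCKET
  }
}
```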