HDFS-13320. Ozone: Support for MicrobenchMarking Tool. Contributed by Shashikant Banerjee

parent 7ae9543a57
commit 6cc765fbb6
@@ -114,6 +114,8 @@ public ContainerStateMap() {
    */
   public void addContainer(ContainerInfo info)
       throws SCMException {
+    Preconditions.checkNotNull(info, "Container Info cannot be null");
+    Preconditions.checkNotNull(info.getPipeline(), "Pipeline cannot be null");
 
     try (AutoCloseableLock lock = autoLock.acquire()) {
       ContainerID id = ContainerID.valueof(info.getContainerID());
@@ -33,50 +33,23 @@ function hadoop_usage
   hadoop_add_option "--workers" "turn on worker mode"
 
 
-  hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
-  hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
   hadoop_add_subcommand "cblock" admin "cblock CLI"
   hadoop_add_subcommand "cblockserver" daemon "run cblock server"
   hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
-  hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
   hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
-  hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands"
-  hadoop_add_subcommand "dfs" client "run a filesystem command on the file system"
-  hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
-  hadoop_add_subcommand "dfsrouter" daemon "run the DFS router"
-  hadoop_add_subcommand "dfsrouteradmin" admin "manage Router-based federation"
-  hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
-  hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
-  hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
   hadoop_add_subcommand "freon" client "runs an ozone data generator"
-  hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
+  hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
-  hadoop_add_subcommand "getconf" client "get config values from configuration"
   hadoop_add_subcommand "getozoneconf" client "get ozone config values from
 configuration"
-  hadoop_add_subcommand "groups" client "get the groups which users belong to"
-  hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
-  hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
   hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
   hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
-  hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user"
+  hadoop_add_subcommand "o3" client "command line interface for ozone"
-  hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types"
+  hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
-  hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
-  hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway"
-  hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file"
-  hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
-  hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
-  hadoop_add_subcommand "oz" client "command line interface for ozone"
-  hadoop_add_subcommand "oz_debug" client "ozone debug tool, convert ozone metadata into relational data"
-  hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
-  hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
-  hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
   hadoop_add_subcommand "version" client "print the version"
-  hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
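Note on usage: with the entries above, the new benchmark suite sits next to freon as a first-class subcommand. Assuming the usual ozone wrapper script is on the PATH (the script path itself is not shown in this diff), the suite would presumably be launched as `ozone genesis`, which the case statement below maps to org.apache.hadoop.ozone.genesis.Genesis.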
@@ -92,13 +65,6 @@ function ozonecmd_case
   shift
 
   case ${subcmd} in
-    balancer)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
-    ;;
-    cacheadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
-    ;;
     cblock)
       HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
     ;;
@@ -109,9 +75,6 @@ function ozonecmd_case
     classpath)
       hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
     ;;
-    crypto)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin
-    ;;
     datanode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
@@ -119,25 +82,6 @@ function ozonecmd_case
       hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
       hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
     ;;
-    debug)
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin'
-    ;;
-    dfs)
-      HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
-    ;;
-    dfsadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
-    ;;
-    dfsrouter)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
-    ;;
-    dfsrouteradmin)
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
-    ;;
-    diskbalancer)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
-    ;;
     envvars)
       echo "JAVA_HOME='${JAVA_HOME}'"
       echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
@@ -153,87 +97,29 @@ function ozonecmd_case
       fi
       exit 0
     ;;
-    ec)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin
-    ;;
-    fetchdt)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-    ;;
     freon)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
     ;;
-    fsck)
+    genesis)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
-    ;;
-    getconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.GetConf
     ;;
     getozoneconf)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.OzoneGetConf
     ;;
-    groups)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetGroups
-    ;;
-    haadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-    ;;
-    journalnode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-    ;;
     jscsi)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
     ;;
-    jmxget)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
-    ;;
     ksm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
     ;;
-    lsSnapshottableDir)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
-    ;;
-    mover)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
-    ;;
-    namenode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
-      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-    ;;
-    nfs3)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      # shellcheck disable=SC2034
-      HADOOP_SECURE_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
-      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_LOG_DIR HADOOP_SECURE_LOG_DIR
-      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_PID_DIR HADOOP_SECURE_PID_DIR
-    ;;
-    oev)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
-    ;;
-    oiv)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-    ;;
-    oiv_legacy)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
-    ;;
     oz)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
     ;;
-    oz_debug)
+    noz)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SQLCLI
     ;;
-    portmap)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
-    ;;
-    scmcli)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
-    ;;
     scm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.ozone.scm.StorageContainerManager'
@@ -243,25 +129,9 @@ function ozonecmd_case
     scmcli)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
     ;;
-    secondarynamenode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-    ;;
-    snapshotDiff)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
-    ;;
-    storagepolicies)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
-    ;;
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
     ;;
-    zkfc)
-      # shellcheck disable=SC2034
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-    ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
       if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
@@ -15,7 +15,7 @@
  * the License.
  */
 
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 
 import java.io.IOException;
 import java.io.PrintStream;
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 /**
  * Classes related to Ozone tools.
  */
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 /**
  * Classes related to Ozone tools tests.
  */
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
-
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
-import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.SortedSet;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.CLOSED;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType.STAND_ALONE;
-
-public class TestContainerStateMap {
-  @Test
-  public void testLifeCyleStates() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    int currentCount = 1;
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-    for (int x = 1; x < 1001; x++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000, openSet.size());
-
-    int nextMax = currentCount + 1000;
-    for (int y = currentCount; y < nextMax; y++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.CLOSED)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(y)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    openSet = stateMap.getMatchingContainerIDs(OPEN, "OZONE",
-        ONE, STAND_ALONE);
-    SortedSet<ContainerID> closeSet = stateMap.getMatchingContainerIDs(CLOSED,
-        "OZONE", ONE, STAND_ALONE);
-
-    // Assert that open is still 1000 and we added 1000 more closed containers.
-    Assert.assertEquals(1000, openSet.size());
-    Assert.assertEquals(1000, closeSet.size());
-
-    SortedSet<ContainerID> ownerSet = stateMap.getContainerIDsByOwner("OZONE");
-
-    // Ozone owns 1000 open and 1000 closed containers.
-    Assert.assertEquals(2000, ownerSet.size());
-  }
-
-  @Test
-  public void testGetMatchingContainers() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-
-    int currentCount = 1;
-    for (int x = 1; x < 1001; x++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000, openSet.size());
-    int nextMax = currentCount + 200;
-    for (int y = currentCount; y < nextMax; y++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.CLOSED)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(y)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    nextMax = currentCount + 30000;
-    for (int z = currentCount; z < nextMax; z++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(z)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-    // At this point, if we get all Open Containers that belong to Ozone,
-    // with one replica and standalone replica strategy -- we should get
-    // 1000 + 30000.
-
-    openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000 + 30000, openSet.size());
-
-
-    // There is no such owner, so should be a set of zero size.
-    SortedSet<ContainerID> zeroSet = stateMap.getMatchingContainerIDs(OPEN,
-        "BILBO", ONE, STAND_ALONE);
-    Assert.assertEquals(0, zeroSet.size());
-    int nextId = currentCount++;
-    ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName())
-        .setState(HdslProtos.LifeCycleState.OPEN)
-        .setPipeline(pipeline)
-        .setAllocatedBytes(0)
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("BILBO")
-        .setContainerID(nextId)
-        .build();
-
-    stateMap.addContainer(containerInfo);
-    zeroSet = stateMap.getMatchingContainerIDs(OPEN,
-        "BILBO", ONE, STAND_ALONE);
-    Assert.assertEquals(1, zeroSet.size());
-
-    // Assert that the container we got back is the nextID itself.
-    Assert.assertTrue(zeroSet.contains(new ContainerID(nextId)));
-  }
-
-  @Test
-  public void testUpdateState() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-
-    ContainerInfo containerInfo = null;
-    int currentCount = 1;
-    for (int x = 1; x < 1001; x++) {
-      containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-
-
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    stateMap.updateState(containerInfo, OPEN, CLOSED);
-    SortedSet<ContainerID> closedSet = stateMap.getMatchingContainerIDs(CLOSED,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1, closedSet.size());
-    Assert.assertTrue(closedSet.contains(containerInfo.containerID()));
-
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(999, openSet.size());
-  }
-}
@@ -49,5 +49,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>metrics-core</artifactId>
       <version>3.2.4</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-server-scm</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-core</artifactId>
+      <version>1.19</version>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-generator-annprocess</artifactId>
+      <version>1.19</version>
+    </dependency>
   </dependencies>
 </project>
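The two org.openjdk.jmh artifacts added above are what turn the annotated classes in this patch into runnable benchmarks: jmh-core supplies the @Benchmark/@State annotations and the Runner, and jmh-generator-annprocess generates the benchmark harness at compile time. The Genesis entry point itself is not shown in this diff; the class below is only an illustrative sketch, modeled on the OptionsBuilder usage removed from the old JUnit-driven benchmark further down, and its name and option values are assumptions.

    package org.apache.hadoop.ozone.genesis;

    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    /** Illustrative sketch only: drives the JMH benchmarks in this package. */
    public final class GenesisSketch {
      public static void main(String[] args) throws RunnerException {
        Options opt = new OptionsBuilder()
            .include(BenchMarkContainerStateMap.class.getSimpleName())
            .include(BenchMarkMetadataStoreReads.class.getSimpleName())
            .shouldDoGC(true)
            .forks(1)
            .build();
        new Runner(opt).run();   // runs every matching @Benchmark method
      }
    }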
@@ -15,7 +15,7 @@
  * the License.
  */
 
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Snapshot;
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.tools;
+package org.apache.hadoop.ozone.freon;
 
 /**
  This package contains class used for testing and benchmarking ozone cluster.
@@ -16,106 +16,52 @@
  *
  */
 
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
+package org.apache.hadoop.ozone.genesis;
 
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerStateMap;
 import org.apache.hadoop.ozone.scm.exceptions.SCMException;
 import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
 import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.infra.Blackhole;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import org.openjdk.jmh.runner.options.TimeValue;
 
 import java.io.IOException;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
+import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN;
+import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.CLOSED;
+import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
+import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
 
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
+@State(Scope.Thread)
-    .LifeCycleState.OPEN;
+public class BenchMarkContainerStateMap {
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
-    .ReplicationFactor.ONE;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
-    .ReplicationType.STAND_ALONE;
-
-public class BenchmarkContainerStateMap {
-  @Test
-  public void testRunBenchMarks() throws RunnerException {
-    Options opt = new OptionsBuilder()
-        .include(this.getClass().getName() + ".*")
-        .mode(Mode.Throughput)
-        .timeUnit(TimeUnit.SECONDS)
-        .warmupTime(TimeValue.seconds(1))
-        .warmupIterations(2)
-        .measurementTime(TimeValue.seconds(1))
-        .measurementIterations(2)
-        .threads(2)
-        .forks(1)
-        .shouldFailOnError(true)
-        .shouldDoGC(true)
-        .build();
-    new Runner(opt).run();
-  }
-
-  @Benchmark
-  public void createContainerBenchMark(BenchmarkState state, Blackhole bh)
-      throws IOException {
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-    int cid = state.containerID.incrementAndGet();
-    ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName())
-        .setState(HdslProtos.LifeCycleState.CLOSED)
-        .setPipeline(null)
-        // This is bytes allocated for blocks inside container, not the
-        // container size
-        .setAllocatedBytes(0)
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("OZONE")
-        .setContainerID(cid)
-        .build();
-    state.stateMap.addContainer(containerInfo);
-  }
-
-  @Benchmark
-  public void getMatchingContainerBenchMark(BenchmarkState state,
-      Blackhole bh) {
-    state.stateMap.getMatchingContainerIDs(OPEN, "BILBO", ONE, STAND_ALONE);
-  }
-
-  @State(Scope.Thread)
-  public static class BenchmarkState {
   public ContainerStateMap stateMap;
   public AtomicInteger containerID;
 
   @Setup(Level.Trial)
   public void initialize() throws IOException {
     stateMap = new ContainerStateMap();
-    Pipeline pipeline = ContainerTestHelper
+    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
-        .createSingleNodePipeline(UUID.randomUUID().toString());
+    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null.");
-
-
     int currentCount = 1;
-    for (int x = 1; x < 1000 * 1000; x++) {
+    for (int x = 1; x < 1000; x++) {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setContainerName(pipeline.getContainerName())
-            .setState(HdslProtos.LifeCycleState.CLOSED)
+            .setState(CLOSED)
-            .setPipeline(null)
+            .setPipeline(pipeline)
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -135,8 +81,8 @@ public void initialize() throws IOException {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setContainerName(pipeline.getContainerName())
-            .setState(HdslProtos.LifeCycleState.OPEN)
+            .setState(OPEN)
-            .setPipeline(null)
+            .setPipeline(pipeline)
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -151,14 +97,12 @@ public void initialize() throws IOException {
       } catch (SCMException e) {
         e.printStackTrace();
       }
-
     }
     try {
-
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
+          .setState(OPEN)
-          .setPipeline(null)
+          .setPipeline(pipeline)
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -176,5 +120,71 @@ public void initialize() throws IOException {
     containerID = new AtomicInteger(currentCount++);
 
   }
+
+  public static Pipeline createSingleNodePipeline(String containerName) throws
+      IOException {
+    return createPipeline(containerName, 1);
+  }
+
+  /**
+   * Create a pipeline with single node replica.
+   *
+   * @return Pipeline with single node in it.
+   * @throws IOException
+   */
+  public static Pipeline createPipeline(String containerName, int numNodes)
+      throws IOException {
+    Preconditions.checkArgument(numNodes >= 1);
+    final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
+    for (int i = 0; i < numNodes; i++) {
+      ids.add(GenesisUtil.createDatanodeDetails(UUID.randomUUID().toString()));
+    }
+    return createPipeline(containerName, ids);
+  }
+
+  public static Pipeline createPipeline(
+      String containerName, Iterable<DatanodeDetails> ids)
+      throws IOException {
+    Objects.requireNonNull(ids, "ids == null");
+    final Iterator<DatanodeDetails> i = ids.iterator();
+    Preconditions.checkArgument(i.hasNext());
+    final DatanodeDetails leader = i.next();
+    String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(5);
+    final PipelineChannel pipelineChannel =
+        new PipelineChannel(leader.getUuidString(), OPEN,
+            ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
+    pipelineChannel.addMember(leader);
+    for (; i.hasNext(); ) {
+      pipelineChannel.addMember(i.next());
+    }
+    return new Pipeline(containerName, pipelineChannel);
+  }
+
+  @Benchmark
+  public void createContainerBenchMark(BenchMarkContainerStateMap state, Blackhole bh)
+      throws IOException {
+    Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
+    int cid = state.containerID.incrementAndGet();
+    ContainerInfo containerInfo = new ContainerInfo.Builder()
+        .setContainerName(pipeline.getContainerName())
+        .setState(CLOSED)
+        .setPipeline(pipeline)
+        // This is bytes allocated for blocks inside container, not the
+        // container size
+        .setAllocatedBytes(0)
+        .setUsedBytes(0)
+        .setNumberOfKeys(0)
+        .setStateEnterTime(Time.monotonicNow())
+        .setOwner("OZONE")
+        .setContainerID(cid)
+        .build();
+    state.stateMap.addContainer(containerInfo);
+  }
+
+  @Benchmark
+  public void getMatchingContainerBenchMark(BenchMarkContainerStateMap state,
+      Blackhole bh) {
+    bh.consume(state.stateMap.getMatchingContainerIDs(OPEN, "BILBO",
+        ReplicationFactor.ONE, ReplicationType.STAND_ALONE));
   }
 }
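A note on the state scopes visible in this patch: BenchMarkContainerStateMap above is declared @State(Scope.Thread), so each benchmark thread works against its own ContainerStateMap preloaded in @Setup(Level.Trial), whereas BenchMarkDatanodeDispatcher below uses @State(Scope.Benchmark), sharing a single Dispatcher (and its containers under java.io.tmpdir) across all benchmark threads.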
@@ -0,0 +1,263 @@
+package org.apache.hadoop.ozone.genesis;
+
+import com.google.protobuf.ByteString;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
+import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
+import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.PipelineChannel;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
+
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.CreateContainerRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadChunkRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.WriteChunkRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.PutKeyRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.GetKeyRequestProto;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
+
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
+
+@State(Scope.Benchmark)
+public class BenchMarkDatanodeDispatcher {
+
+  private String baseDir;
+  private String datanodeUuid;
+  private Dispatcher dispatcher;
+  private PipelineChannel pipelineChannel;
+  private ByteString data;
+  private Random random;
+  private AtomicInteger containerCount;
+  private AtomicInteger keyCount;
+  private AtomicInteger chunkCount;
+
+  @Setup(Level.Trial)
+  public void initialize() throws IOException {
+    datanodeUuid = UUID.randomUUID().toString();
+    pipelineChannel = new PipelineChannel("127.0.0.1",
+        LifeCycleState.OPEN, ReplicationType.STAND_ALONE,
+        ReplicationFactor.ONE, "SA-" + UUID.randomUUID());
+
+    // 1 MB of data
+    data = ByteString.copyFromUtf8(RandomStringUtils.randomAscii(1048576));
+    random = new Random();
+    Configuration conf = new OzoneConfiguration();
+    ContainerManager manager = new ContainerManagerImpl();
+    baseDir = System.getProperty("java.io.tmpdir") + File.separator +
+        datanodeUuid;
+
+    // data directory
+    conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data");
+
+    // metadata directory
+    StorageLocation metadataDir = StorageLocation.parse(
+        baseDir+ File.separator + CONTAINER_ROOT_PREFIX);
+    List<StorageLocation> locations = Arrays.asList(metadataDir);
+
+    manager
+        .init(conf, locations, GenesisUtil.createDatanodeDetails(datanodeUuid));
+    manager.setChunkManager(new ChunkManagerImpl(manager));
+    manager.setKeyManager(new KeyManagerImpl(manager, conf));
+
+    dispatcher = new Dispatcher(manager, conf);
+    dispatcher.init();
+
+    containerCount = new AtomicInteger();
+    keyCount = new AtomicInteger();
+    chunkCount = new AtomicInteger();
+
+    // Create containers
+    for (int x = 0; x < 100; x++) {
+      String containerName = "container-" + containerCount.getAndIncrement();
+      dispatcher.dispatch(getCreateContainerCommand(containerName));
+    }
+    // Add chunk and keys to the containers
+    for (int x = 0; x < 50; x++) {
+      String chunkName = "chunk-" + chunkCount.getAndIncrement();
+      String keyName = "key-" + keyCount.getAndIncrement();
+      for (int y = 0; y < 100; y++) {
+        String containerName = "container-" + y;
+        dispatcher.dispatch(getWriteChunkCommand(containerName, chunkName));
+        dispatcher.dispatch(getPutKeyCommand(containerName, chunkName, keyName));
+      }
+    }
+  }
+
+  @TearDown(Level.Trial)
+  public void cleanup() throws IOException {
+    dispatcher.shutdown();
+    FileUtils.deleteDirectory(new File(baseDir));
+  }
+
+  private ContainerCommandRequestProto getCreateContainerCommand(
+      String containerName) {
+    CreateContainerRequestProto.Builder createRequest = CreateContainerRequestProto
+        .newBuilder();
+    createRequest.setPipeline(
+        new Pipeline(containerName, pipelineChannel).getProtobufMessage());
+    createRequest.setContainerData(
+        ContainerData.newBuilder().setName(containerName).build());
+
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.CreateContainer);
+    request.setCreateContainer(createRequest);
+    request.setDatanodeUuid(datanodeUuid);
+    request.setTraceID(containerName + "-trace");
+    return request.build();
+  }
+
+  private ContainerCommandRequestProto getWriteChunkCommand(
+      String containerName, String key) {
+
+    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
+        .newBuilder()
+        .setPipeline(
+            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(getChunkInfo(containerName, key))
+        .setData(data);
+
+    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
+        .newBuilder();
+    request.setCmdType(ContainerProtos.Type.WriteChunk)
+        .setTraceID(containerName + "-" + key +"-trace")
+        .setDatanodeUuid(datanodeUuid)
+        .setWriteChunk(writeChunkRequest);
+    return request.build();
+  }
+
+  private ContainerCommandRequestProto getReadChunkCommand(
+      String containerName, String key) {
+    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
+        .newBuilder()
+        .setPipeline(
+            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(getChunkInfo(containerName, key));
+    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
+        .newBuilder();
+    request.setCmdType(ContainerProtos.Type.ReadChunk)
+        .setTraceID(containerName + "-" + key +"-trace")
+        .setDatanodeUuid(datanodeUuid)
+        .setReadChunk(readChunkRequest);
+    return request.build();
+  }
+
+  private ContainerProtos.ChunkInfo getChunkInfo(
+      String containerName, String key) {
+    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos.ChunkInfo
+        .newBuilder()
+        .setChunkName(
+            DigestUtils.md5Hex(key) + "_stream_" + containerName + "_chunk_" + key)
+        .setOffset(0)
+        .setLen(data.size());
+    return builder.build();
+  }
+
+  private ContainerCommandRequestProto getPutKeyCommand(
+      String containerName, String chunkKey, String key) {
+    PutKeyRequestProto.Builder putKeyRequest = PutKeyRequestProto
+        .newBuilder()
+        .setPipeline(
+            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
+        .setKeyData(getKeyData(containerName, chunkKey, key));
+    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
+        .newBuilder();
+    request.setCmdType(ContainerProtos.Type.PutKey)
+        .setTraceID(containerName + "-" + key +"-trace")
+        .setDatanodeUuid(datanodeUuid)
+        .setPutKey(putKeyRequest);
+    return request.build();
+  }
+
+  private ContainerCommandRequestProto getGetKeyCommand(
+      String containerName, String chunkKey, String key) {
+    GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto.newBuilder()
+        .setPipeline(
+            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
+        .setKeyData(getKeyData(containerName, chunkKey, key));
+    ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(ContainerProtos.Type.GetKey)
+        .setTraceID(containerName + "-" + key +"-trace")
+        .setDatanodeUuid(datanodeUuid)
+        .setGetKey(readKeyRequest);
+    return request.build();
+  }
+
+  private ContainerProtos.KeyData getKeyData(
+      String containerName, String chunkKey, String key) {
+    ContainerProtos.KeyData.Builder builder = ContainerProtos.KeyData
+        .newBuilder()
+        .setContainerName(containerName)
+        .setName(key)
+        .addChunks(getChunkInfo(containerName, chunkKey));
+    return builder.build();
+  }
+
+  @Benchmark
+  public void createContainer(BenchMarkDatanodeDispatcher bmdd) {
+    bmdd.dispatcher.dispatch(getCreateContainerCommand(
+        "container-" + containerCount.getAndIncrement()));
+  }
+
+
+  @Benchmark
+  public void writeChunk(BenchMarkDatanodeDispatcher bmdd) {
+    String containerName = "container-" + random.nextInt(containerCount.get());
+    bmdd.dispatcher.dispatch(getWriteChunkCommand(
+        containerName, "chunk-" + chunkCount.getAndIncrement()));
+  }
+
+  @Benchmark
+  public void readChunk(BenchMarkDatanodeDispatcher bmdd) {
+    String containerName = "container-" + random.nextInt(containerCount.get());
+    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
+    bmdd.dispatcher.dispatch(getReadChunkCommand(containerName, chunkKey));
+  }
+
+  @Benchmark
+  public void putKey(BenchMarkDatanodeDispatcher bmdd) {
+    String containerName = "container-" + random.nextInt(containerCount.get());
+    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
+    bmdd.dispatcher.dispatch(getPutKeyCommand(
+        containerName, chunkKey,"key-" + keyCount.getAndIncrement()));
+  }
+
+  @Benchmark
+  public void getKey(BenchMarkDatanodeDispatcher bmdd) {
+    String containerName = "container-" + random.nextInt(containerCount.get());
+    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
+    String key = "key-" + random.nextInt(keyCount.get());
+    bmdd.dispatcher.dispatch(getGetKeyCommand(containerName, chunkKey, key));
+  }
+}
@@ -0,0 +1,50 @@
+package org.apache.hadoop.ozone.genesis;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.utils.MetadataStore;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_10MB_TYPE;
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_1GB_TYPE;
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.CLOSED_TYPE;
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
+
+@State(Scope.Thread)
+public class BenchMarkMetadataStoreReads {
+
+  private static final int DATA_LEN = 1024;
+  private static final long maxKeys = 1024 * 10;
+
+  private MetadataStore store;
+
+  @Param({DEFAULT_TYPE, CACHE_10MB_TYPE, CACHE_1GB_TYPE, CLOSED_TYPE})
+  private String type;
+
+  @Setup
+  public void initialize() throws IOException {
+    store = GenesisUtil.getMetadataStore(this.type);
+    byte[] data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
+        .getBytes(Charset.forName("UTF-8"));
+    for (int x = 0; x < maxKeys; x++) {
+      store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
+    }
+    if (type.compareTo(CLOSED_TYPE) == 0) {
+      store.compactDB();
+    }
+  }
+
+  @Benchmark
+  public void test(Blackhole bh) throws IOException {
+    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
+    bh.consume(
+        store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
+  }
+}
@@ -0,0 +1,43 @@
+package org.apache.hadoop.ozone.genesis;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.utils.MetadataStore;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_10MB_TYPE;
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.CACHE_1GB_TYPE;
+import static org.apache.hadoop.ozone.genesis.GenesisUtil.DEFAULT_TYPE;
+
+@State(Scope.Thread)
+public class BenchMarkMetadataStoreWrites {
+
+
+  private static final int DATA_LEN = 1024;
+  private static final long maxKeys = 1024 * 10;
+
+  private MetadataStore store;
+  private byte[] data;
+
+  @Param({DEFAULT_TYPE, CACHE_10MB_TYPE, CACHE_1GB_TYPE})
+  private String type;
+
+  @Setup
+  public void initialize() throws IOException {
+    data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
+        .getBytes(Charset.forName("UTF-8"));
+    store = GenesisUtil.getMetadataStore(this.type);
+  }
+
+  @Benchmark
+  public void test() throws IOException {
+    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, maxKeys);
+    store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
+  }
+}
@ -0,0 +1,115 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 *
 */
package org.apache.hadoop.ozone.genesis;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.RocksDBStore;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.infra.Blackhole;
import org.rocksdb.*;

import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Paths;

@State(Scope.Thread)
public class BenchMarkRocksDbStore {
  private static final int DATA_LEN = 1024;
  private static final long MAX_KEYS = 1024 * 10;
  private static final int DB_FILE_LEN = 7;
  private static final String TMP_DIR = "java.io.tmpdir";

  private MetadataStore store;
  private byte[] data;
  // Keep a handle on the DB directory so the teardown can remove it.
  private File dbFile;

  @Param(value = {"8"})
  private String blockSize; // 4KB default

  @Param(value = {"64"})
  private String writeBufferSize; // 64 MB default

  @Param(value = {"16"})
  private String maxWriteBufferNumber; // 2 default

  @Param(value = {"4"})
  private String maxBackgroundFlushes; // 1 default

  @Param(value = {"512"})
  private String maxBytesForLevelBase;

  @Param(value = {"4"})
  private String backgroundThreads;

  @Param(value = {"5000"})
  private String maxOpenFiles;

  @Setup(Level.Trial)
  public void initialize() throws IOException {
    data = RandomStringUtils.randomAlphanumeric(DATA_LEN)
        .getBytes(Charset.forName("UTF-8"));
    org.rocksdb.Options opts = new org.rocksdb.Options();
    dbFile = Paths.get(System.getProperty(TMP_DIR))
        .resolve(RandomStringUtils.randomNumeric(DB_FILE_LEN))
        .toFile();
    opts.setCreateIfMissing(true);
    opts.setWriteBufferSize(
        (long) StorageUnit.MB.toBytes(Long.valueOf(writeBufferSize)));
    opts.setMaxWriteBufferNumber(Integer.valueOf(maxWriteBufferNumber));
    opts.setMaxBackgroundFlushes(Integer.valueOf(maxBackgroundFlushes));
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockSize(
        (long) StorageUnit.KB.toBytes(Long.valueOf(blockSize)));
    opts.setMaxOpenFiles(Integer.valueOf(maxOpenFiles));
    opts.setMaxBytesForLevelBase(
        (long) StorageUnit.MB.toBytes(Long.valueOf(maxBytesForLevelBase)));
    opts.setCompactionStyle(CompactionStyle.UNIVERSAL);
    opts.setLevel0FileNumCompactionTrigger(10);
    opts.setLevel0SlowdownWritesTrigger(20);
    opts.setLevel0StopWritesTrigger(40);
    opts.setTargetFileSizeBase(
        (long) StorageUnit.MB.toBytes(Long.valueOf(maxBytesForLevelBase)) / 10);
    opts.setMaxBackgroundCompactions(8);
    opts.setUseFsync(false);
    opts.setBytesPerSync(8388608);
    org.rocksdb.Filter bloomFilter = new org.rocksdb.BloomFilter(20);
    tableConfig.setCacheIndexAndFilterBlocks(true);
    tableConfig.setIndexType(IndexType.kHashSearch);
    tableConfig.setFilter(bloomFilter);
    opts.setTableFormatConfig(tableConfig);
    opts.useCappedPrefixExtractor(4);
    store = new RocksDBStore(dbFile, opts);
  }

  @TearDown(Level.Trial)
  public void cleanup() throws IOException {
    store.destroy();
    // Delete the benchmark DB directory created under java.io.tmpdir;
    // deleting new File(TMP_DIR) would point at a literal "java.io.tmpdir"
    // path and silently leave the data behind.
    FileUtils.deleteDirectory(dbFile);
  }

  @Benchmark
  public void test(Blackhole bh) throws IOException {
    long x = org.apache.commons.lang3.RandomUtils.nextLong(0L, MAX_KEYS);
    store.put(Long.toHexString(x).getBytes(Charset.forName("UTF-8")), data);
    bh.consume(
        store.get(Long.toHexString(x).getBytes(Charset.forName("UTF-8"))));
  }
}
@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 *
 */

package org.apache.hadoop.ozone.genesis;

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

/**
 * Main class that executes a set of HDSL/Ozone benchmarks.
 * We purposefully don't use the runner and tools classes from Hadoop
 * because of name collisions with the OpenJDK JMH package.
 * <p>
 * Hence, these classes do not use the Tool/Runner pattern of standard Hadoop
 * CLI.
 */
public class Genesis {
  public static void main(String[] args) throws RunnerException {
    Options opt = new OptionsBuilder()
        .include(BenchMarkContainerStateMap.class.getSimpleName())
        .include(BenchMarkMetadataStoreReads.class.getSimpleName())
        .include(BenchMarkMetadataStoreWrites.class.getSimpleName())
        .include(BenchMarkDatanodeDispatcher.class.getSimpleName())
        .include(BenchMarkRocksDbStore.class.getSimpleName())
        .warmupIterations(5)
        .measurementIterations(20)
        .shouldDoGC(true)
        .forks(1)
        .build();

    new Runner(opt).run();
  }
}
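An aside for readers of the Genesis runner above, not part of this patch: the same JMH OptionsBuilder and Runner calls can be driven from the command line instead of hard-coding every benchmark class. The sketch below is a hypothetical variant; the class name GenesisFiltered and the argument handling are illustrative assumptions, and only the OptionsBuilder/Runner usage mirrors what the commit already contains.

package org.apache.hadoop.ozone.genesis;

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

/**
 * Hypothetical variant of Genesis (not in this commit): runs only the
 * benchmarks whose names match a caller-supplied JMH regex, falling back
 * to all benchmarks on the classpath when no argument is given.
 */
public class GenesisFiltered {
  public static void main(String[] args) throws RunnerException {
    // JMH interprets the include pattern as a regular expression over
    // benchmark names.
    String pattern = args.length > 0 ? args[0] : ".*";
    Options opt = new OptionsBuilder()
        .include(pattern)
        .warmupIterations(5)
        .measurementIterations(20)
        .forks(1)
        .build();
    new Runner(opt).run();
  }
}

For example, passing BenchMarkRocksDbStore as the first argument would run only the RocksDB benchmark with the iteration settings shown above.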
@ -0,0 +1,75 @@
package org.apache.hadoop.ozone.genesis;

import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.Random;

/**
 * Utility class for benchmark test cases.
 */
public class GenesisUtil {

  private GenesisUtil() {
    // private constructor.
  }

  public static final String DEFAULT_TYPE = "default";
  public static final String CACHE_10MB_TYPE = "Cache10MB";
  public static final String CACHE_1GB_TYPE = "Cache1GB";
  public static final String CLOSED_TYPE = "ClosedContainer";

  private static final int DB_FILE_LEN = 7;
  private static final String TMP_DIR = "java.io.tmpdir";

  public static MetadataStore getMetadataStore(String dbType)
      throws IOException {
    Configuration conf = new Configuration();
    MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
    builder.setConf(conf);
    builder.setCreateIfMissing(true);
    builder.setDbFile(
        Paths.get(System.getProperty(TMP_DIR))
            .resolve(RandomStringUtils.randomNumeric(DB_FILE_LEN))
            .toFile());
    switch (dbType) {
    case DEFAULT_TYPE:
      break;
    case CLOSED_TYPE:
      break;
    case CACHE_10MB_TYPE:
      builder.setCacheSize((long) StorageUnit.MB.toBytes(10));
      break;
    case CACHE_1GB_TYPE:
      builder.setCacheSize((long) StorageUnit.GB.toBytes(1));
      break;
    default:
      throw new IllegalStateException("Unknown type: " + dbType);
    }
    return builder.build();
  }

  public static DatanodeDetails createDatanodeDetails(String uuid) {
    Random random = new Random();
    String ipAddress =
        random.nextInt(256) + "." + random.nextInt(256) + "." + random
            .nextInt(256) + "." + random.nextInt(256);

    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
    builder.setUuid(uuid)
        .setHostName("localhost")
        .setIpAddress(ipAddress)
        .setInfoPort(0)
        .setInfoSecurePort(0)
        .setContainerPort(0)
        .setRatisPort(0)
        .setOzoneRestPort(0);
    return builder.build();
  }
}
@ -0,0 +1,25 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 *
 */

/**
 * Genesis contains a set of benchmarks for Ozone. This is a command line tool
 * that can be run by end users to get a sense of what kind of performance
 * the system is capable of; since Ozone is a new system, these benchmarks
 * let us correlate a baseline with real world performance.
 */
package org.apache.hadoop.ozone.genesis;
@ -18,7 +18,7 @@
 package org.apache.hadoop.test;

-import org.apache.hadoop.ozone.tools.Freon;
+import org.apache.hadoop.ozone.freon.Freon;
 import org.apache.hadoop.util.ProgramDriver;

 /**
@ -668,13 +668,11 @@
     <groupId>org.openjdk.jmh</groupId>
     <artifactId>jmh-core</artifactId>
     <version>1.19</version>
-    <scope>test</scope>
   </dependency>
   <dependency>
     <groupId>org.openjdk.jmh</groupId>
     <artifactId>jmh-generator-annprocess</artifactId>
     <version>1.19</version>
-    <scope>test</scope>
   </dependency>