HDFS-13342. Ozone: Rename and fix ozone CLI scripts. Contributed by Shashikant Banerjee.
commit 82eb5da2b6
parent e1470c8e9a
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,38 +29,38 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
    jscsi:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       ports:
         - 3260:3260
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
         - ./docker-config
-      command: ["/opt/hadoop/bin/oz","jscsi"]
+      command: ["/opt/hadoop/bin/ozone","jscsi"]
    cblock:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
         - ./docker-config
-      command: ["/opt/hadoop/bin/oz","cblockserver"]
+      command: ["/opt/hadoop/bin/ozone","cblockserver"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9876:9876
       env_file:
         - ./docker-config
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]
       environment:
          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
         - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
    ksm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -47,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
         - ./docker-config
-      command: ["/opt/hadoop/bin/oz","ksm"]
+      command: ["/opt/hadoop/bin/ozone","ksm"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -58,4 +58,4 @@ services:
         - ./docker-config
       environment:
          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]
@@ -246,12 +246,12 @@ public String getName() {


   private static final String USAGE =
-      "Usage: \n oz scm [genericOptions] "
+      "Usage: \n ozone scm [genericOptions] "
          + "[ " + StartupOption.INIT.getName() + " [ "
          + StartupOption.CLUSTERID.getName() + " <cid> ] ]\n "
-         + "oz scm [genericOptions] [ "
+         + "ozone scm [genericOptions] [ "
          + StartupOption.GENCLUSTERID.getName() + " ]\n " +
-         "oz scm [ "
+         "ozone scm [ "
          + StartupOption.HELP.getName() + " ]\n";
   /**
    * Creates a new StorageContainerManager. Configuration will be updated with
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
         - ${HADOOPDIR}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
         - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ${HADOOPDIR}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
    ksm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ${HADOOPDIR}:/opt/hadoop
       ports:
@@ -47,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
         - ./docker-config
-      command: ["/opt/hadoop/bin/oz","ksm"]
+      command: ["/opt/hadoop/bin/ozone","ksm"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ${HADOOPDIR}:/opt/hadoop
       ports:
@@ -58,4 +58,4 @@ services:
         - ./docker-config
       environment:
          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]
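For context, compose definitions like the ones above are normally driven with plain docker-compose commands. A minimal sketch, assuming the file is saved as docker-compose.yaml and that HADOOPDIR (the path below is hypothetical) points at an already built Hadoop tree:

    # run from the directory holding this docker-compose.yaml
    export HADOOPDIR=/path/to/hadoop-dist/target/hadoop   # hypothetical build output path
    docker-compose up -d        # start namenode, datanode, ksm and scm containers
    docker-compose logs -f scm  # follow one service while it initializes
    docker-compose down         # tear the cluster back down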
@@ -32,7 +32,7 @@ Daemons are running without error
    Is daemon running without error  datanode

Check if datanode is connected to the scm
-    Wait Until Keyword Succeeds  2min  5sec  Have healthy datanodes  1
+    Wait Until Keyword Succeeds  3min  5sec  Have healthy datanodes  1

Scale it up to 5 datanodes
    Scale datanodes up  5
@@ -48,15 +48,15 @@ Test rest interface
    ${result} =  Execute on  datanode  curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain  ${result}  200 OK

-Test oz cli
-    Execute on  datanode  oz oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
-    ${result} =  Execute on  datanode  oz oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
+Test ozone cli
+    Execute on  datanode  ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
+    ${result} =  Execute on  datanode  ozone oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
    Should contain  ${result}  createdOn
-    Execute on  datanode  oz oz -createBucket http://localhost:9880/hive/bb1
-    ${result}  Execute on  datanode  oz oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    Execute on  datanode  ozone oz -createBucket http://localhost:9880/hive/bb1
+    ${result}  Execute on  datanode  ozone oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal  ${result}  hive
-    Execute on  datanode  oz oz -deleteBucket http://localhost:9880/hive/bb1
-    Execute on  datanode  oz oz -deleteVolume http://localhost:9880/hive -user bilbo
+    Execute on  datanode  ozone oz -deleteBucket http://localhost:9880/hive/bb1
+    Execute on  datanode  ozone oz -deleteVolume http://localhost:9880/hive -user bilbo



@@ -67,7 +67,7 @@ Check webui static resources
    Should contain  ${result}  200

Start freon testing
-    ${result} =  Execute on  ksm  oz freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+    ${result} =  Execute on  ksm  ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
    Wait Until Keyword Succeeds  3min  10sec  Should contain  ${result}  Number of Keys added: 125
    Should Not Contain  ${result}  ERROR

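The same smoke test can be replayed by hand against a running datanode container; a hedged sketch using only the renamed commands that appear in the test above (the REST port 9880 and the sample names come from the test itself):

    # inside the datanode container, e.g. via: docker-compose exec datanode bash
    ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
    ozone oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
    ozone oz -createBucket http://localhost:9880/hive/bb1
    ozone oz -deleteBucket http://localhost:9880/hive/bb1
    ozone oz -deleteVolume http://localhost:9880/hive -user bilbo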
@@ -16,7 +16,7 @@
 # limitations under the License.

 # The name of the script being executed.
-HADOOP_SHELL_EXECNAME="oz"
+HADOOP_SHELL_EXECNAME="ozone"
 MYNAME="${BASH_SOURCE-$0}"

 ## @description build up the hdfs command's usage text.
@@ -98,13 +98,13 @@ function ozonecmd_case
       exit 0
     ;;
     freon)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
     ;;
     genesis)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
     ;;
     getozoneconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.OzoneGetConf
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
     ;;
     jscsi)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -192,7 +192,7 @@ fi
 hadoop_add_client_opts

 if [[ ${HADOOP_WORKER_MODE} = true ]]; then
-  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/oz" "${HADOOP_USER_PARAMS[@]}"
+  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi

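After the rename the entry point is invoked as `ozone <subcommand>`; a brief sketch of invocations assembled from the case statement, compose files and test above (no flags beyond those shown elsewhere in this commit are implied):

    ozone scm        # storage container manager daemon
    ozone ksm        # key space manager daemon
    ozone datanode   # ozone-enabled datanode
    ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
    ozone getozoneconf -confKey ozone.enabled   # read a config key, as the start/stop scripts do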
@@ -47,8 +47,8 @@ else
   exit 1
 fi

-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)

 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi

 #---------------------------------------------------------
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -74,13 +74,13 @@ fi

 #---------------------------------------------------------
 # Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 2>/dev/null)
+KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
 echo "Starting key space manager nodes [${KSM_NODES}]"
 if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
   KSM_NODES=$(hostname)
 fi

-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${KSM_NODES}" \
@@ -91,9 +91,9 @@ HADOOP_JUMBO_RETCOUNTER=$?

 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -storagecontainermanagers 2>/dev/null)
+SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null)
 echo "Starting storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${SCM_NODES}" \
@@ -47,8 +47,8 @@ else
   exit 1
 fi

-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)

 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi

 #---------------------------------------------------------
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -74,13 +74,13 @@ fi

 #---------------------------------------------------------
 # Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 2>/dev/null)
+KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
 echo "Stopping key space manager nodes [${KSM_NODES}]"
 if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
   KSM_NODES=$(hostname)
 fi

-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${KSM_NODES}" \
@@ -89,9 +89,9 @@ hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \

 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -storagecontainermanagers 2>/dev/null)
+SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null)
 echo "Stopping storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${SCM_NODES}" \
@@ -43,7 +43,7 @@
  */
 public class OzoneGetConf extends Configured implements Tool {

-  private static final String DESCRIPTION = "oz getconf is utility for "
+  private static final String DESCRIPTION = "ozone getconf is utility for "
       + "getting configuration information from the config file.\n";

   enum Command {
@@ -102,7 +102,7 @@ public static OzoneGetConf.CommandHandler getHandler(String cmd) {

     /* Initialize USAGE based on Command values */
     StringBuilder usage = new StringBuilder(DESCRIPTION);
-    usage.append("\noz getconf \n");
+    usage.append("\nozone getconf \n");
     for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
       usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
           + "\n");
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-if [[ "${HADOOP_SHELL_EXECNAME}" = oz ]]; then
+if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then
   hadoop_add_profile ozone
 fi

@@ -106,8 +106,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
       LoggerFactory.getLogger(KeySpaceManager.class);

   private static final String USAGE =
-      "Usage: \n oz ksm [genericOptions] " + "[ "
-          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "oz ksm [ "
+      "Usage: \n ozone ksm [genericOptions] " + "[ "
+          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ "
           + StartupOption.HELP.getName() + " ]\n";

   /** Startup options. */
@@ -193,25 +193,25 @@ private void addVolumeCommands(Options options) {


     Option createVolume = new Option(CREATE_VOLUME, true, "creates a volume" +
-        "for the specified user.\n \t For example : hdfs oz -createVolume " +
+        "for the specified user.\n \t For example : hdfs o3 -createVolume " +
         "<volumeURI> -root -user <userName>\n");
     options.addOption(createVolume);

     Option deleteVolume = new Option(DELETE_VOLUME, true, "deletes a volume" +
-        "if it is empty.\n \t For example : hdfs oz -deleteVolume <volumeURI>" +
+        "if it is empty.\n \t For example : ozone oz -deleteVolume <volumeURI>" +
         " -root \n");
     options.addOption(deleteVolume);

     Option listVolume =
         new Option(LIST_VOLUME, true, "List the volumes of a given user.\n" +
-            "For example : hdfs oz -listVolume <ozoneURI>" +
-            "-user <username> -root or hdfs oz " +
+            "For example : ozone oz -listVolume <ozoneURI>" +
+            "-user <username> -root or ozone oz " +
             "-listVolume");
     options.addOption(listVolume);

     Option updateVolume =
         new Option(UPDATE_VOLUME, true, "updates an existing volume.\n" +
-            "\t For example : hdfs oz " +
+            "\t For example : ozone oz " +
             "-updateVolume <volumeURI> -quota " +
             "100TB\n");
     options.addOption(updateVolume);
@@ -230,7 +230,7 @@ private void addVolumeCommands(Options options) {
   private void addBucketCommands(Options opts) {
     Option createBucket = new Option(CREATE_BUCKET, true,
         "creates a bucket in a given volume." +
-        "For example: hdfs oz -createBucket <bucketURI>");
+        "For example: ozone oz -createBucket <bucketURI>");
     opts.addOption(createBucket);

     Option infoBucket =
@@ -247,7 +247,7 @@ private void addBucketCommands(Options opts) {

     Option updateBucket =
         new Option(UPDATE_BUCKET, true, "allows changing bucket attributes.\n" +
-            " For example: hdfs oz -updateBucket <bucketURI> " +
+            " For example: ozone oz -updateBucket <bucketURI> " +
             "-addAcl user:frodo:rw");
     opts.addOption(updateBucket);

@@ -396,7 +396,7 @@ private int dispatch(CommandLine cmd, Options opts)
       return 0;
     } else {
       HelpFormatter helpFormatter = new HelpFormatter();
-      helpFormatter.printHelp(eightyColumn, "hdfs oz -command uri [args]",
+      helpFormatter.printHelp(eightyColumn, "ozone oz -command uri [args]",
           "Ozone Commands",
           opts, "Please correct your command and try again.");
       return 1;
@@ -21,14 +21,14 @@ with simple authentication.

 The Ozone commands take the following format.

-* `hdfs oz --command_ http://hostname:port/volume/bucket/key -user
+* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
 <name> -root`

 The *port* specified in command should match the port mentioned in the config
 property `dfs.datanode.http.address`. This property can be set in `hdfs-site.xml`.
 The default value for the port is `9864` and is used in below commands.

-The *--root* option is a command line short cut that allows *hdfs oz*
+The *--root* option is a command line short cut that allows *ozone oz*
 commands to be run as the user that started the cluster. This is useful to
 indicate that you want the commands to be run as some admin user. The only
 reason for this option is that it makes the life of a lazy developer more
@@ -44,7 +44,7 @@ ozone cluster.

 Volumes can be created only by Admins. Here is an example of creating a volume.

-* `hdfs oz -createVolume http://localhost:9864/hive -user bilbo -quota
+* `ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota
 100TB -root`

 The above command creates a volume called `hive` owned by user `bilbo`. The
@@ -55,26 +55,26 @@ admin in the cluster.

 Updates information like ownership and quota on an existing volume.

-* `hdfs oz -updateVolume http://localhost:9864/hive -quota 500TB -root`
+* `ozone oz -updateVolume http://localhost:9864/hive -quota 500TB -root`

 The above command changes the volume quota of hive from 100TB to 500TB.

 ### Delete Volume
 Deletes a Volume if it is empty.

-* `hdfs oz -deleteVolume http://localhost:9864/hive -root`
+* `ozone oz -deleteVolume http://localhost:9864/hive -root`


 ### Info Volume
 Info volume command allows the owner or the administrator of the cluster to read meta-data about a specific volume.

-* `hdfs oz -infoVolume http://localhost:9864/hive -root`
+* `ozone oz -infoVolume http://localhost:9864/hive -root`

 ### List Volumes

 List volume command can be used by administrator to list volumes of any user. It can also be used by a user to list volumes owned by him.

-* `hdfs oz -listVolume http://localhost:9864/ -user bilbo -root`
+* `ozone oz -listVolume http://localhost:9864/ -user bilbo -root`

 The above command lists all volumes owned by user bilbo.

@@ -89,7 +89,7 @@ Following examples assume that these commands are run by the owner of the volume

 Create bucket call allows the owner of a volume to create a bucket.

-* `hdfs oz -createBucket http://localhost:9864/hive/january`
+* `ozone oz -createBucket http://localhost:9864/hive/january`

 This call creates a bucket called `january` in the volume called `hive`. If
 the volume does not exist, then this call will fail.
@@ -98,23 +98,23 @@ the volume does not exist, then this call will fail.
 ### Update Bucket
 Updates bucket meta-data, like ACLs.

-* `hdfs oz -updateBucket http://localhost:9864/hive/january -addAcl
+* `ozone oz -updateBucket http://localhost:9864/hive/january -addAcl
 user:spark:rw`

 ### Delete Bucket
 Deletes a bucket if it is empty.

-* `hdfs oz -deleteBucket http://localhost:9864/hive/january`
+* `ozone oz -deleteBucket http://localhost:9864/hive/january`

 ### Info Bucket
 Returns information about a given bucket.

-* `hdfs oz -infoBucket http://localhost:9864/hive/january`
+* `ozone oz -infoBucket http://localhost:9864/hive/january`

 ### List Buckets
 List buckets on a given volume.

-* `hdfs oz -listBucket http://localhost:9864/hive`
+* `ozone oz -listBucket http://localhost:9864/hive`

 Ozone Key Commands
 ------------------
@@ -125,26 +125,26 @@ Ozone key commands allows users to put, delete and get keys from ozone buckets.
 Creates or overwrites a key in ozone store, -file points to the file you want
 to upload.

-* `hdfs oz -putKey http://localhost:9864/hive/january/processed.orc -file
+* `ozone oz -putKey http://localhost:9864/hive/january/processed.orc -file
 processed.orc`

 ### Get Key
 Downloads a file from the ozone bucket.

-* `hdfs oz -getKey http://localhost:9864/hive/january/processed.orc -file
+* `ozone oz -getKey http://localhost:9864/hive/january/processed.orc -file
 processed.orc.copy`

 ### Delete Key
 Deletes a key from the ozone store.

-* `hdfs oz -deleteKey http://localhost:9864/hive/january/processed.orc`
+* `ozone oz -deleteKey http://localhost:9864/hive/january/processed.orc`

 ### Info Key
 Reads key metadata from the ozone store.

-* `hdfs oz -infoKey http://localhost:9864/hive/january/processed.orc`
+* `ozone oz -infoKey http://localhost:9864/hive/january/processed.orc`

 ### List Keys
 List all keys in an ozone bucket.

-* `hdfs oz -listKey http://localhost:9864/hive/january`
+* `ozone oz -listKey http://localhost:9864/hive/january`
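Putting the renamed commands from the documentation above together, a typical round trip looks roughly like this. This is a sketch only; the host and port follow the documented default of localhost:9864, and the volume, bucket and key names are the documentation's own examples:

    ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota 100TB -root
    ozone oz -createBucket http://localhost:9864/hive/january
    ozone oz -putKey http://localhost:9864/hive/january/processed.orc -file processed.orc
    ozone oz -getKey http://localhost:9864/hive/january/processed.orc -file processed.orc.copy
    ozone oz -listKey http://localhost:9864/hive/january
    ozone oz -deleteKey http://localhost:9864/hive/january/processed.orc
    ozone oz -deleteBucket http://localhost:9864/hive/january
    ozone oz -deleteVolume http://localhost:9864/hive -root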