HDFS-13342. Ozone: Rename and fix ozone CLI scripts. Contributed by Shashikant Banerjee.

Mukul Kumar Singh authored on 2018-04-06 16:55:08 +05:30, committed by Owen O'Malley
parent e1470c8e9a
commit 82eb5da2b6
13 changed files with 83 additions and 83 deletions

View File

@@ -17,7 +17,7 @@
version: "3"
services:
namenode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,38 +29,38 @@ services:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/oz","datanode"]
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
jscsi:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
ports:
- 3260:3260
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","jscsi"]
command: ["/opt/hadoop/bin/ozone","jscsi"]
cblock:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","cblockserver"]
command: ["/opt/hadoop/bin/ozone","cblockserver"]
scm:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9876:9876
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","scm"]
command: ["/opt/hadoop/bin/ozone","scm"]
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
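A quick way to exercise the renamed entrypoint with the compose file above (a sketch only: the VERSION value is hypothetical and must match your local build directory, and docker-compose is assumed to be run from the directory holding this docker-compose.yaml):

    # Bring the cblock/jscsi cluster up and call the renamed launcher directly.
    export VERSION=3.2.0-SNAPSHOT          # hypothetical; must match your hadoop-${VERSION} build dir
    docker-compose up -d
    # Every Ozone service in this file now starts via bin/ozone instead of bin/oz:
    docker-compose exec scm /opt/hadoop/bin/ozone scm -help   # -help assumed from the SCM usage string below
    docker-compose down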

View File

@@ -17,7 +17,7 @@
version: "3"
services:
namenode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/oz","datanode"]
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
ksm:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
@@ -47,9 +47,9 @@ services:
ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","ksm"]
command: ["/opt/hadoop/bin/ozone","ksm"]
scm:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
@@ -58,4 +58,4 @@ services:
- ./docker-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/oz","scm"]
command: ["/opt/hadoop/bin/ozone","scm"]
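The acceptance test further below scales this cluster to five datanodes; with docker-compose that is roughly the following (a sketch, the VERSION value is hypothetical):

    export VERSION=3.2.0-SNAPSHOT                  # hypothetical build version
    docker-compose up -d
    docker-compose up -d --scale datanode=5        # older clients: docker-compose scale datanode=5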

View File

@@ -246,12 +246,12 @@ public String getName() {
private static final String USAGE =
"Usage: \n oz scm [genericOptions] "
"Usage: \n ozone scm [genericOptions] "
+ "[ " + StartupOption.INIT.getName() + " [ "
+ StartupOption.CLUSTERID.getName() + " <cid> ] ]\n "
+ "oz scm [genericOptions] [ "
+ "ozone scm [genericOptions] [ "
+ StartupOption.GENCLUSTERID.getName() + " ]\n " +
"oz scm [ "
"ozone scm [ "
+ StartupOption.HELP.getName() + " ]\n";
/**
* Creates a new StorageContainerManager. Configuration will be updated with
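For reference, the invocations described by the rewritten USAGE string look roughly like this; the concrete flag names come from StartupOption and are assumed here to be -init, -clusterid, -genclusterid and -help:

    ozone scm -init -clusterid my-cluster-id   # hypothetical cluster id
    ozone scm -genclusterid
    ozone scm -help
    ozone scm                                  # start the StorageContainerManager daemon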

View File

@@ -17,7 +17,7 @@
version: "3"
services:
namenode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
hostname: namenode
volumes:
- ${HADOOPDIR}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/oz","datanode"]
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
ksm:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
@@ -47,9 +47,9 @@ services:
ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/oz","ksm"]
command: ["/opt/hadoop/bin/ozone","ksm"]
scm:
image: elek/hadoop-runner:o3-refactor
image: apache/hadoop-runner
volumes:
- ${HADOOPDIR}:/opt/hadoop
ports:
@@ -58,4 +58,4 @@ services:
- ./docker-config
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
command: ["/opt/hadoop/bin/oz","scm"]
command: ["/opt/hadoop/bin/ozone","scm"]

View File

@@ -32,7 +32,7 @@ Daemons are running without error
Is daemon running without error datanode
Check if datanode is connected to the scm
Wait Until Keyword Succeeds 2min 5sec Have healthy datanodes 1
Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 1
Scale it up to 5 datanodes
Scale datanodes up 5
@@ -48,15 +48,15 @@ Test rest interface
${result} = Execute on datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
Should contain ${result} 200 OK
Test oz cli
Execute on datanode oz oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
${result} = Execute on datanode oz oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
Test ozone cli
Execute on datanode ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
${result} = Execute on datanode ozone oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
Should contain ${result} createdOn
Execute on datanode oz oz -createBucket http://localhost:9880/hive/bb1
${result} Execute on datanode oz oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
Execute on datanode ozone oz -createBucket http://localhost:9880/hive/bb1
${result} Execute on datanode ozone oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
Should Be Equal ${result} hive
Execute on datanode oz oz -deleteBucket http://localhost:9880/hive/bb1
Execute on datanode oz oz -deleteVolume http://localhost:9880/hive -user bilbo
Execute on datanode ozone oz -deleteBucket http://localhost:9880/hive/bb1
Execute on datanode ozone oz -deleteVolume http://localhost:9880/hive -user bilbo
@@ -67,7 +67,7 @@ Check webui static resources
Should contain ${result} 200
Start freon testing
${result} = Execute on ksm oz freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
${result} = Execute on ksm ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125
Should Not Contain ${result} ERROR
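Run by hand, the renamed commands exercised by this test look like the following (ports, names and flags are exactly those used in the test above):

    ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota 100TB -root
    ozone oz -listVolume http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | select(.volumeName=="hive")'
    ozone oz -createBucket http://localhost:9880/hive/bb1
    ozone oz -listBucket http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    ozone oz -deleteBucket http://localhost:9880/hive/bb1
    ozone oz -deleteVolume http://localhost:9880/hive -user bilbo
    ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10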

View File

@@ -16,7 +16,7 @@
# limitations under the License.
# The name of the script being executed.
HADOOP_SHELL_EXECNAME="oz"
HADOOP_SHELL_EXECNAME="ozone"
MYNAME="${BASH_SOURCE-$0}"
## @description build up the hdfs command's usage text.
@@ -98,13 +98,13 @@ function ozonecmd_case
exit 0
;;
freon)
HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
;;
genesis)
HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
;;
getozoneconf)
HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.OzoneGetConf
HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
;;
jscsi)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -192,7 +192,7 @@ fi
hadoop_add_client_opts
if [[ ${HADOOP_WORKER_MODE} = true ]]; then
hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/oz" "${HADOOP_USER_PARAMS[@]}"
hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" "${HADOOP_USER_PARAMS[@]}"
exit $?
fi
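A stripped-down sketch of the dispatch pattern the script above uses (not the full bin/ozone; the class names are the ones visible in this hunk):

    #!/usr/bin/env bash
    # The subcommand selects a HADOOP_CLASSNAME; the generic Hadoop shell
    # framework then launches that class with the remaining arguments.
    HADOOP_SHELL_EXECNAME="ozone"
    subcmd="${1:?usage: ozone <subcommand> [args]}"
    shift
    case "${subcmd}" in
      freon)        HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon ;;
      genesis)      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis ;;
      getozoneconf) HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf ;;
      *)            echo "Unknown ${HADOOP_SHELL_EXECNAME} subcommand: ${subcmd}" >&2; exit 1 ;;
    esac
    echo "would exec: java ... ${HADOOP_CLASSNAME} $*"   # the real script delegates to hadoop-functions.sh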

View File

@@ -47,8 +47,8 @@ else
exit 1
fi
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi
#---------------------------------------------------------
# Check if ozone is enabled
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ "${OZONE_ENABLED}" != "true" ]]; then
echo "Operation is not supported because ozone is not enabled."
exit -1
@@ -74,13 +74,13 @@ fi
#---------------------------------------------------------
# Ozone keyspacemanager nodes
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 2>/dev/null)
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
echo "Starting key space manager nodes [${KSM_NODES}]"
if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
KSM_NODES=$(hostname)
fi
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
--workers \
--config "${HADOOP_CONF_DIR}" \
--hostnames "${KSM_NODES}" \
@@ -91,9 +91,9 @@ HADOOP_JUMBO_RETCOUNTER=$?
#---------------------------------------------------------
# Ozone storagecontainermanager nodes
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -storagecontainermanagers 2>/dev/null)
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null)
echo "Starting storage container manager nodes [${SCM_NODES}]"
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
--workers \
--config "${HADOOP_CONF_DIR}" \
--hostnames "${SCM_NODES}" \

View File

@@ -47,8 +47,8 @@ else
exit 1
fi
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi
#---------------------------------------------------------
# Check if ozone is enabled
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ "${OZONE_ENABLED}" != "true" ]]; then
echo "Operation is not supported because ozone is not enabled."
exit -1
@@ -74,13 +74,13 @@ fi
#---------------------------------------------------------
# Ozone keyspacemanager nodes
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 2>/dev/null)
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
echo "Stopping key space manager nodes [${KSM_NODES}]"
if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
KSM_NODES=$(hostname)
fi
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
--workers \
--config "${HADOOP_CONF_DIR}" \
--hostnames "${KSM_NODES}" \
@@ -89,9 +89,9 @@ hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
#---------------------------------------------------------
# Ozone storagecontainermanager nodes
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -storagecontainermanagers 2>/dev/null)
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null)
echo "Stopping storage container manager nodes [${SCM_NODES}]"
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
--workers \
--config "${HADOOP_CONF_DIR}" \
--hostnames "${SCM_NODES}" \

View File

@@ -43,7 +43,7 @@
*/
public class OzoneGetConf extends Configured implements Tool {
private static final String DESCRIPTION = "oz getconf is utility for "
private static final String DESCRIPTION = "ozone getconf is utility for "
+ "getting configuration information from the config file.\n";
enum Command {
@@ -102,7 +102,7 @@ public static OzoneGetConf.CommandHandler getHandler(String cmd) {
/* Initialize USAGE based on Command values */
StringBuilder usage = new StringBuilder(DESCRIPTION);
usage.append("\noz getconf \n");
usage.append("\nozone getconf \n");
for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
+ "\n");
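The start/stop scripts above and this utility combine into calls such as the following, taken verbatim from those scripts but with the renamed launcher:

    ozone getozoneconf -confKey ozone.enabled
    ozone getozoneconf -confKey hadoop.security.authentication
    ozone getozoneconf -keyspacemanagers            # KSM hostnames used by start/stop-ozone.sh
    ozone getozoneconf -storagecontainermanagers    # SCM hostnames used by start/stop-ozone.sh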

View File

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ "${HADOOP_SHELL_EXECNAME}" = oz ]]; then
if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then
hadoop_add_profile ozone
fi

View File

@@ -106,8 +106,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
LoggerFactory.getLogger(KeySpaceManager.class);
private static final String USAGE =
"Usage: \n oz ksm [genericOptions] " + "[ "
+ StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "oz ksm [ "
"Usage: \n ozone ksm [genericOptions] " + "[ "
+ StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ "
+ StartupOption.HELP.getName() + " ]\n";
/** Startup options. */
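Analogous to the SCM usage earlier, the rewritten KSM usage corresponds to invocations of roughly this shape; -createObjectStore and -help are assumed to be the names behind the StartupOption constants:

    ozone ksm -createObjectStore   # initialize the KSM object store metadata
    ozone ksm -help
    ozone ksm                      # start the KeySpaceManager daemon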

View File

@@ -193,25 +193,25 @@ private void addVolumeCommands(Options options) {
Option createVolume = new Option(CREATE_VOLUME, true, "creates a volume" +
"for the specified user.\n \t For example : hdfs oz -createVolume " +
"for the specified user.\n \t For example : hdfs o3 -createVolume " +
"<volumeURI> -root -user <userName>\n");
options.addOption(createVolume);
Option deleteVolume = new Option(DELETE_VOLUME, true, "deletes a volume" +
"if it is empty.\n \t For example : hdfs oz -deleteVolume <volumeURI>" +
"if it is empty.\n \t For example : ozone oz -deleteVolume <volumeURI>" +
" -root \n");
options.addOption(deleteVolume);
Option listVolume =
new Option(LIST_VOLUME, true, "List the volumes of a given user.\n" +
"For example : hdfs oz -listVolume <ozoneURI>" +
"-user <username> -root or hdfs oz " +
"For example : ozone oz -listVolume <ozoneURI>" +
"-user <username> -root or ozone oz " +
"-listVolume");
options.addOption(listVolume);
Option updateVolume =
new Option(UPDATE_VOLUME, true, "updates an existing volume.\n" +
"\t For example : hdfs oz " +
"\t For example : ozone oz " +
"-updateVolume <volumeURI> -quota " +
"100TB\n");
options.addOption(updateVolume);
@@ -230,7 +230,7 @@ private void addVolumeCommands(Options options) {
private void addBucketCommands(Options opts) {
Option createBucket = new Option(CREATE_BUCKET, true,
"creates a bucket in a given volume." +
"For example: hdfs oz -createBucket <bucketURI>");
"For example: ozone oz -createBucket <bucketURI>");
opts.addOption(createBucket);
Option infoBucket =
@@ -247,7 +247,7 @@ private void addBucketCommands(Options opts) {
Option updateBucket =
new Option(UPDATE_BUCKET, true, "allows changing bucket attributes.\n" +
" For example: hdfs oz -updateBucket <bucketURI> " +
" For example: ozone oz -updateBucket <bucketURI> " +
"-addAcl user:frodo:rw");
opts.addOption(updateBucket);
@@ -396,7 +396,7 @@ private int dispatch(CommandLine cmd, Options opts)
return 0;
} else {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp(eightyColumn, "hdfs oz -command uri [args]",
helpFormatter.printHelp(eightyColumn, "ozone oz -command uri [args]",
"Ozone Commands",
opts, "Please correct your command and try again.");
return 1;

View File

@@ -21,14 +21,14 @@ with simple authentication.
The Ozone commands take the following format.
* `hdfs oz --command_ http://hostname:port/volume/bucket/key -user
* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
<name> -root`
The *port* specified in command should match the port mentioned in the config
property `dfs.datanode.http.address`. This property can be set in `hdfs-site.xml`.
The default value for the port is `9864` and is used in below commands.
The *--root* option is a command line short cut that allows *hdfs oz*
The *--root* option is a command line short cut that allows *ozone oz*
commands to be run as the user that started the cluster. This is useful to
indicate that you want the commands to be run as some admin user. The only
reason for this option is that it makes the life of a lazy developer more
@@ -44,7 +44,7 @@ ozone cluster.
Volumes can be created only by Admins. Here is an example of creating a volume.
* `hdfs oz -createVolume http://localhost:9864/hive -user bilbo -quota
* `ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota
100TB -root`
The above command creates a volume called `hive` owned by user `bilbo`. The
@@ -55,26 +55,26 @@ admin in the cluster.
Updates information like ownership and quota on an existing volume.
* `hdfs oz -updateVolume http://localhost:9864/hive -quota 500TB -root`
* `ozone oz -updateVolume http://localhost:9864/hive -quota 500TB -root`
The above command changes the volume quota of hive from 100TB to 500TB.
### Delete Volume
Deletes a Volume if it is empty.
* `hdfs oz -deleteVolume http://localhost:9864/hive -root`
* `ozone oz -deleteVolume http://localhost:9864/hive -root`
### Info Volume
Info volume command allows the owner or the administrator of the cluster to read meta-data about a specific volume.
* `hdfs oz -infoVolume http://localhost:9864/hive -root`
* `ozone oz -infoVolume http://localhost:9864/hive -root`
### List Volumes
List volume command can be used by administrator to list volumes of any user. It can also be used by a user to list volumes owned by him.
* `hdfs oz -listVolume http://localhost:9864/ -user bilbo -root`
* `ozone oz -listVolume http://localhost:9864/ -user bilbo -root`
The above command lists all volumes owned by user bilbo.
@@ -89,7 +89,7 @@ Following examples assume that these commands are run by the owner of the volume
Create bucket call allows the owner of a volume to create a bucket.
* `hdfs oz -createBucket http://localhost:9864/hive/january`
* `ozone oz -createBucket http://localhost:9864/hive/january`
This call creates a bucket called `january` in the volume called `hive`. If
the volume does not exist, then this call will fail.
@@ -98,23 +98,23 @@ the volume does not exist, then this call will fail.
### Update Bucket
Updates bucket meta-data, like ACLs.
* `hdfs oz -updateBucket http://localhost:9864/hive/january -addAcl
* `ozone oz -updateBucket http://localhost:9864/hive/january -addAcl
user:spark:rw`
### Delete Bucket
Deletes a bucket if it is empty.
* `hdfs oz -deleteBucket http://localhost:9864/hive/january`
* `ozone oz -deleteBucket http://localhost:9864/hive/january`
### Info Bucket
Returns information about a given bucket.
* `hdfs oz -infoBucket http://localhost:9864/hive/january`
* `ozone oz -infoBucket http://localhost:9864/hive/january`
### List Buckets
List buckets on a given volume.
* `hdfs oz -listBucket http://localhost:9864/hive`
* `ozone oz -listBucket http://localhost:9864/hive`
Ozone Key Commands
------------------
@@ -125,26 +125,26 @@ Ozone key commands allows users to put, delete and get keys from ozone buckets.
Creates or overwrites a key in ozone store, -file points to the file you want
to upload.
* `hdfs oz -putKey http://localhost:9864/hive/january/processed.orc -file
* `ozone oz -putKey http://localhost:9864/hive/january/processed.orc -file
processed.orc`
### Get Key
Downloads a file from the ozone bucket.
* `hdfs oz -getKey http://localhost:9864/hive/january/processed.orc -file
* `ozone oz -getKey http://localhost:9864/hive/january/processed.orc -file
processed.orc.copy`
### Delete Key
Deletes a key from the ozone store.
* `hdfs oz -deleteKey http://localhost:9864/hive/january/processed.orc`
* `ozone oz -deleteKey http://localhost:9864/hive/january/processed.orc`
### Info Key
Reads key metadata from the ozone store.
* `hdfs oz -infoKey http://localhost:9864/hive/january/processed.orc`
* `ozone oz -infoKey http://localhost:9864/hive/january/processed.orc`
### List Keys
List all keys in an ozone bucket.
* `hdfs oz -listKey http://localhost:9864/hive/january`
* `ozone oz -listKey http://localhost:9864/hive/january`
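Put together, the documented commands give the following end-to-end session with the renamed launcher (names, port and flags are those used in the document above):

    ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota 100TB -root
    ozone oz -createBucket http://localhost:9864/hive/january
    ozone oz -putKey http://localhost:9864/hive/january/processed.orc -file processed.orc
    ozone oz -getKey http://localhost:9864/hive/january/processed.orc -file processed.orc.copy
    ozone oz -listKey http://localhost:9864/hive/january
    ozone oz -deleteKey http://localhost:9864/hive/january/processed.orc
    ozone oz -deleteBucket http://localhost:9864/hive/january
    ozone oz -deleteVolume http://localhost:9864/hive -root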