diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 12c2102e33..2a80a3d54b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -20,6 +20,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ozone.client.ReplicationFactor;
+import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
@@ -121,6 +123,14 @@ public final class OzoneConfigKeys {
       "ozone.client.connection.timeout.ms";
   public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT = 5000;
 
+  public static final String OZONE_REPLICATION = "ozone.replication";
+  public static final int OZONE_REPLICATION_DEFAULT =
+      ReplicationFactor.THREE.getValue();
+
+  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
+  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
+      ReplicationType.RATIS.toString();
+
   /**
    * Configuration property to configure the cache size of client list calls.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index 5d07df29bf..39b7bb8403 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@ -36,7 +36,7 @@ public final class BucketArgs {
   /**
    * Bucket Version flag.
    */
-  private Boolean isVersionEnabled;
+  private Boolean versioning;
   /**
    * Type of storage to be used for this bucket.
    * [RAM_DISK, SSD, DISK, ARCHIVE]
@@ -45,14 +45,14 @@ public final class BucketArgs {
 
   /**
    * Private constructor, constructed via builder.
-   * @param isVersionEnabled Bucket version flag.
+   * @param versioning Bucket version flag.
    * @param storageType Storage type to be used.
    * @param acls list of ACLs.
    */
-  private BucketArgs(Boolean isVersionEnabled, StorageType storageType,
+  private BucketArgs(Boolean versioning, StorageType storageType,
                      List<OzoneAcl> acls) {
     this.acls = acls;
-    this.isVersionEnabled = isVersionEnabled;
+    this.versioning = versioning;
     this.storageType = storageType;
   }
 
@@ -60,8 +60,8 @@ private BucketArgs(Boolean isVersionEnabled, StorageType storageType,
    * Returns true if bucket version is enabled, else false.
    * @return isVersionEnabled
    */
-  public Boolean isVersionEnabled() {
-    return isVersionEnabled;
+  public Boolean getVersioning() {
+    return versioning;
   }
 
   /**
@@ -93,12 +93,12 @@ public static BucketArgs.Builder newBuilder() {
    * Builder for KsmBucketInfo.
    */
  public static class Builder {
-    private Boolean isVersionEnabled;
+    private Boolean versioning;
     private StorageType storageType;
     private List<OzoneAcl> acls;
 
-    public BucketArgs.Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
+    public BucketArgs.Builder setVersioning(Boolean versionFlag) {
+      this.versioning = versionFlag;
       return this;
     }
 
@@ -117,7 +117,7 @@ public BucketArgs.Builder setAcls(List<OzoneAcl> listOfAcls) {
      * @return instance of BucketArgs.
      */
     public BucketArgs build() {
-      return new BucketArgs(isVersionEnabled, storageType, acls);
+      return new BucketArgs(versioning, storageType, acls);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 25dc3e368b..b94e0f7122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -22,11 +22,11 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -51,6 +51,15 @@ public class OzoneBucket {
    * Name of the bucket.
    */
   private final String name;
+  /**
+   * Default replication factor to be used while creating keys.
+   */
+  private final ReplicationFactor defaultReplication;
+
+  /**
+   * Default replication type to be used while creating keys.
+   */
+  private final ReplicationType defaultReplicationType;
   /**
    * Bucket ACLs.
    */
@@ -100,6 +109,12 @@ public OzoneBucket(Configuration conf, ClientProtocol proxy,
     this.versioning = versioning;
     this.listCacheSize = OzoneClientUtils.getListCacheSize(conf);
     this.creationTime = creationTime;
+    this.defaultReplication = ReplicationFactor.valueOf(conf.getInt(
+        OzoneConfigKeys.OZONE_REPLICATION,
+        OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
+    this.defaultReplicationType = ReplicationType.valueOf(conf.get(
+        OzoneConfigKeys.OZONE_REPLICATION_TYPE,
+        OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
   }
 
   /**
@@ -206,17 +221,37 @@ public void setVersioning(Boolean newVersioning) throws IOException {
   }
 
   /**
-   * Creates a new key in the bucket.
+   * Creates a new key in the bucket, with default replication type RATIS and
+   * with replication factor THREE.
    * @param key Name of the key to be created.
    * @param size Size of the data the key will point to.
    * @return OzoneOutputStream to which the data has to be written.
    * @throws IOException
    */
-  public OzoneOutputStream createKey(String key, long size, OzoneProtos
-      .ReplicationType type, OzoneProtos.ReplicationFactor factor)
+  public OzoneOutputStream createKey(String key, long size)
       throws IOException {
     Preconditions.checkNotNull(proxy, "Client proxy is not set.");
     Preconditions.checkNotNull(key);
+    return createKey(key, size, defaultReplicationType, defaultReplication);
+  }
+
+  /**
+   * Creates a new key in the bucket.
+   * @param key Name of the key to be created.
+   * @param size Size of the data the key will point to.
+   * @param type Replication type to be used.
+   * @param factor Replication factor of the key.
+   * @return OzoneOutputStream to which the data has to be written.
+   * @throws IOException
+   */
+  public OzoneOutputStream createKey(String key, long size,
+                                     ReplicationType type,
+                                     ReplicationFactor factor)
+      throws IOException {
+    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
+    Preconditions.checkNotNull(key);
+    Preconditions.checkNotNull(type);
+    Preconditions.checkNotNull(factor);
     return proxy.createKey(volumeName, name, key, size, type, factor);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationFactor.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationFactor.java
new file mode 100644
index 0000000000..971cfec740
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationFactor.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client;
+
+/**
+ * The replication factor to be used while writing key into ozone.
+ */
+public enum ReplicationFactor {
+  ONE(1),
+  THREE(3);
+
+  /**
+   * Integer representation of replication.
+   */
+  private int value;
+
+  /**
+   * Initializes ReplicationFactor with value.
+   * @param value replication value
+   */
+  ReplicationFactor(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Returns enum value corresponding to the int value.
+   * @param value replication value
+   * @return ReplicationFactor
+   */
+  public static ReplicationFactor valueOf(int value) {
+    if(value == 1) {
+      return ONE;
+    }
+    if (value == 3) {
+      return THREE;
+    }
+    throw new IllegalArgumentException("Unsupported value: " + value);
+  }
+
+  /**
+   * Returns integer representation of ReplicationFactor.
+   * @return replication value
+   */
+  public int getValue() {
+    return value;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationType.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationType.java
new file mode 100644
index 0000000000..537c336e50
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ReplicationType.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client;
+
+/**
+ * The replication type to be used while writing key into ozone.
+ */
+public enum ReplicationType {
+  RATIS,
+  STAND_ALONE,
+  CHAINED
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 12b4f0fa38..64a970e50e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -25,10 +25,11 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneQuota;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.ReplicationFactor;
+import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 
 import java.io.IOException;
 import java.util.List;
@@ -258,9 +259,8 @@ List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
    *
    */
   OzoneOutputStream createKey(String volumeName, String bucketName,
-                              String keyName, long size,
-                              OzoneProtos.ReplicationType type,
-                              OzoneProtos.ReplicationFactor factor)
+                              String keyName, long size, ReplicationType type,
+                              ReplicationFactor factor)
       throws IOException;
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index b6a55534da..a720b9947a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -27,10 +27,11 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneQuota;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.ReplicationFactor;
+import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 
 import java.io.IOException;
 import java.util.List;
@@ -183,9 +184,10 @@ public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
    * @param factor @return {@link OzoneOutputStream}
    */
   @Override
-  public OzoneOutputStream createKey(String volumeName, String bucketName,
-      String keyName, long size, OzoneProtos.ReplicationType type,
-      OzoneProtos.ReplicationFactor factor) throws IOException {
+  public OzoneOutputStream createKey(
+      String volumeName, String bucketName, String keyName, long size,
+      ReplicationType type, ReplicationFactor factor)
+      throws IOException {
     throw new UnsupportedOperationException("Not yet implemented.");
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index c8393ee0a5..2604030041 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneQuota;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.ReplicationFactor;
+import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
 import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
@@ -297,8 +299,8 @@ public void createBucket(
     Preconditions.checkNotNull(bucketName);
     Preconditions.checkNotNull(bucketArgs);
 
-    Boolean isVersionEnabled = bucketArgs.isVersionEnabled() == null ?
-        Boolean.FALSE : bucketArgs.isVersionEnabled();
+    Boolean isVersionEnabled = bucketArgs.getVersioning() == null ?
+        Boolean.FALSE : bucketArgs.getVersioning();
     StorageType storageType = bucketArgs.getStorageType() == null ?
         StorageType.DEFAULT : bucketArgs.getStorageType();
     List<OzoneAcl> listOfAcls = new ArrayList<>();
@@ -438,7 +440,7 @@ public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
   @Override
   public OzoneOutputStream createKey(
       String volumeName, String bucketName, String keyName, long size,
-      OzoneProtos.ReplicationType type, OzoneProtos.ReplicationFactor factor)
+      ReplicationType type, ReplicationFactor factor)
       throws IOException {
     String requestId = UUID.randomUUID().toString();
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
@@ -446,8 +448,8 @@ public OzoneOutputStream createKey(
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .setDataSize(size)
-        .setType(type)
-        .setFactor(factor)
+        .setType(OzoneProtos.ReplicationType.valueOf(type.toString()))
+        .setFactor(OzoneProtos.ReplicationFactor.valueOf(factor.getValue()))
         .build();
     OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java
index 2181850bf5..71da4d68ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/tools/Corona.java
@@ -41,7 +41,6 @@
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.util.*;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -142,8 +141,8 @@ enum CoronaOps {
   private String numOfKeys;
   private String jsonDir;
   private boolean useRatis;
-  private OzoneProtos.ReplicationType type;
-  private OzoneProtos.ReplicationFactor factor;
+  private ReplicationType type;
+  private ReplicationFactor factor;
 
   private int threadPoolSize;
   private int keySize;
@@ -372,18 +371,18 @@ private void parseOptions(CommandLine cmdLine) {
 
     useRatis = cmdLine.hasOption(RATIS);
 
-    type = OzoneProtos.ReplicationType.STAND_ALONE;
-    factor = OzoneProtos.ReplicationFactor.ONE;
+    type = ReplicationType.STAND_ALONE;
+    factor = ReplicationFactor.ONE;
 
     if (useRatis) {
-      type = OzoneProtos.ReplicationType.RATIS;
+      type = ReplicationType.RATIS;
       int replicationFactor = Integer.parseInt(cmdLine.getOptionValue(RATIS));
       switch (replicationFactor) {
       case 1:
-        factor = OzoneProtos.ReplicationFactor.ONE;
+        factor = ReplicationFactor.ONE;
         break;
       case 3:
-        factor = OzoneProtos.ReplicationFactor.THREE;
+        factor = ReplicationFactor.THREE;
        break;
       default:
        throw new IllegalArgumentException("Illegal replication factor:"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
index 325d13d2c4..7c180f3789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml
@@ -1103,4 +1103,26 @@
       Configuration property to configure the cache size of client list calls.
     </description>
   </property>
+
+  <property>
+    <name>ozone.replication</name>
+    <value>3</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>
+      Default replication value. The actual number of replications can be
+      specified when writing the key. The default is used if replication
+      is not specified. Supported values: 1 and 3.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.replication.type</name>
+    <value>RATIS</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>
+      Default replication type to be used while writing key into ozone. The
+      value can be specified when writing the key, default is used when
+      nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED.
+    </description>
+  </property>
 </configuration>
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index 24c1319c6f..ad1bcad19a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -33,10 +33,11 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneQuota;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.ReplicationFactor;
+import org.apache.hadoop.ozone.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
@@ -198,7 +199,7 @@ public void testCreateBucketWithVersioning()
     store.createVolume(volumeName);
     OzoneVolume volume = store.getVolume(volumeName);
     BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setIsVersionEnabled(true);
+    builder.setVersioning(true);
     volume.createBucket(bucketName, builder.build());
     OzoneBucket bucket = volume.getBucket(bucketName);
     Assert.assertEquals(bucketName, bucket.getName());
@@ -251,7 +252,7 @@ public void testCreateBucketWithAllArgument()
     store.createVolume(volumeName);
     OzoneVolume volume = store.getVolume(volumeName);
     BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setIsVersionEnabled(true)
+    builder.setVersioning(true)
         .setStorageType(StorageType.SSD)
         .setAcls(acls);
     volume.createBucket(bucketName, builder.build());
@@ -377,8 +378,8 @@ public void testPutKey()
       String keyName = UUID.randomUUID().toString();
 
       OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes().length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.getBytes().length, ReplicationType.STAND_ALONE,
+          ReplicationFactor.ONE);
       out.write(value.getBytes());
       out.close();
       OzoneKey key = bucket.getKey(keyName);
@@ -405,8 +406,8 @@ public void testDeleteKey()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes().length, OzoneProtos.ReplicationType.STAND_ALONE,
-        OzoneProtos.ReplicationFactor.ONE);
+        value.getBytes().length, ReplicationType.STAND_ALONE,
+        ReplicationFactor.ONE);
     out.write(value.getBytes());
     out.close();
     OzoneKey key = bucket.getKey(keyName);
@@ -570,26 +571,22 @@ public void testListKey()
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       four.write(value);
       four.close();
     }
@@ -603,26 +600,22 @@ public void testListKey()
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes();
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, OzoneProtos.ReplicationType.STAND_ALONE,
-          OzoneProtos.ReplicationFactor.ONE);
+          value.length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
       four.write(value);
       four.close();
     }
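
Reviewer note: a minimal usage sketch of the client-facing API introduced above,
assuming an ObjectStore handle ("store") has already been obtained from an
OzoneClient built against the same conf (client construction is outside this
patch); the volume, bucket and key names are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;
    import org.apache.hadoop.ozone.client.BucketArgs;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneVolume;
    import org.apache.hadoop.ozone.client.ReplicationFactor;
    import org.apache.hadoop.ozone.client.ReplicationType;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    // Cluster-wide defaults that OzoneBucket reads when no type/factor is given.
    Configuration conf = new Configuration();
    conf.setInt(OzoneConfigKeys.OZONE_REPLICATION,
        ReplicationFactor.THREE.getValue());
    conf.set(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
        ReplicationType.RATIS.toString());

    store.createVolume("vol1");
    OzoneVolume volume = store.getVolume("vol1");
    volume.createBucket("bucket1",
        BucketArgs.newBuilder().setVersioning(true).build()); // renamed setter
    OzoneBucket bucket = volume.getBucket("bucket1");

    byte[] data = "sample value".getBytes();

    // New convenience overload: replication comes from ozone.replication.type
    // and ozone.replication, i.e. RATIS with factor THREE unless overridden.
    OzoneOutputStream one = bucket.createKey("key1", data.length);
    one.write(data);
    one.close();

    // Explicit overload now takes the client-side enums instead of OzoneProtos.
    OzoneOutputStream two = bucket.createKey("key2", data.length,
        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
    two.write(data);
    two.close();

The sketch relies only on methods that appear in this diff (the createKey
overloads, BucketArgs.Builder#setVersioning, ReplicationFactor#getValue); the
ObjectStore wiring follows the existing TestOzoneRpcClient pattern.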