diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index a8ead77b5d..13c4a0c729 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -21,6 +21,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ByteStringHelper;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -585,7 +586,7 @@ public void close() throws IOException {
     } finally {
       cleanup(false);
     }
-    // TODO: Turn the below buffer empty check on whne Standalone pipeline
+    // TODO: Turn the below buffer empty check on when Standalone pipeline
     // is removed in the write path in tests
     // Preconditions.checkArgument(buffer.position() == 0);
     // bufferPool.checkBufferPoolEmpty();
@@ -676,9 +677,9 @@ private void checkOpen() throws IOException {
    */
   private void writeChunkToContainer(ByteBuffer chunk) throws IOException {
     int effectiveChunkSize = chunk.remaining();
-    ByteString data = ByteString.copyFrom(chunk);
+    ByteString data = ByteStringHelper.getByteString(chunk);
     Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
-    ChecksumData checksumData = checksum.computeChecksum(data);
+    ChecksumData checksumData = checksum.computeChecksum(chunk);
     ChunkInfo chunkInfo = ChunkInfo.newBuilder()
         .setChunkName(DigestUtils.md5Hex(key) + "_stream_" + streamId +
             "_chunk_" + ++chunkIndex)
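A note on the write path above: with "ozone.UnsafeByteOperations.enabled" set, ByteStringHelper.getByteString(chunk) (added below) wraps the chunk buffer instead of copying it, and the checksum is now computed from the ByteBuffer itself via its backing array, which appears to assume heap (array-backed) chunk buffers. A minimal standalone sketch of the wrap-versus-copy difference, using the Ratis-shaded protobuf API this patch uses (class name and buffer contents illustrative):

    import java.nio.ByteBuffer;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
    import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;

    public class WrapVsCopy {
      public static void main(String[] args) {
        ByteBuffer chunk = ByteBuffer.wrap("chunk-data".getBytes());
        // Zero-copy: the ByteString shares the buffer's memory, so the
        // caller must not mutate the buffer while the ByteString is alive.
        ByteString wrapped = UnsafeByteOperations.unsafeWrap(chunk);
        // Copying: independent bytes, but copyFrom() advances the buffer's
        // position, which is why ByteStringHelper.copyFrom() flips it back.
        ByteString copied = ByteString.copyFrom(chunk);
        System.out.println(wrapped.size() + " " + copied.size()); // 10 10
      }
    }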
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java
new file mode 100644
index 0000000000..ccdf4fac42
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringHelper.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.base.Preconditions;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicBoolean;
+/**
+ * Helper class to perform unsafe ByteString conversion from a ByteBuffer or a
+ * byte array, depending on the config "ozone.UnsafeByteOperations.enabled".
+ */
+public final class ByteStringHelper {
+  private static final AtomicBoolean INITIALIZED = new AtomicBoolean();
+  private static volatile boolean isUnsafeByteOperationsEnabled;
+
+  /**
+   * There is no need to instantiate this class.
+   */
+  private ByteStringHelper() {
+  }
+
+  public static void init(boolean isUnsafeByteOperation) {
+    final boolean set = INITIALIZED.compareAndSet(false, true);
+    if (set) {
+      ByteStringHelper.isUnsafeByteOperationsEnabled =
+          isUnsafeByteOperation;
+    } else {
+      // already initialized, check values
+      Preconditions.checkState(isUnsafeByteOperationsEnabled
+          == isUnsafeByteOperation);
+    }
+  }
+
+  private static ByteString copyFrom(ByteBuffer buffer) {
+    final ByteString bytes = ByteString.copyFrom(buffer);
+    // flip the buffer so as to read the data starting from pos 0 again
+    buffer.flip();
+    return bytes;
+  }
+
+  public static ByteString getByteString(ByteBuffer buffer) {
+    return isUnsafeByteOperationsEnabled ?
+        UnsafeByteOperations.unsafeWrap(buffer) : copyFrom(buffer);
+  }
+
+  public static ByteString getByteString(byte[] bytes) {
+    return isUnsafeByteOperationsEnabled ?
+        UnsafeByteOperations.unsafeWrap(bytes) : ByteString.copyFrom(bytes);
+  }
+
+}
\ No newline at end of file
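For reference, a minimal sketch of the intended call pattern for the new helper (class name illustrative; the init() call mirrors the wiring added to KeyValueHandler, RpcClient and DistributedStorageHandler further down):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hdds.scm.ByteStringHelper;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    public class ByteStringHelperUsage {
      public static void main(String[] args) {
        // Initialize once per JVM, normally from the value of
        // ozone.UnsafeByteOperations.enabled in the configuration.
        ByteStringHelper.init(true);
        // With unsafe operations enabled this wraps without copying.
        ByteString data = ByteStringHelper.getByteString(
            ByteBuffer.wrap(new byte[]{1, 2, 3}));
        System.out.println(data.size()); // 3
      }
    }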
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 73e402523a..5a1a75eb90 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -355,7 +355,7 @@ public static PutSmallFileResponseProto writeSmallFile(
         KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
             .build();
     Checksum checksum = new Checksum();
-    ChecksumData checksumData = checksum.computeChecksum(data);
+    ChecksumData checksumData = checksum.computeChecksum(data, 0, data.length);
     ChunkInfo chunk = ChunkInfo.newBuilder()
         .setChunkName(blockID.getLocalID() + "_chunk")
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 1388d00d03..a28ae3a2de 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -94,6 +94,11 @@ public final class OzoneConfigKeys {
   public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
       "OFF";
 
+  public static final String OZONE_UNSAFEBYTEOPERATIONS_ENABLED =
+      "ozone.UnsafeByteOperations.enabled";
+  public static final boolean OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT
+      = true;
+
   public static final String OZONE_CONTAINER_CACHE_SIZE =
       "ozone.container.cache.size";
   public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
index 277753545d..1a359fe5c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
@@ -19,6 +19,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Longs;
+
+import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
@@ -76,12 +78,13 @@ public Checksum() {
 
   /**
    * Computes checksum for give data.
-   * @param byteString input data in the form of ByteString.
+   * @param byteBuffer input data in the form of ByteBuffer.
    * @return ChecksumData computed for input data.
    */
-  public ChecksumData computeChecksum(ByteString byteString)
+  public ChecksumData computeChecksum(ByteBuffer byteBuffer)
       throws OzoneChecksumException {
-    return computeChecksum(byteString.toByteArray());
+    return computeChecksum(byteBuffer.array(), byteBuffer.position(),
+        byteBuffer.limit());
   }
 
   /**
@@ -91,6 +94,16 @@ public ChecksumData computeChecksum(ByteString byteString)
    */
   public ChecksumData computeChecksum(byte[] data)
       throws OzoneChecksumException {
+    return computeChecksum(data, 0, data.length);
+  }
+
+  /**
+   * Computes checksum for given data.
+   * @param data input data; checksum is computed for the range [offset, len).
+   * @return ChecksumData computed for input data.
+   */
+  public ChecksumData computeChecksum(byte[] data, int offset, int len)
+      throws OzoneChecksumException {
     ChecksumData checksumData = new ChecksumData(this.checksumType, this
         .bytesPerChecksum);
     if (checksumType == ChecksumType.NONE) {
@@ -120,7 +133,7 @@ public ChecksumData computeChecksum(byte[] data)
 
     // Compute number of checksums needs for given data length based on bytes
     // per checksum.
-    int dataSize = data.length;
+    int dataSize = len - offset;
     int numChecksums = (dataSize + bytesPerChecksum - 1) / bytesPerChecksum;
 
     // Checksum is computed for each bytesPerChecksum number of bytes of data
@@ -128,7 +141,7 @@ public ChecksumData computeChecksum(byte[] data)
     // remaining data with length less than bytesPerChecksum.
     List<ByteString> checksumList = new ArrayList<>(numChecksums);
     for (int index = 0; index < numChecksums; index++) {
-      checksumList.add(computeChecksumAtIndex(data, index));
+      checksumList.add(computeChecksumAtIndex(data, index, offset, len));
     }
     checksumData.setChecksums(checksumList);
 
@@ -140,15 +153,19 @@ public ChecksumData computeChecksum(byte[] data)
    * and a max length of bytesPerChecksum.
    * @param data input data
    * @param index index to compute the offset from where data must be read
+   * @param start start pos of the array where the computation has to start
+   * @param length end pos (exclusive) of the array till which the checksum is computed
    * @return computed checksum ByteString
    * @throws OzoneChecksumException thrown when ChecksumType is not recognized
    */
-  private ByteString computeChecksumAtIndex(byte[] data, int index)
+  private ByteString computeChecksumAtIndex(byte[] data, int index, int start,
+      int length)
       throws OzoneChecksumException {
-    int offset = index * bytesPerChecksum;
+    int offset = start + index * bytesPerChecksum;
+    // 'length' is an end offset (a buffer limit), not a byte count
     int len = bytesPerChecksum;
-    if ((offset + len) > data.length) {
-      len = data.length - offset;
+    if ((offset + len) > length) {
+      len = length - offset;
     }
     byte[] checksumBytes = null;
     switch (checksumType) {
@@ -236,7 +253,8 @@ public static boolean verifyChecksum(byte[] data, ChecksumData checksumData)
 
     int bytesPerChecksum = checksumData.getBytesPerChecksum();
     Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
-    ChecksumData computedChecksumData = checksum.computeChecksum(data);
+    ChecksumData computedChecksumData =
+        checksum.computeChecksum(data, 0, data.length);
 
     return checksumData.verifyChecksumDataMatches(computedChecksumData);
   }
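One subtlety in the new Checksum overload worth spelling out: len is an end offset (a buffer limit), not a byte count, because computeChecksum(ByteBuffer) passes position() and limit() straight through. A small illustrative sketch of the arithmetic (class name and values hypothetical):

    import java.nio.ByteBuffer;

    public class ChecksumRangeExample {
      public static void main(String[] args) {
        byte[] backing = new byte[100];
        // wrap(array, offset, length) -> position = 20, limit = 70
        ByteBuffer buf = ByteBuffer.wrap(backing, 20, 50);
        int offset = buf.position();        // 20
        int len = buf.limit();              // 70 -- end offset, not a count
        int dataSize = len - offset;        // 50 bytes are checksummed
        int bytesPerChecksum = 16;
        int numChecksums =
            (dataSize + bytesPerChecksum - 1) / bytesPerChecksum; // ceil = 4
        System.out.println(dataSize + " bytes -> " + numChecksums
            + " checksums");
      }
    }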
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 731bf2823b..ca61c1a465 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -386,6 +386,14 @@
       assumed.
     </description>
   </property>
+  <property>
+    <name>ozone.UnsafeByteOperations.enabled</name>
+    <value>true</value>
+    <tag>OZONE, PERFORMANCE, CLIENT</tag>
+    <description>It specifies whether to use unsafe (zero-copy) or safe
+      (copying) buffer to ByteString conversion.
+    </description>
+  </property>
   <property>
     <name>ozone.client.connection.timeout</name>
     <value>5000ms</value>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d48d1dfe52..d2d7bf793a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .PutSmallFileRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.scm.ByteStringHelper;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
@@ -146,6 +147,10 @@ public KeyValueHandler(Configuration config, StateContext context,
     // this handler lock is used for synchronizing createContainer Requests,
     // so using a fair lock here.
     containerCreationLock = new AutoCloseableLock(new ReentrantLock(true));
+    boolean isUnsafeByteOperationsEnabled = conf.getBoolean(
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT);
+    ByteStringHelper.init(isUnsafeByteOperationsEnabled);
   }
 
   @VisibleForTesting
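A consequence of the init() contract used above: every caller in a JVM must agree on the flag, because a later init() with a conflicting value fails the Preconditions check. Roughly (class name illustrative):

    import org.apache.hadoop.hdds.scm.ByteStringHelper;

    public class InitContract {
      public static void main(String[] args) {
        ByteStringHelper.init(true);  // first caller wins
        ByteStringHelper.init(true);  // fine: same value
        ByteStringHelper.init(false); // throws IllegalStateException
      }
    }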
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 6df6f3d015..2781bfacca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -25,6 +25,7 @@
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkResponseProto;
+import org.apache.hadoop.hdds.scm.ByteStringHelper;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.io.IOUtils;
@@ -33,7 +34,6 @@
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -315,7 +315,8 @@ public static ContainerCommandResponseProto getReadChunkResponse(
     ReadChunkResponseProto.Builder response =
         ReadChunkResponseProto.newBuilder();
     response.setChunkData(info.getProtoBufMessage());
-    response.setData(ByteString.copyFrom(data));
+    response.setData(
+        ByteStringHelper.getByteString(data));
     response.setBlockID(msg.getReadChunk().getBlockID());
 
     ContainerCommandResponseProto.Builder builder =
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index e81cca76e1..445c00096d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ChecksumType;
+import org.apache.hadoop.hdds.scm.ByteStringHelper;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
@@ -215,6 +216,10 @@ public RpcClient(Configuration conf) throws IOException {
         OZONE_CLIENT_MAX_RETRIES_DEFAULT);
     dtService =
         getOMProxyProvider().getProxy().getDelegationTokenService();
+    boolean isUnsafeByteOperationsEnabled = conf.getBoolean(
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT);
+    ByteStringHelper.init(isUnsafeByteOperationsEnabled);
   }
 
   private InetSocketAddress getScmAddressForClient() throws IOException {
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index 05c2ae7a51..20b19719f1 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -21,6 +21,7 @@
 import com.google.common.base.Strings;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.scm.ByteStringHelper;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.io.IOUtils;
@@ -158,6 +159,11 @@ public DistributedStorageHandler(OzoneConfiguration conf,
     this.maxRetryCount =
         conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys.
             OZONE_CLIENT_MAX_RETRIES_DEFAULT);
+    boolean isUnsafeByteOperationsEnabled = conf.getBoolean(
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
+        OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT);
+    ByteStringHelper.init(isUnsafeByteOperationsEnabled);
+
   }
 
   @Override
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
index 3cf44161cd..fdcb822bf4 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -23,18 +23,15 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
  * Tests Freon, with MiniOzoneCluster and validate data.
  */
-public class TestDataValidate {
+public abstract class TestDataValidate {
 
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
+  private static MiniOzoneCluster cluster = null;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -42,9 +39,7 @@ public class TestDataValidate {
    * Ozone is made active by setting OZONE_ENABLED = true
    *
    */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
+  static void startCluster(OzoneConfiguration conf) throws Exception {
     conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms");
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(5).build();
@@ -54,8 +49,7 @@ public static void init() throws Exception {
   /**
    * Shutdown MiniDFSCluster.
    */
-  @AfterClass
-  public static void shutdown() {
+  static void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
     }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java
new file mode 100644
index 0000000000..745cee42f1
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * Tests Freon data validation with safe (copying) byte operations.
+ */
+public class TestDataValidateWithSafeByteOperations extends TestDataValidate {
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
+        false);
+    startCluster(conf);
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    shutdownCluster();
+  }
+}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java
new file mode 100644
index 0000000000..5ecef9b00e
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * Tests Freon data validation with unsafe (zero-copy) byte operations.
+ */
+public class TestDataValidateWithUnsafeByteOperations extends TestDataValidate {
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
+        true);
+    startCluster(conf);
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    shutdownCluster();
+  }
+}