From a16aa2f60b27628da2538d4590292cd72eb5b6a9 Mon Sep 17 00:00:00 2001 From: Anu Engineer Date: Thu, 15 Nov 2018 17:36:09 -0800 Subject: [PATCH] HDDS-825. Code cleanup based on messages from ErrorProne. Contributed by Anu Engineer. --- .../hadoop/hdds/scm/XceiverClientGrpc.java | 1 + .../hadoop/hdds/scm/XceiverClientManager.java | 1 + .../hadoop/hdds/scm/XceiverClientRatis.java | 2 + .../hdds/scm/storage/ChunkInputStream.java | 2 +- .../hadoop/hdds/scm/pipeline/Pipeline.java | 11 ++ .../hadoop/ozone/common/StorageInfo.java | 1 - .../org/apache/hadoop/utils/db/RDBStore.java | 7 +- .../hadoop/ozone/audit/DummyAction.java | 2 +- .../hadoop/ozone/lease/TestLeaseManager.java | 17 ++- .../hadoop/utils/TestHddsIdFactory.java | 2 +- .../hadoop/utils/TestMetadataStore.java | 96 ++++++------ .../hadoop/utils/TestRocksDBStoreMBean.java | 6 +- .../hadoop/utils/db/TestDBStoreBuilder.java | 4 +- .../hadoop/utils/db/TestRDBTableStore.java | 6 +- .../org/apache/hadoop/utils/package-info.java | 22 +++ .../keyvalue/impl/BlockManagerImpl.java | 4 +- .../replication/GrpcReplicationClient.java | 2 +- .../ozone/container/common/ScmTestMock.java | 4 +- .../common/TestDatanodeStateMachine.java | 12 +- .../common/impl/TestHddsDispatcher.java | 3 +- .../common/report/TestReportPublisher.java | 2 +- .../common/volume/TestHddsVolume.java | 54 +++---- .../common/volume/TestVolumeSet.java | 6 +- .../container/common/volume/package-info.java | 22 +++ .../keyvalue/TestBlockManagerImpl.java | 139 ++++++++---------- .../keyvalue/TestChunkManagerImpl.java | 3 +- .../keyvalue/TestKeyValueBlockIterator.java | 4 +- .../keyvalue/TestKeyValueContainer.java | 11 +- .../keyvalue/TestKeyValueHandler.java | 7 +- .../keyvalue/TestTarContainerPacker.java | 11 +- .../container/keyvalue/package-info.java | 22 +++ .../hadoop/hdds/server/events/EventQueue.java | 7 +- .../hdds/server/events/TestEventWatcher.java | 49 +++--- .../hdds/server/events/package-info.java | 22 +++ .../scm/block/PendingDeleteStatusList.java | 10 +- .../hdds/scm/chillmode/ChillModePrecheck.java | 1 + .../hdds/scm/container/ContainerReplica.java | 16 +- .../placement/algorithms/SCMCommonPolicy.java | 6 +- .../SCMContainerPlacementRandom.java | 1 + .../placement/metrics/DatanodeMetric.java | 3 +- .../placement/metrics/LongMetric.java | 1 - .../placement/metrics/SCMNodeMetric.java | 4 +- .../placement/metrics/SCMNodeStat.java | 10 +- .../ReplicationActivityStatus.java | 3 +- .../replication/ReplicationManager.java | 2 + .../hadoop/hdds/scm/node/CommandQueue.java | 8 +- .../hdds/scm/node/NodeStateManager.java | 8 +- .../scm/node/states/Node2ContainerMap.java | 2 + .../hdds/scm/node/states/NodeStateMap.java | 4 +- .../hdds/scm/pipeline/SCMPipelineManager.java | 4 +- .../scm/server/SCMClientProtocolServer.java | 3 +- .../scm/server/SCMDatanodeProtocolServer.java | 4 +- .../hadoop/hdds/scm/TestHddsServerUtils.java | 19 ++- .../hadoop/hdds/scm/node/TestNodeManager.java | 12 +- .../scm/node/TestSCMNodeStorageStatMap.java | 16 +- .../node/states/TestNode2ContainerMap.java | 6 +- .../placement/TestDatanodeMetrics.java | 15 +- .../hadoop/ozone/client/ObjectStore.java | 2 + .../hadoop/ozone/client/rpc/RpcClient.java | 2 + .../ozone/client/TestHddsClientUtils.java | 42 +++--- .../ozone/om/helpers/OmOzoneAclMap.java | 1 + .../hadoop/ozone/web/response/BucketInfo.java | 6 +- .../hadoop/ozone/web/TestBucketInfo.java | 25 ++-- .../apache/hadoop/ozone/web/TestQuota.java | 55 ++++--- .../hdds/scm/pipeline/TestPipelineClose.java | 27 ++-- .../hdds/scm/pipeline/package-info.java | 22 +++ 
.../apache/hadoop/ozone/OzoneTestUtils.java | 12 +- .../TestStorageContainerManagerHelper.java | 10 +- .../TestCloseContainerHandlingByClient.java | 46 +++--- .../org/apache/hadoop/ozone/package-info.java | 22 +++ .../hadoop/ozone/web/TestOzoneVolumes.java | 4 + .../hadoop/ozone/web/client/package-info.java | 22 +++ .../apache/hadoop/ozone/web/package-info.java | 22 +++ .../web/handlers/BucketProcessTemplate.java | 4 +- .../ozone/web/interfaces/StorageHandler.java | 1 + .../hadoop/ozone/web/TestErrorCode.java | 6 +- .../apache/hadoop/ozone/web/package-info.java | 22 +++ .../hadoop/ozone/om/BucketManagerImpl.java | 1 + .../ozone/om/OmMetadataManagerImpl.java | 3 +- .../apache/hadoop/ozone/om/OzoneManager.java | 1 + .../ozone/om/ServiceListJSONServlet.java | 1 + .../hadoop/ozone/om/VolumeManagerImpl.java | 11 +- .../hadoop/ozone/om/TestChunkStreams.java | 39 ++--- .../hadoop/ozone/om/TestKeyManagerImpl.java | 19 +-- .../hadoop/fs/ozone/OzoneFileSystem.java | 3 + .../fs/ozone/TestOzoneFileInterfaces.java | 3 +- .../apache/hadoop/fs/ozone/package-info.java | 22 +++ .../s3/header/AuthorizationHeaderV2.java | 1 + .../s3/header/AuthorizationHeaderV4.java | 1 + .../hadoop/ozone/s3/header/Credential.java | 1 + .../hadoop/ozone/client/ObjectStoreStub.java | 2 + .../ozone/s3/endpoint/TestBucketDelete.java | 2 +- .../TestMultiDeleteRequestUnmarshaller.java | 6 +- .../ozone/s3/endpoint/TestObjectGet.java | 9 +- .../ozone/s3/endpoint/TestObjectHead.java | 10 +- .../ozone/freon/RandomKeyGenerator.java | 2 +- .../TestFreonWithDatanodeFastRestart.java | 4 +- .../hadoop/ozone/freon/package-info.java | 22 +++ .../apache/hadoop/ozone/om/TestOmSQLCli.java | 8 +- .../apache/hadoop/ozone/om/package-info.java | 22 +++ 100 files changed, 812 insertions(+), 456 deletions(-) create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java create mode 100644 hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java create mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 9acd832a68..bbd334006e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -296,6 +296,7 @@ public void createPipeline() { // For stand 
alone pipeline, there is no notion called setup pipeline. } + @Override public void destroyPipeline() { // For stand alone pipeline, there is no notion called destroy pipeline. } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 1973c1d492..b2735bc79f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -170,6 +170,7 @@ public XceiverClientSpi call() throws Exception { /** * Close and remove all the cached clients. */ + @Override public void close() { //closing is done through RemovalListener clientCache.invalidateAll(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index e4b711afa9..dbda2e6211 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -100,6 +100,7 @@ private XceiverClientRatis(Pipeline pipeline, RpcType rpcType, /** * {@inheritDoc} */ + @Override public void createPipeline() throws IOException { final RaftGroup group = RatisHelper.newRaftGroup(pipeline); LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); @@ -110,6 +111,7 @@ public void createPipeline() throws IOException { /** * {@inheritDoc} */ + @Override public void destroyPipeline() throws IOException { final RaftGroup group = RatisHelper.newRaftGroup(pipeline); LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java index 21b8974216..7b243d8381 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java @@ -83,7 +83,7 @@ public ChunkInputStream( } private void initializeChunkOffset() { - int tempOffset = 0; + long tempOffset = 0; for (int i = 0; i < chunks.size(); i++) { chunkOffset[i] = tempOffset; tempOffset += chunks.get(i).getLen(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index ef055a1bb5..62081f4835 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -196,6 +196,17 @@ public static Builder newBuilder(Pipeline pipeline) { return new Builder(pipeline); } + @Override + public String toString() { + return "Pipeline{" + + "id=" + id + + ", type=" + type + + ", factor=" + factor + + ", state=" + state + + ", nodeStatus=" + nodeStatus + + '}'; + } + /** * Builder class for Pipeline. 
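A note on the ChunkInputStream change above: initializeChunkOffset() accumulates chunk lengths into a running byte offset, and with an int accumulator the compound assignment narrows silently, so the offset wraps once a block passes Integer.MAX_VALUE bytes. A minimal, self-contained sketch of the wrap-around (illustrative values only, not code from the patch):

    public final class NarrowingDemo {
      public static void main(String[] args) {
        long chunkLen = 1024L * 1024 * 1024;  // 1 GiB per chunk
        int intOffset = 0;
        long longOffset = 0;
        for (int i = 0; i < 3; i++) {
          intOffset += chunkLen;   // compound assignment narrows long to int
          longOffset += chunkLen;
        }
        System.out.println(intOffset);   // -1073741824: wrapped past Integer.MAX_VALUE
        System.out.println(longOffset);  // 3221225472: the true offset
      }
    }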
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java index 0e98a4c8a9..1cf39b212d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java @@ -68,7 +68,6 @@ public StorageInfo(NodeType type, String cid, long cT) throws IOException { Preconditions.checkNotNull(type); Preconditions.checkNotNull(cid); - Preconditions.checkNotNull(cT); properties.setProperty(NODE_TYPE, type.name()); properties.setProperty(CLUSTER_ID, cid); properties.setProperty(CREATION_TIME, String.valueOf(cT)); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java index d0644b6d30..24cd96da6b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java @@ -24,11 +24,9 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.utils.RocksDBStoreMBean; -import org.apache.ratis.thirdparty.com.google.common.annotations. - VisibleForTesting; +import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; - import org.rocksdb.DBOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; @@ -192,7 +190,6 @@ public void move(byte[] key, Table source, Table dest) throws IOException { } } - @Override public void move(byte[] key, byte[] value, Table source, Table dest) throws IOException { @@ -226,7 +223,7 @@ public void move(byte[] sourceKey, byte[] destKey, byte[] value, Table source, } catch (RocksDBException rockdbException) { LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(sourceKey)); throw toIOException("Unable to move key: " + - DFSUtil.bytes2String(sourceKey), rockdbException); + DFSUtil.bytes2String(sourceKey), rockdbException); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java index 6044c0a2f5..76cd39ad78 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java @@ -37,7 +37,7 @@ public enum DummyAction implements AuditAction { SET_OWNER("SET_OWNER"), SET_QUOTA("SET_QUOTA"); - private String action; + private final String action; DummyAction(String action) { this.action = action; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java index bdc70fc983..38878334f5 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java @@ -41,7 +41,7 @@ public class TestLeaseManager { /** * Dummy resource on which leases can be acquired. 
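The DummyAction change above, private String action becoming private final String action, addresses ErrorProne's ImmutableEnumChecker: enum constants are shared singletons, so a non-final field amounts to process-wide mutable state. A small illustrative sketch (hypothetical enum, not from the patch):

    public enum Level {
      LOW("low"), HIGH("high");

      // final: assigned once in the constructor, so the shared enum
      // constants carry no mutable state.
      private final String label;

      Level(String label) {
        this.label = label;
      }

      public String getLabel() {
        return label;
      }

      public static void main(String[] args) {
        System.out.println(LOW.getLabel());
      }
    }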
*/ - private final class DummyResource { + private static final class DummyResource { private final String name; @@ -61,6 +61,21 @@ public boolean equals(Object obj) { } return false; } + + /** + * Adding a toString method to fix the ErrorProne warning that this object + * is later used in String functions, which would print out (e.g. + * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@ + * 4488aabb`) instead of useful information. + * + * @return Name of the Dummy object. + */ + @Override + public String toString() { + return "DummyResource{" + + "name='" + name + '\'' + + '}'; + } } @Test diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java index a341ccc223..35e1b3ed09 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java @@ -55,7 +55,7 @@ public void testGetLongId() throws Exception { List> result = executor.invokeAll(tasks); assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size()); for (Future r : result) { - assertEquals(r.get().intValue(), IDS_PER_THREAD); + assertEquals(IDS_PER_THREAD, r.get().intValue()); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java index a91bc80188..5da8fbc1c9 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java @@ -1,24 +1,21 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ package org.apache.hadoop.utils; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; - import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; @@ -28,9 +25,9 @@ import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.utils.MetadataStore.KeyValue; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; +import org.apache.hadoop.utils.MetadataStore.KeyValue; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -50,14 +47,14 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.UUID; - import java.util.concurrent.atomic.AtomicInteger; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import static org.junit.runners.Parameterized.Parameters; /** @@ -66,27 +63,24 @@ @RunWith(Parameterized.class) public class TestMetadataStore { + private final static int MAX_GETRANGE_LENGTH = 100; private final String storeImpl; - + @Rule + public ExpectedException expectedException = ExpectedException.none(); + private MetadataStore store; + private File testDir; public TestMetadataStore(String metadataImpl) { this.storeImpl = metadataImpl; } @Parameters public static Collection data() { - return Arrays.asList(new Object[][] { + return Arrays.asList(new Object[][]{ {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} }); } - private MetadataStore store; - private File testDir; - private final static int MAX_GETRANGE_LENGTH = 100; - - @Rule - public ExpectedException expectedException = ExpectedException.none(); - @Before public void init() throws IOException { if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { @@ -109,7 +103,7 @@ public void init() throws IOException { // Add 20 entries. 
// {a0 : a-value0} to {a9 : a-value9} // {b0 : b-value0} to {b9 : b-value9} - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { store.put(getBytes("a" + i), getBytes("a-value" + i)); store.put(getBytes("b" + i), getBytes("b-value" + i)); } @@ -178,7 +172,7 @@ public void testMetaStoreConfigDifferentFromType() throws IOException { GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - if(storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) { + if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) { dbType = "RocksDB"; } else { dbType = "LevelDB"; @@ -241,7 +235,7 @@ private String getString(byte[] bytes) { @Test public void testGetDelete() throws IOException { - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { byte[] va = store.get(getBytes("a" + i)); assertEquals("a-value" + i, getString(va)); @@ -273,7 +267,7 @@ private String getExpectedValue(String key) { return null; } char[] arr = key.toCharArray(); - return new StringBuffer().append(arr[0]).append("-value") + return new StringBuilder().append(arr[0]).append("-value") .append(arr[arr.length - 1]).toString(); } @@ -326,14 +320,14 @@ public void testIterateKeys() throws IOException { char num = value.charAt(value.length() - 1); // each value adds 1 int i = Character.getNumericValue(num) + 1; - value = value.substring(0, value.length() - 1) + i; + value = value.substring(0, value.length() - 1) + i; result.add(value); return true; }); assertFalse(result.isEmpty()); - for (int i=0; i - new String(entry.getKey()).startsWith("b") + new String(entry.getKey(), UTF_8).startsWith("b") )); assertEquals(20, filter1.getKeysScannedNum()); assertEquals(10, filter1.getKeysHintedNum()); @@ -416,7 +410,7 @@ public void testGetRangeKVs() throws IOException { assertEquals("b-value2", getString(result.get(0).getValue())); // If filter is null, no effect. - result = store.getRangeKVs(null, 1, null); + result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null); assertEquals(1, result.size()); assertEquals("a0", getString(result.get(0).getKey())); } @@ -461,7 +455,7 @@ public void testInvalidStartKey() throws IOException { // If startKey is invalid, the returned list should be empty. 
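An aside on the cast added above, store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null): passing a bare null to a varargs parameter is ambiguous, so ErrorProne asks for an explicit cast stating whether the whole array or a single element is meant to be null. A self-contained sketch of the two readings (hypothetical method, not from the patch):

    public final class VarargsNullDemo {
      static int count(String... items) {
        return items == null ? 0 : items.length;
      }

      public static void main(String[] args) {
        System.out.println(count((String[]) null)); // 0: the array itself is null
        System.out.println(count((String) null));   // 1: a one-element array {null}
      }
    }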
List> kvs = store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH); - assertEquals(kvs.size(), 0); + assertEquals(0, kvs.size()); } @Test @@ -504,7 +498,7 @@ public void testBatchWrite() throws IOException { .build(); List expectedResult = Lists.newArrayList(); - for (int i = 0; i<10; i++) { + for (int i = 0; i < 10; i++) { dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i)); expectedResult.add("batch-" + i); } @@ -541,43 +535,44 @@ public void testKeyPrefixFilter() throws IOException { new KeyPrefixFilter().addFilter("b0", true).addFilter("b"); } catch (IllegalArgumentException e) { exception = e; + assertTrue(exception.getMessage().contains("KeyPrefix: b already " + + "rejected")); } - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "rejected")); try { new KeyPrefixFilter().addFilter("b0").addFilter("b", true); } catch (IllegalArgumentException e) { exception = e; + assertTrue(exception.getMessage().contains("KeyPrefix: b already " + + "accepted")); } - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "accepted")); try { new KeyPrefixFilter().addFilter("b", true).addFilter("b0"); } catch (IllegalArgumentException e) { exception = e; + assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + + "rejected")); } - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "rejected")); try { new KeyPrefixFilter().addFilter("b").addFilter("b0", true); } catch (IllegalArgumentException e) { exception = e; + assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + + "accepted")); } - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "accepted")); MetadataKeyFilter filter1 = new KeyPrefixFilter(true) - .addFilter("a0") - .addFilter("a1") - .addFilter("b", true); + .addFilter("a0") + .addFilter("a1") + .addFilter("b", true); result = store.getRangeKVs(null, 100, filter1); assertEquals(2, result.size()); - assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey()) + assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(), + UTF_8) .startsWith("a0")) && result.stream().anyMatch(entry -> new String( - entry.getKey()).startsWith("a1"))); + entry.getKey(), UTF_8).startsWith("a1"))); filter1 = new KeyPrefixFilter(true).addFilter("b", true); result = store.getRangeKVs(null, 100, filter1); @@ -586,7 +581,8 @@ public void testKeyPrefixFilter() throws IOException { filter1 = new KeyPrefixFilter().addFilter("b", true); result = store.getRangeKVs(null, 100, filter1); assertEquals(10, result.size()); - assertTrue(result.stream().allMatch(entry -> new String(entry.getKey()) + assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(), + UTF_8) .startsWith("a"))); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java index db2572cd02..ccf19b0e16 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java @@ -29,12 +29,14 @@ import java.io.File; import java.lang.management.ManagementFactory; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test the JMX interface for the rocksdb metastore implementation. 
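The UTF_8 conversions in TestMetadataStore above, and in TestRocksDBStoreMBean just below, replace String.getBytes() and new String(byte[]), both of which use the platform default charset and so can produce different bytes on different machines. A minimal sketch of the portability hazard (illustrative only):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public final class CharsetDemo {
      public static void main(String[] args) {
        String s = "héllo";
        byte[] platform = s.getBytes();                        // depends on file.encoding
        byte[] portable = s.getBytes(StandardCharsets.UTF_8);  // same bytes everywhere
        // Can print false on a platform whose default charset is not UTF-8.
        System.out.println(Arrays.equals(platform, portable));
      }
    }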
*/ public class TestRocksDBStoreMBean { - Configuration conf; + private Configuration conf; @Before public void init() throws Exception { @@ -57,7 +59,7 @@ public void testJmxBeans() throws Exception { .setCreateIfMissing(true).setDbFile(testDir).build(); for (int i = 0; i < 10; i++) { - metadataStore.put("key".getBytes(), "value".getBytes()); + metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8)); } MBeanServer platformMBeanServer = diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java index 3e1f364f09..47ad5972b0 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java @@ -131,7 +131,7 @@ public void builderWithDataWrites() throws Exception { RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); firstTable.put(key, value); byte[] temp = firstTable.get(key); - Arrays.equals(value, temp); + Assert.assertTrue(Arrays.equals(value, temp)); } try (Table secondTable = dbStore.getTable("Second")) { @@ -161,7 +161,7 @@ public void builderWithDiskProfileWrites() throws Exception { RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); firstTable.put(key, value); byte[] temp = firstTable.get(key); - Arrays.equals(value, temp); + Assert.assertTrue(Arrays.equals(value, temp)); } try (Table secondTable = dbStore.getTable("Second")) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java index cd25548403..9524f5f4c8 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java @@ -35,9 +35,9 @@ import org.rocksdb.WriteBatch; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -112,8 +112,8 @@ public void putGetAndEmpty() throws Exception { @Test public void delete() throws Exception { - List deletedKeys = new LinkedList<>(); - List validKeys = new LinkedList<>(); + List deletedKeys = new ArrayList<>(); + List validKeys = new ArrayList<>(); byte[] value = RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); for (int x = 0; x < 100; x++) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java new file mode 100644 index 0000000000..1fafbd3749 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * DB test Utils. + */ +package org.apache.hadoop.utils; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index ea0e819173..86865acb4a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -141,8 +141,8 @@ public BlockData getBlock(Container container, BlockID blockID) long bcsId = blockID.getBlockCommitSequenceId(); Preconditions.checkNotNull(blockID, "BlockID cannot be null in GetBlock request"); - Preconditions.checkNotNull(blockID.getContainerID(), - "Container name cannot be null"); + Preconditions.checkNotNull(container, + "Container cannot be null"); KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java index c8a40b2bc5..8149e2c83f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java @@ -114,7 +114,7 @@ public StreamDownloader(long containerId, CompletableFuture response, this.containerId = containerId; this.outputPath = outputPath; try { - outputPath = Preconditions.checkNotNull(outputPath); + Preconditions.checkNotNull(outputPath, "Output path cannot be null"); Path parentPath = Preconditions.checkNotNull(outputPath.getParent()); Files.createDirectories(parentPath); stream = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index 55fcf26c56..c4b29ba272 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -71,8 +71,8 @@ public ScmTestMock() { new HashMap<>(); private Map nodeReports = new HashMap<>(); private AtomicInteger commandStatusReport = new AtomicInteger(0); - private List cmdStatusList = new LinkedList<>(); - private List scmCommandRequests = new LinkedList<>(); + private List cmdStatusList = new ArrayList<>(); + private List scmCommandRequests = new ArrayList<>(); /** * Returns the number of heartbeats made to this class. 
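Two Preconditions patterns are fixed above. In BlockManagerImpl, checkNotNull(blockID.getContainerID(), ...) tested an auto-boxed primitive that can never be null, so the patch checks the container reference instead; in GrpcReplicationClient, the return value of checkNotNull was assigned back to the same variable, a pointless self-assignment. A hedged sketch of both, assuming Guava's Preconditions on the classpath as in the patched code (demo values only):

    import com.google.common.base.Preconditions;

    public final class PreconditionsDemo {
      public static void main(String[] args) {
        long containerId = 7L;
        // Always passes: the long is boxed to a fresh Long, never null.
        Preconditions.checkNotNull(containerId);

        Object outputPath = new Object();
        // checkNotNull returns its argument, so 'x = checkNotNull(x)' is a
        // self-assignment; the bare call is all that is needed.
        Preconditions.checkNotNull(outputPath, "Output path cannot be null");
        System.out.println("both checks passed");
      }
    }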
* diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 260b15893b..8b84b8e737 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -50,7 +50,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Paths; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.UUID; @@ -86,9 +86,9 @@ public void setUp() throws Exception { conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - serverAddresses = new LinkedList<>(); - scmServers = new LinkedList<>(); - mockServers = new LinkedList<>(); + serverAddresses = new ArrayList<>(); + scmServers = new ArrayList<>(); + mockServers = new ArrayList<>(); for (int x = 0; x < scmServerCount; x++) { int port = SCMTestUtils.getReuseableAddress().getPort(); String address = "127.0.0.1"; @@ -361,8 +361,8 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { @Test public void testDatanodeStateMachineWithInvalidConfiguration() throws Exception { - LinkedList> confList = - new LinkedList>(); + List> confList = + new ArrayList<>(); confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, "")); // Invalid ozone.scm.names diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 76632bf9c2..35cda0051d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -49,6 +49,7 @@ import java.io.IOException; import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -160,7 +161,7 @@ private ContainerCommandRequestProto getWriteChunkRequest( String datanodeId, Long containerId, Long localId) { ByteString data = ByteString.copyFrom( - UUID.randomUUID().toString().getBytes()); + UUID.randomUUID().toString().getBytes(UTF_8)); ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo .newBuilder() .setChunkName( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java index b632e0211f..03f0cd4d81 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -61,7 +61,7 @@ public static void setup() { /** * Dummy report publisher for testing. 
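DummyResource earlier and DummyReportPublisher above were both made static nested classes. ErrorProne flags inner classes that never use the enclosing instance, because each one silently captures a reference to its outer object. An illustrative sketch (hypothetical classes, not from the patch):

    public final class OuterDemo {
      // Non-static inner class: every instance holds a hidden reference to
      // its enclosing OuterDemo, which can keep the outer object alive.
      final class Inner { }

      // Static nested class: no hidden outer reference.
      static final class Nested { }

      public static void main(String[] args) {
        Nested nested = new Nested();               // no outer instance needed
        Inner inner = new OuterDemo().new Inner();  // must build an outer first
        System.out.println(nested + " / " + inner);
      }
    }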
*/ - private class DummyReportPublisher extends ReportPublisher { + private static class DummyReportPublisher extends ReportPublisher { private final long frequency; private int getReportCount = 0; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java index 6b46762273..0e58e69864 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java @@ -1,19 +1,18 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ package org.apache.hadoop.ozone.container.common.volume; @@ -23,7 +22,6 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import static org.junit.Assert.*; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -35,19 +33,22 @@ import java.util.Properties; import java.util.UUID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + /** * Unit tests for {@link HddsVolume}. */ public class TestHddsVolume { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private static final String DATANODE_UUID = UUID.randomUUID().toString(); private static final String CLUSTER_ID = UUID.randomUUID().toString(); private static final Configuration CONF = new Configuration(); private static final String DU_CACHE_FILE = "scmUsed"; - + @Rule + public TemporaryFolder folder = new TemporaryFolder(); private File rootDir; private HddsVolume volume; private File versionFile; @@ -69,9 +70,9 @@ public void testHddsVolumeInitialization() throws Exception { // clusterID is not specified and the version file should not be written // to disk. assertTrue(volume.getClusterID() == null); - assertEquals(volume.getStorageType(), StorageType.DEFAULT); - assertEquals(volume.getStorageState(), - HddsVolume.VolumeState.NOT_FORMATTED); + assertEquals(StorageType.DEFAULT, volume.getStorageType()); + assertEquals(HddsVolume.VolumeState.NOT_FORMATTED, + volume.getStorageState()); assertFalse("Version file should not be created when clusterID is not " + "known.", versionFile.exists()); @@ -84,7 +85,7 @@ public void testHddsVolumeInitialization() throws Exception { assertTrue("Volume format should create Version file", versionFile.exists()); assertEquals(volume.getClusterID(), CLUSTER_ID); - assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL); + assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState()); } @Test @@ -111,7 +112,7 @@ public void testReadPropertiesFromVersionFile() throws Exception { } @Test - public void testShutdown() throws Exception{ + public void testShutdown() throws Exception { // Return dummy value > 0 for scmUsage so that scm cache file is written // during shutdown. GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class); @@ -125,8 +126,7 @@ public void testShutdown() throws Exception{ volume.shutdown(); // Volume state should be "NON_EXISTENT" when volume is shutdown. 
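Several assertions in this file and elsewhere in the patch are flipped into the (expected, actual) order that JUnit documents, as the hunk continuing below shows. The order only matters when a test fails: the message labels the first argument as the expected value. A sketch of the difference, assuming JUnit 4 on the classpath as in these tests:

    import static org.junit.Assert.assertEquals;

    public final class AssertOrderDemo {
      public static void main(String[] args) {
        int actual = compute();
        // This order fails with "expected:<42> but was:<3>"; swapping the
        // arguments would misleadingly blame the constant.
        assertEquals(42, actual);
      }

      private static int compute() {
        return 3;  // deliberately wrong so the assertion fires
      }
    }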
- assertEquals(volume.getStorageState(), - HddsVolume.VolumeState.NON_EXISTENT); + assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState()); // Volume should save scmUsed cache file once volume is shutdown File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE); @@ -139,7 +139,7 @@ public void testShutdown() throws Exception{ // as usage thread is shutdown. volume.getAvailable(); fail("HddsVolume#shutdown test failed"); - } catch (Exception ex){ + } catch (Exception ex) { assertTrue(ex instanceof IOException); assertTrue(ex.getMessage().contains( "Volume Usage thread is not running.")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 7bb8a43d7b..c50ec78381 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -22,7 +22,6 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -69,7 +68,7 @@ private void initializeVolumeSet() throws Exception { } @Rule - public Timeout testTimeout = new Timeout(300_000); + public Timeout testTimeout = new Timeout(300000); @Before public void setup() throws Exception { @@ -153,8 +152,7 @@ public void testFailVolume() throws Exception { assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed()); // Failed volume should not exist in VolumeMap - Path volume1Path = new Path(volume1); - assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path)); + assertFalse(volumeSet.getVolumeMap().containsKey(volume1)); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java new file mode 100644 index 0000000000..3328deb06d --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * Tests for Container Volumes. + */ +package org.apache.hadoop.ozone.container.common.volume; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java index 6fe6d81ee4..e3e683cb30 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java @@ -25,10 +25,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Before; @@ -37,12 +36,14 @@ import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; -import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.UUID; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; @@ -52,6 +53,8 @@ */ public class TestBlockManagerImpl { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); private OzoneConfiguration config; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; @@ -62,10 +65,6 @@ public class TestBlockManagerImpl { private BlockManagerImpl blockManager; private BlockID blockID; - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before public void setUp() throws Exception { config = new OzoneConfiguration(); @@ -93,7 +92,7 @@ public void setUp() throws Exception { blockData = new BlockData(blockID); blockData.addMetadata("VOLUME", "ozone"); blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new LinkedList<>(); + List chunkList = new ArrayList<>(); ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID .getLocalID(), 0), 0, 1024); chunkList.add(info.getProtoBufMessage()); @@ -124,88 +123,74 @@ public void testPutAndGetBlock() throws Exception { } - @Test public void testDeleteBlock() throws Exception { + assertEquals(0, + keyValueContainer.getContainerData().getKeyCount()); + //Put Block + blockManager.putBlock(keyValueContainer, blockData); + assertEquals(1, + 
keyValueContainer.getContainerData().getKeyCount()); + //Delete Block + blockManager.deleteBlock(keyValueContainer, blockID); + assertEquals(0, + keyValueContainer.getContainerData().getKeyCount()); try { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - blockManager.getBlock(keyValueContainer, blockID); - fail("testDeleteBlock"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - } - } catch (IOException ex) { - fail("testDeleteBlock failed"); + blockManager.getBlock(keyValueContainer, blockID); + fail("testDeleteBlock"); + } catch (StorageContainerException ex) { + GenericTestUtils.assertExceptionContains( + "Unable to find the block", ex); } } @Test public void testListBlock() throws Exception { - try { + blockManager.putBlock(keyValueContainer, blockData); + List listBlockData = blockManager.listBlock( + keyValueContainer, 1, 10); + assertNotNull(listBlockData); + assertTrue(listBlockData.size() == 1); + + for (long i = 2; i <= 10; i++) { + blockID = new BlockID(1L, i); + blockData = new BlockData(blockID); + blockData.addMetadata("VOLUME", "ozone"); + blockData.addMetadata("OWNER", "hdfs"); + List chunkList = new ArrayList<>(); + ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID + .getLocalID(), 0), 0, 1024); + chunkList.add(info.getProtoBufMessage()); + blockData.setChunks(chunkList); blockManager.putBlock(keyValueContainer, blockData); - List listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 1); - - for (long i = 2; i <= 10; i++) { - blockID = new BlockID(1L, i); - blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new LinkedList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(keyValueContainer, blockData); - } - - listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 10); - - } catch (IOException ex) { - fail("testListBlock failed"); } + + listBlockData = blockManager.listBlock( + keyValueContainer, 1, 10); + assertNotNull(listBlockData); + assertTrue(listBlockData.size() == 10); } @Test public void testGetNoSuchBlock() throws Exception { + assertEquals(0, + keyValueContainer.getContainerData().getKeyCount()); + //Put Block + blockManager.putBlock(keyValueContainer, blockData); + assertEquals(1, + keyValueContainer.getContainerData().getKeyCount()); + //Delete Block + blockManager.deleteBlock(keyValueContainer, blockID); + assertEquals(0, + keyValueContainer.getContainerData().getKeyCount()); try { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - 
//Since the block has been deleted, we should not be able to find it - blockManager.getBlock(keyValueContainer, blockID); - fail("testGetNoSuchBlock failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult()); - } - } catch (IOException ex) { + //Since the block has been deleted, we should not be able to find it + blockManager.getBlock(keyValueContainer, blockID); fail("testGetNoSuchBlock failed"); + } catch (StorageContainerException ex) { + GenericTestUtils.assertExceptionContains( + "Unable to find the block", ex); + assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult()); } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java index 9e3edf74fb..3f181d1822 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java @@ -43,6 +43,7 @@ import java.util.Arrays; import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; @@ -88,7 +89,7 @@ public void setUp() throws Exception { keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - data = "testing write chunks".getBytes(); + data = "testing write chunks".getBytes(UTF_8); // Creating BlockData blockID = new BlockID(1L, 1L); chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index fbc5ad033a..5fa7b540c5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -42,9 +42,9 @@ import org.junit.runners.Parameterized; import java.io.File; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import java.util.UUID; @@ -252,7 +252,7 @@ private void createContainerWithBlocks(long containerId, int .randomUUID().toString()); MetadataStore metadataStore = BlockUtils.getDB(containerData, conf); - List chunkList = new LinkedList<>(); + List chunkList = new ArrayList<>(); ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); chunkList.add(info.getProtoBufMessage()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 8c0db4a6a6..770a9f7f91 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -51,12 +51,13 @@ 
import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.List; -import java.util.LinkedList; import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.ratis.util.Preconditions.assertTrue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -135,7 +136,7 @@ private void addBlocks(int count) throws Exception { BlockData blockData = new BlockData(blockID); blockData.addMetadata("VOLUME", "ozone"); blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new LinkedList<>(); + List chunkList = new ArrayList<>(); ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID .getLocalID(), 0), 0, 1024); chunkList.add(info.getProtoBufMessage()); @@ -163,8 +164,6 @@ public void testCreateContainer() throws Exception { // Check whether containerMetaDataPath and chunksPath exists or not. assertTrue(containerMetaDataPath != null); assertTrue(chunksPath != null); - File containerMetaDataLoc = new File(containerMetaDataPath); - //Check whether container file and container db file exists or not. assertTrue(keyValueContainer.getContainerFile().exists(), ".Container File does not exist"); @@ -190,7 +189,7 @@ public void testContainerImportExport() throws Exception { //write one few keys to check the key count after import MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf); for (int i = 0; i < numberOfKeysToWrite; i++) { - metadataStore.put(("test" + i).getBytes(), "test".getBytes()); + metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8)); } metadataStore.close(); @@ -247,7 +246,7 @@ public void testContainerImportExport() throws Exception { container.importContainerData(fis, packer); } fail("Container is imported twice. 
Previous files are overwritten"); - } catch (Exception ex) { + } catch (IOException ex) { //all good } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index dcda10b6bc..7fc065f2fc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -226,9 +226,10 @@ public void testVolumeSetInKeyValueHandler() throws Exception{ VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset, volumeSet, metrics); - assertEquals(keyValueHandler.getVolumeChoosingPolicyForTesting() - .getClass().getName(), "org.apache.hadoop.ozone.container.common" + - ".volume.RoundRobinVolumeChoosingPolicy"); + assertEquals("org.apache.hadoop.ozone.container.common" + + ".volume.RoundRobinVolumeChoosingPolicy", + keyValueHandler.getVolumeChoosingPolicyForTesting() + .getClass().getName()); //Set a class which is not of sub class of VolumeChoosingPolicy conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java index a599f72169..1a92ca47a9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java @@ -23,7 +23,6 @@ import java.io.FileWriter; import java.io.IOException; import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -45,6 +44,8 @@ import org.junit.BeforeClass; import org.junit.Test; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test the tar/untar for a given container. 
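A change that recurs throughout the tests above is LinkedList to ArrayList. ErrorProne discourages LinkedList for plain append-and-iterate use: ArrayList stores its elements in a contiguous array, giving O(1) indexed access and much lower per-element overhead than a chain of doubly linked nodes. Minimal sketch:

    import java.util.ArrayList;
    import java.util.List;

    public final class ListChoiceDemo {
      public static void main(String[] args) {
        List<String> chunks = new ArrayList<>();  // was: new LinkedList<>()
        for (int i = 0; i < 5; i++) {
          chunks.add("chunk-" + i);               // amortized O(1) append
        }
        System.out.println(chunks.get(3));        // O(1) here; O(n) on a LinkedList
      }
    }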
*/ @@ -161,7 +162,7 @@ public void pack() throws IOException, CompressorException { //read the container descriptor only try (FileInputStream input = new FileInputStream(targetFile.toFile())) { String containerYaml = new String(packer.unpackContainerDescriptor(input), - Charset.forName(StandardCharsets.UTF_8.name())); + Charset.forName(UTF_8.name())); Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml); } @@ -177,7 +178,7 @@ public void pack() throws IOException, CompressorException { try (FileInputStream input = new FileInputStream(targetFile.toFile())) { descriptor = new String(packer.unpackContainerData(destinationContainer, input), - Charset.forName(StandardCharsets.UTF_8.name())); + Charset.forName(UTF_8.name())); } assertExampleMetadataDbIsGood( @@ -204,7 +205,7 @@ private void assertExampleMetadataDbIsGood(Path dbPath) try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) { List strings = IOUtils - .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name())); + .readLines(testFile, Charset.forName(UTF_8.name())); Assert.assertEquals(1, strings.size()); Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0)); } @@ -222,7 +223,7 @@ private void assertExampleChunkFileIsGood(Path chunkDirPath) try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) { List strings = IOUtils - .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name())); + .readLines(testFile, Charset.forName(UTF_8.name())); Assert.assertEquals(1, strings.size()); Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0)); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java new file mode 100644 index 0000000000..afbf274a8f --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * Chunk Manager Checks. + */ +package org.apache.hadoop.ozone.container.keyvalue; \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java index 9aeab7bfc3..1a6555c140 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java @@ -139,6 +139,7 @@ public > void addHandler( * @throws IllegalArgumentException If there is no EventHandler for * the specific event. */ + @Override public > void fireEvent( EVENT_TYPE event, PAYLOAD payload) { @@ -219,7 +220,9 @@ public void processAll(long timeout) { try { Thread.sleep(100); } catch (InterruptedException e) { - e.printStackTrace(); + LOG.warn("Interrupted exception while sleeping.", e); + // We ignore this exception for the time being. Review: should we + // propagate it back to the caller? } if (Time.now() > currentTime + timeout) { @@ -229,7 +232,7 @@ public void processAll(long timeout) { } } } - + @Override public void close() { isRunning = false; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java index b72d2ae768..88ac3786c7 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java @@ -1,24 +1,21 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ package org.apache.hadoop.hdds.server.events; -import java.util.List; -import java.util.Objects; import org.apache.hadoop.hdds.HddsIdFactory; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.lease.LeaseManager; @@ -27,6 +24,9 @@ import org.junit.Before; import org.junit.Test; +import java.util.List; +import java.util.Objects; + /** * Test the basic functionality of event watcher. */ @@ -41,7 +41,7 @@ public class TestEventWatcher { private static final TypedEvent REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class); - LeaseManager leaseManager; + private LeaseManager leaseManager; @Before public void startLeaseManager() { @@ -56,7 +56,6 @@ public void stopLeaseManager() { DefaultMetricsSystem.shutdown(); } - @Test public void testEventHandling() throws InterruptedException { EventQueue queue = new EventQueue(); @@ -180,7 +179,7 @@ public void testMetrics() throws InterruptedException { queue.fireEvent(REPLICATION_COMPLETED, event1Completed); - Thread.sleep(2200l); + Thread.sleep(2200L); //until now: 3 in-progress activities are tracked with three // UnderreplicatedEvents. The first one is completed, the remaining two @@ -201,27 +200,29 @@ public void testMetrics() throws InterruptedException { } private EventWatcher - createEventWatcher() { + createEventWatcher() { return new CommandWatcherExample(WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leaseManager); } - private class CommandWatcherExample + private static class CommandWatcherExample extends EventWatcher { - public CommandWatcherExample(Event startEvent, + CommandWatcherExample(Event startEvent, Event completionEvent, LeaseManager leaseManager) { super("TestCommandWatcher", startEvent, completionEvent, leaseManager); } @Override - protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) { + protected void onTimeout(EventPublisher publisher, + UnderreplicatedEvent payload) { publisher.fireEvent(UNDER_REPLICATED, payload); } @Override - protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) { + protected void onFinished(EventPublisher publisher, + UnderreplicatedEvent payload) { //Good job. We did it. 
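A note for reviewers weighing the open question in the EventQueue change earlier in this patch: when an InterruptedException is caught and not rethrown, the usual idiom is to restore the thread's interrupt flag rather than swallow it, so callers further up the stack can still observe the interruption. A minimal, self-contained sketch of that idiom follows; the class name is illustrative and nothing below is code from this patch.

public final class InterruptIdiomExample {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Restore the flag instead of swallowing the interrupt, so code
        // further up the stack can still see that a stop was requested.
        Thread.currentThread().interrupt();
      }
      System.out.println("interrupt flag still set: "
          + Thread.currentThread().isInterrupted());
    });
    worker.start();
    worker.interrupt();
    worker.join();
  }
}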
} @@ -240,13 +241,14 @@ private static class ReplicationCompletedEvent private final String datanodeId; - public ReplicationCompletedEvent(long id, String containerId, + ReplicationCompletedEvent(long id, String containerId, String datanodeId) { this.id = id; this.containerId = containerId; this.datanodeId = datanodeId; } + @Override public long getId() { return id; } @@ -279,11 +281,12 @@ private static class UnderreplicatedEvent private final String containerId; - public UnderreplicatedEvent(long id, String containerId) { + UnderreplicatedEvent(long id, String containerId) { this.containerId = containerId; this.id = id; } + @Override public long getId() { return id; } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java new file mode 100644 index 0000000000..720dd6fa4d --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +/** + * Tests for Event Watcher. + */ +package org.apache.hadoop.hdds.server.events; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java index 904762db59..ee64c488cd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java @@ -19,9 +19,12 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; +/** + * Pending Deletes in the block space. + */ public class PendingDeleteStatusList { private List pendingDeleteStatuses; @@ -29,7 +32,7 @@ public class PendingDeleteStatusList { public PendingDeleteStatusList(DatanodeDetails datanodeDetails) { this.datanodeDetails = datanodeDetails; - pendingDeleteStatuses = new LinkedList<>(); + pendingDeleteStatuses = new ArrayList<>(); } public void addPendingDeleteStatus(long dnDeleteTransactionId, @@ -39,6 +42,9 @@ public void addPendingDeleteStatus(long dnDeleteTransactionId, containerId)); } + /** + * Status of pending deletes. + */ public static class PendingDeleteStatus { private long dnDeleteTransactionId; private long scmDeleteTransactionId; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java index 0ed06dd568..c6367c2a05 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java @@ -44,6 +44,7 @@ public ChillModePrecheck(Configuration conf) { } } + @Override public boolean check(ScmOps op) throws SCMException { if (inChillMode.get() && ChillModeRestrictedOps .isRestrictedInChillMode(op)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java index 9445fe8e89..8bfcb848ec 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java @@ -134,6 +134,16 @@ public static ContainerReplicaBuilder newBuilder() { return new ContainerReplicaBuilder(); } + @Override + public String toString() { + return "ContainerReplica{" + + "containerID=" + containerID + + ", datanodeDetails=" + datanodeDetails + + ", placeOfBirth=" + placeOfBirth + + ", sequenceId=" + sequenceId + + '}'; + } + /** * Used for building ContainerReplica instance. */ @@ -148,12 +158,12 @@ public static class ContainerReplicaBuilder { /** * Set Container Id. 
* - * @param containerId ContainerID + * @param cID ContainerID * @return ContainerReplicaBuilder */ public ContainerReplicaBuilder setContainerID( - final ContainerID containerId) { - containerID = containerId; + final ContainerID cID) { + this.containerID = cID; return this; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java index 60861b770c..9fc47eaf74 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java @@ -27,7 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.stream.Collectors; @@ -102,7 +102,7 @@ public Configuration getConf() { * @return list of datanodes chosen. * @throws SCMException SCM exception. */ - + @Override public List chooseDatanodes( List excludedNodes, int nodesRequired, final long sizeRequired) throws SCMException { @@ -167,7 +167,7 @@ private boolean hasEnoughSpace(DatanodeDetails datanodeDetails, public List getResultSet( int nodesRequired, List healthyNodes) throws SCMException { - List results = new LinkedList<>(); + List results = new ArrayList<>(); for (int x = 0; x < nodesRequired; x++) { // invoke the choose function defined in the derived classes. DatanodeDetails nodeId = chooseNode(healthyNodes); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index 76702d555e..a70f633e82 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -83,6 +83,7 @@ public List chooseDatanodes( * @param healthyNodes - all healthy datanodes. * @return one randomly chosen datanode that from two randomly chosen datanode */ + @Override public DatanodeDetails chooseNode(final List healthyNodes) { DatanodeDetails selectedNode = healthyNodes.get(getRand().nextInt(healthyNodes.size())); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java index a6e732c750..530594258a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java @@ -23,7 +23,7 @@ * DatanodeMetric acts as the basis for all the metric that is used in * comparing 2 datanodes. */ -public interface DatanodeMetric extends Comparable { +public interface DatanodeMetric { /** * Some syntactic sugar over Comparable interface. 
This makes code easier to @@ -87,5 +87,4 @@ public interface DatanodeMetric extends Comparable { */ void subtract(T value); - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java index 050d26bd23..e1c8f87d6a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java @@ -136,7 +136,6 @@ public void subtract(Long subValue) { * @throws ClassCastException if the specified object's type prevents it * from being compared to this object. */ - @Override public int compareTo(Long o) { return Long.compare(this.value, o); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index efd5fd60ab..a886084b98 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -23,7 +23,7 @@ /** * SCM Node Metric that is used in the placement classes. */ -public class SCMNodeMetric implements DatanodeMetric { +public class SCMNodeMetric implements DatanodeMetric { private SCMNodeStat stat; /** @@ -191,7 +191,7 @@ public void subtract(SCMNodeStat value) { * @throws ClassCastException if the specified object's type prevents it * from being compared to this object. */ - @Override + //@Override public int compareTo(SCMNodeStat o) { if (isEqual(o)) { return 0; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 3c871d3ef5..962bbb464e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -52,6 +52,7 @@ public SCMNodeStat(long capacity, long used, long remaining) { /** * @return the total configured capacity of the node. */ + @Override public LongMetric getCapacity() { return capacity; } @@ -59,6 +60,7 @@ public LongMetric getCapacity() { /** * @return the total SCM used space on the node. */ + @Override public LongMetric getScmUsed() { return scmUsed; } @@ -66,6 +68,7 @@ public LongMetric getScmUsed() { /** * @return the total remaining space available on the node. 
*/ + @Override public LongMetric getRemaining() { return remaining; } @@ -77,12 +80,9 @@ public LongMetric getRemaining() { * @param newUsed in bytes * @param newRemaining in bytes */ + @Override @VisibleForTesting public void set(long newCapacity, long newUsed, long newRemaining) { - Preconditions.checkNotNull(newCapacity, "Capacity cannot be null"); - Preconditions.checkNotNull(newUsed, "used cannot be null"); - Preconditions.checkNotNull(newRemaining, "remaining cannot be null"); - Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -101,6 +101,7 @@ public void set(long newCapacity, long newUsed, long newRemaining) { * @param stat Nodestat. * @return SCMNodeStat */ + @Override public SCMNodeStat add(NodeStat stat) { this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); @@ -114,6 +115,7 @@ public SCMNodeStat add(NodeStat stat) { * @param stat SCMNodeStat. * @return Modified SCMNodeStat */ + @Override public SCMNodeStat subtract(NodeStat stat) { this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java index 993a986024..5a2e2b112d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java @@ -48,12 +48,13 @@ public ReplicationActivityStatus(){ replicationStatusListener = new ReplicationStatusListener(); chillModeStatusListener = new ChillModeStatusListener(); } - + @Override public boolean isReplicationEnabled() { return replicationEnabled.get(); } @VisibleForTesting + @Override public void setReplicationEnabled(boolean enabled) { replicationEnabled.set(enabled); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index 8c11e84e66..e700ecde9e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -98,6 +98,7 @@ public void start() { threadFactory.newThread(this).start(); } + @Override public void run() { while (running) { @@ -168,6 +169,7 @@ public void run() { } else if (deficit < 0) { //TODO: too many replicas. Not handled yet. 
+ LOG.debug("Too many replicas is not handled yet."); } } catch (Exception e) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java index 996478caaa..eb6dc0d424 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java @@ -22,8 +22,8 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; +import java.util.ArrayList; import java.util.HashMap; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.UUID; @@ -39,7 +39,7 @@ */ public class CommandQueue { // This list is used as default return value. - private static final List DEFAULT_LIST = new LinkedList<>(); + private static final List DEFAULT_LIST = new ArrayList<>(); private final Map commandMap; private final Lock lock; private long commandsInQueue; @@ -136,7 +136,7 @@ private static class Commands { * Constructs a Commands class. */ Commands() { - commands = new LinkedList<>(); + commands = new ArrayList<>(); updateTime = 0; readTime = 0; } @@ -182,7 +182,7 @@ public void add(SCMCommand command) { */ public List getCommands() { List temp = this.commands; - this.commands = new LinkedList<>(); + this.commands = new ArrayList<>(); readTime = Time.monotonicNow(); return temp; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index a45951903c..cddd3ae720 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -144,6 +144,8 @@ public NodeStateManager(Configuration conf, EventPublisher eventPublisher) { executorService = HadoopExecutors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("SCM Heartbeat Processing Thread - %d").build()); + //BUG:BUG TODO: The return value is ignored, if an exception is thrown in + // the executing funtion, it will be ignored. executorService.schedule(this, heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); } @@ -331,7 +333,7 @@ public List getDeadNodes() { * @return list of nodes */ public List getNodes(NodeState state) { - List nodes = new LinkedList<>(); + List nodes = new ArrayList<>(); nodeStateMap.getNodes(state).forEach( uuid -> { try { @@ -352,7 +354,7 @@ public List getNodes(NodeState state) { * @return all the managed nodes */ public List getAllNodes() { - List nodes = new LinkedList<>(); + List nodes = new ArrayList<>(); nodeStateMap.getAllNodes().forEach( uuid -> { try { @@ -613,6 +615,8 @@ public void run() { if (!Thread.currentThread().isInterrupted() && !executorService.isShutdown()) { + //BUGBUG: The return future needs to checked here to make sure the + // exceptions are handled correctly. 
executorService.schedule(this, heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); } else { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java index 9625f81908..c0f46f15fe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java @@ -59,6 +59,7 @@ public Set getContainers(UUID datanode) { * @param datanodeID -- Datanode UUID * @param containerIDs - List of ContainerIDs. */ + @Override public void insertNewDatanode(UUID datanodeID, Set containerIDs) throws SCMException { super.insertNewDatanode(datanodeID, containerIDs); @@ -84,6 +85,7 @@ public void setContainersForDatanode(UUID datanodeID, } @VisibleForTesting + @Override public int size() { return dn2ObjectMap.size(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java index a917e7964d..a68e2b52ec 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java @@ -172,7 +172,7 @@ public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException { public List getNodes(NodeState state) { lock.readLock().lock(); try { - return new LinkedList<>(stateMap.get(state)); + return new ArrayList<>(stateMap.get(state)); } finally { lock.readLock().unlock(); } @@ -186,7 +186,7 @@ public List getNodes(NodeState state) { public List getAllNodes() { lock.readLock().lock(); try { - return new LinkedList<>(nodeMap.keySet()); + return new ArrayList<>(nodeMap.keySet()); } finally { lock.readLock().unlock(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index 5e8d0dc2f7..cf1955dfe3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; import org.slf4j.Logger; @@ -94,7 +95,8 @@ private void initializePipelineState() throws IOException { return; } List> pipelines = - pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null); + pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, + (MetadataKeyFilters.MetadataKeyFilter[])null); for (Map.Entry entry : pipelines) { Pipeline pipeline = Pipeline.getFromProtobuf( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index f141ae5b54..b59042e75a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -58,7 +58,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.TreeSet; @@ -354,7 +353,7 @@ public boolean forceExitChillMode() throws IOException { */ public List queryNode(HddsProtos.NodeState state) { Preconditions.checkNotNull(state, "Node Query set cannot be null"); - return new LinkedList<>(queryNodeState(state)); + return new ArrayList<>(queryNodeState(state)); } @VisibleForTesting diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 0beceab5ba..77ef9c849f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -89,7 +89,7 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -225,7 +225,7 @@ public static SCMRegisteredResponseProto getRegisteredResponse( @Override public SCMHeartbeatResponseProto sendHeartbeat( SCMHeartbeatRequestProto heartbeat) throws IOException { - List cmdResponses = new LinkedList<>(); + List cmdResponses = new ArrayList<>(); for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) { cmdResponses.add(getCommandResponse(cmd)); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java index 6c4f249f76..6f894e8626 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java @@ -42,7 +42,7 @@ import static org.junit.Assert.assertTrue; /** - * Unit tests for {@link HddsServerUtil} + * Unit tests for {@link HddsServerUtil}. */ public class TestHddsServerUtils { public static final Logger LOG = LoggerFactory.getLogger( @@ -58,6 +58,7 @@ public class TestHddsServerUtils { * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port. 
*/ @Test + @SuppressWarnings("StringSplitter") public void testGetDatanodeAddressWithPort() { final String scmHost = "host123:100"; final Configuration conf = new OzoneConfiguration(); @@ -78,8 +79,8 @@ public void testGetDatanodeAddressWithoutPort() { conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); } /** @@ -93,8 +94,8 @@ public void testDatanodeAddressFallbackToClientNoPort() { conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); } /** @@ -103,6 +104,7 @@ public void testDatanodeAddressFallbackToClientNoPort() { * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored. */ @Test + @SuppressWarnings("StringSplitter") public void testDatanodeAddressFallbackToClientWithPort() { final String scmHost = "host123:100"; final Configuration conf = new OzoneConfiguration(); @@ -124,8 +126,8 @@ public void testDatanodeAddressFallbackToScmNamesNoPort() { conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); } /** @@ -134,6 +136,7 @@ public void testDatanodeAddressFallbackToScmNamesNoPort() { * defined by OZONE_SCM_NAMES should be ignored. */ @Test + @SuppressWarnings("StringSplitter") public void testDatanodeAddressFallbackToScmNamesWithPort() { final String scmHost = "host123:100"; final Configuration conf = new OzoneConfiguration(); @@ -141,7 +144,7 @@ public void testDatanodeAddressFallbackToScmNamesWithPort() { final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT); + assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java index 08cbdd7022..631283c2bc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java @@ -48,7 +48,7 @@ import java.io.File; import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -378,12 +378,9 @@ public void testScmDetectStaleAndDeadNode() throws IOException, * Check for NPE when datanodeDetails is passed null for sendHeartbeat. 
* * @throws IOException - * @throws InterruptedException - * @throws TimeoutException */ @Test - public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, - InterruptedException, TimeoutException { + public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException { try (SCMNodeManager nodeManager = createNodeManager(getConf())) { nodeManager.processHeartbeat(null); } catch (NullPointerException npe) { @@ -588,7 +585,7 @@ private void heartbeatNodeSet(SCMNodeManager manager, */ private List createNodeSet(SCMNodeManager nodeManager, int count) { - List list = new LinkedList<>(); + List list = new ArrayList<>(); for (int x = 0; x < count; x++) { DatanodeDetails datanodeDetails = TestUtils .createRandomDatanodeAndRegister(nodeManager); @@ -943,7 +940,7 @@ public void testScmNodeReportUpdate() throws IOException, } @Test - public void testHandlingSCMCommandEvent() { + public void testHandlingSCMCommandEvent() throws IOException { OzoneConfiguration conf = getConf(); conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); @@ -974,6 +971,7 @@ public void testHandlingSCMCommandEvent() { .assertEquals(command.get(0).getClass(), CloseContainerCommand.class); } catch (IOException e) { e.printStackTrace(); + throw e; } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java index 623fc16a92..e12c6433c9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java @@ -150,22 +150,22 @@ public void testProcessNodeReportCheckOneNode() throws IOException { path, reportCapacity, reportScmUsed, reportRemaining, null); StorageReportResult result = map.processNodeReport(key, TestUtils.createNodeReport(storageReport)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); + Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, + result.getStatus()); StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); reportList.add(srb); result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); + Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, + result.getStatus()); reportList.add(TestUtils .createStorageReport(UUID.randomUUID(), path, reportCapacity, reportCapacity, 0, null)); result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE); + Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE, + result.getStatus()); // Mark a disk failed StorageReportProto srb2 = StorageReportProto.newBuilder() .setStorageUuid(UUID.randomUUID().toString()) @@ -174,8 +174,8 @@ public void testProcessNodeReportCheckOneNode() throws IOException { reportList.add(srb2); nrb.addAllStorageReport(reportList); result = map.processNodeReport(key, nrb.addStorageReport(srb).build()); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE); + 
Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus + .FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java index ec1d5279d0..77ed9075ae 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java @@ -51,7 +51,7 @@ private void generateData() { for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { TreeSet currentSet = new TreeSet<>(); for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) { - long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex; + long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex; currentSet.add(new ContainerID(currentCnIndex)); } testData.put(UUID.randomUUID(), currentSet); @@ -115,8 +115,8 @@ public void testProcessReportCheckOneNode() throws SCMException { map.insertNewDatanode(key, values); Assert.assertTrue(map.isKnownDatanode(key)); ReportResult result = map.processReport(key, values); - Assert.assertEquals(result.getStatus(), - ReportResult.ReportStatus.ALL_IS_WELL); + Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL, + result.getStatus()); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index 7150d1b94f..328ba307b9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -19,9 +19,8 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -30,20 +29,18 @@ * Tests that test Metrics that support placement. 
*/ public class TestDatanodeMetrics { - @Rule - public ExpectedException exception = ExpectedException.none(); @Test public void testSCMNodeMetric() { SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); assertEquals((long) stat.getCapacity().get(), 100L); - assertEquals((long) stat.getScmUsed().get(), 10L); - assertEquals((long) stat.getRemaining().get(), 90L); + assertEquals(10L, (long) stat.getScmUsed().get()); + assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); - assertEquals((long) stat.getCapacity().get(), 100L); - assertEquals((long) stat.getScmUsed().get(), 10L); - assertEquals((long) stat.getRemaining().get(), 90L); + assertEquals(100L, (long) stat.getCapacity().get()); + assertEquals(10L, (long) stat.getScmUsed().get()); + assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric newMetric = new SCMNodeMetric(newStat); assertTrue(metric.isEqual(newMetric.get())); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 136a684542..353996b204 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -124,6 +124,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException { * @return String - Ozone Volume name. * @throws IOException - Throws if the s3Bucket does not exist. */ + @SuppressWarnings("StringSplitter") public String getOzoneVolumeName(String s3BucketName) throws IOException { String mapping = getOzoneBucketMapping(s3BucketName); return mapping.split("/")[0]; @@ -136,6 +137,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException { * @return String - Ozone bucket Name. * @throws IOException - Throws if the s3bucket does not exist. 
*/ + @SuppressWarnings("StringSplitter") public String getOzoneBucketName(String s3BucketName) throws IOException { String mapping = getOzoneBucketMapping(s3BucketName); return mapping.split("/")[1]; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index ea002ec99f..98481e138e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -610,6 +610,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException { } @Override + @SuppressWarnings("StringSplitter") public String getOzoneVolumeName(String s3BucketName) throws IOException { String mapping = getOzoneBucketMapping(s3BucketName); return mapping.split("/")[0]; @@ -617,6 +618,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException { } @Override + @SuppressWarnings("StringSplitter") public String getOzoneBucketName(String s3BucketName) throws IOException { String mapping = getOzoneBucketMapping(s3BucketName); return mapping.split("/")[1]; diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index 9850778516..9e60e4ec8f 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -40,11 +40,12 @@ /** * This test class verifies the parsing of SCM endpoint config settings. The - * parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}. + * parsing logic is in + * {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}. */ public class TestHddsClientUtils { @Rule - public Timeout timeout = new Timeout(300_000); + public Timeout timeout = new Timeout(300000); @Rule public ExpectedException thrown= ExpectedException.none(); @@ -114,13 +115,14 @@ public void testBlockClientFallbackToClientNoPort() { final String scmHost = "host123"; final Configuration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( + final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); } @Test + @SuppressWarnings("StringSplitter") public void testBlockClientFallbackToClientWithPort() { // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY. 
@@ -132,8 +134,8 @@ public void testBlockClientFallbackToClientWithPort() { conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT); + assertEquals(scmHost.split(":")[0], address.getHostName()); + assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); } @Test @@ -143,13 +145,14 @@ public void testBlockClientFallbackToScmNamesNoPort() { final String scmHost = "host456"; final Configuration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( + final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); } @Test + @SuppressWarnings("StringSplitter") public void testBlockClientFallbackToScmNamesWithPort() { // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY // are undefined it should fallback to OZONE_SCM_NAMES. @@ -159,10 +162,10 @@ public void testBlockClientFallbackToScmNamesWithPort() { final String scmHost = "host456:200"; final Configuration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( + final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT); + assertEquals(scmHost.split(":")[0], address.getHostName()); + assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); } @Test @@ -172,12 +175,13 @@ public void testClientFallbackToScmNamesNoPort() { final String scmHost = "host456"; final Configuration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf); - assertEquals(address.getHostName(), scmHost); - assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT); + final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); + assertEquals(scmHost, address.getHostName()); + assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort()); } @Test + @SuppressWarnings("StringSplitter") public void testClientFallbackToScmNamesWithPort() { // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback // to OZONE_SCM_NAMES. 
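For background on the @SuppressWarnings("StringSplitter") annotations appearing in these tests: the ErrorProne StringSplitter check flags String.split(String) because the argument is a regex and trailing empty strings are silently dropped. Where suppression is undesirable, Guava's Splitter is the commonly suggested alternative. A minimal sketch, assuming Guava is on the classpath; the class name is illustrative and not from this patch.

import com.google.common.base.Splitter;

import java.util.List;

public final class SplitterExample {
  public static void main(String[] args) {
    String scmAddress = "host456:200";
    // Splitter treats ':' as a literal character and keeps empty
    // trailing segments, avoiding String.split()'s surprises.
    List<String> parts = Splitter.on(':').splitToList(scmAddress);
    System.out.println(parts.get(0)); // host456
    System.out.println(parts.get(1)); // 200
  }
}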
@@ -187,9 +191,9 @@ public void testClientFallbackToScmNamesWithPort() { final String scmHost = "host456:300"; final Configuration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT); + final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); + assertEquals(scmHost.split(":")[0], address.getHostName()); + assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort()); } @Test diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java index de75a05e9d..2584eb58c2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java @@ -34,6 +34,7 @@ /** * This helper class keeps a map of all user and their permissions. */ +@SuppressWarnings("ProtocolBufferOrdinal") public class OmOzoneAclMap { // per Acl Type user:rights map private ArrayList> aclMaps; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java index cf2810f801..26798e9241 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.web.response; import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hdds.protocol.StorageType; @@ -85,7 +85,7 @@ public BucketInfo(String volumeName, String bucketName) { * Default constructor for BucketInfo. */ public BucketInfo() { - acls = new LinkedList(); + acls = new ArrayList<>(); } /** @@ -318,7 +318,7 @@ public void setKeyCount(long keyCount) { * for the Json serialization. */ @JsonFilter(BUCKET_INFO) - class MixIn { + static class MixIn { } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java index f45883b695..e364e7db81 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.web; - -import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.web.response.BucketInfo; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.web.response.BucketInfo; import org.junit.Test; import java.io.IOException; -import java.util.LinkedList; +import java.util.ArrayList; import java.util.List; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + /** * Test Ozone Bucket Info operation. 
*/ @@ -38,7 +40,7 @@ public void testBucketInfoJson() throws IOException { BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); String bucketInfoString = bucketInfo.toJsonString(); BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); + assertEquals(bucketInfo, newBucketInfo); } @Test @@ -46,7 +48,7 @@ public void testBucketInfoDBString() throws IOException { BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); String bucketInfoString = bucketInfo.toDBString(); BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); + assertEquals(bucketInfo, newBucketInfo); } @Test @@ -54,18 +56,17 @@ public void testBucketInfoAddAcls() throws IOException { BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); String bucketInfoString = bucketInfo.toDBString(); BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); - List aclList = new LinkedList<>(); + assertEquals(bucketInfo, newBucketInfo); + List aclList = new ArrayList<>(); aclList.add(OzoneAcl.parseAcl("user:bilbo:r")); aclList.add(OzoneAcl.parseAcl("user:samwise:rw")); newBucketInfo.setAcls(aclList); - assert(newBucketInfo.getAcls() != null); - assert(newBucketInfo.getAcls().size() == 2); + assertNotNull(newBucketInfo.getAcls()); + assertEquals(2, newBucketInfo.getAcls().size()); } - @Test public void testBucketInfoVersionAndType() throws IOException { BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName"); @@ -75,7 +76,7 @@ public void testBucketInfoVersionAndType() throws IOException { String bucketInfoString = bucketInfo.toDBString(); BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString); - assert(bucketInfo.equals(newBucketInfo)); + assertEquals(bucketInfo, newBucketInfo); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java index d777d0cd89..ba4a5acf0c 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java @@ -1,19 +1,18 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

* http://www.apache.org/licenses/LICENSE-2.0 - * + *

* Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ package org.apache.hadoop.ozone.web; @@ -82,29 +81,29 @@ public void testParseQuota() { @Test public void testVerifyQuota() { OzoneQuota qt = OzoneQuota.parseQuota("10TB"); - assertEquals(qt.getSize(), 10); - assertEquals(qt.getUnit(), OzoneQuota.Units.TB); - assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L * 1024L)); + assertEquals(10, qt.getSize()); + assertEquals(OzoneQuota.Units.TB, qt.getUnit()); + assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes()); qt = OzoneQuota.parseQuota("10MB"); - assertEquals(qt.getSize(), 10); - assertEquals(qt.getUnit(), OzoneQuota.Units.MB); - assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L)); + assertEquals(10, qt.getSize()); + assertEquals(OzoneQuota.Units.MB, qt.getUnit()); + assertEquals(10L * (1024L * 1024L), qt.sizeInBytes()); qt = OzoneQuota.parseQuota("10GB"); - assertEquals(qt.getSize(), 10); - assertEquals(qt.getUnit(), OzoneQuota.Units.GB); - assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L)); + assertEquals(10, qt.getSize()); + assertEquals(OzoneQuota.Units.GB, qt.getUnit()); + assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes()); qt = OzoneQuota.parseQuota("10BYTES"); - assertEquals(qt.getSize(), 10); - assertEquals(qt.getUnit(), OzoneQuota.Units.BYTES); - assertEquals(qt.sizeInBytes(), 10L); + assertEquals(10, qt.getSize()); + assertEquals(OzoneQuota.Units.BYTES, qt.getUnit()); + assertEquals(10L, qt.sizeInBytes()); OzoneQuota emptyQuota = new OzoneQuota(); - assertEquals(emptyQuota.sizeInBytes(), -1L); - assertEquals(emptyQuota.getSize(), 0); - assertEquals(emptyQuota.getUnit(), OzoneQuota.Units.UNDEFINED); + assertEquals(-1L, emptyQuota.sizeInBytes()); + assertEquals(0, emptyQuota.getSize()); + assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit()); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 66bdb5b0f5..4f8943e632 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -23,8 +23,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.AfterClass; @@ -36,11 +35,12 @@ import java.util.Set; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationFactor.THREE; -import 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 66bdb5b0f5..4f8943e632 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -23,8 +23,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -36,11 +35,12 @@
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 
+/**
+ * Tests for Pipeline Closing.
+ */
 public class TestPipelineClose {
 
   private static MiniOzoneCluster cluster;
@@ -88,7 +88,6 @@ public static void shutdown() {
     }
   }
 
-
   @Test
   public void testPipelineCloseWithClosedContainer() throws IOException {
     Set set = pipelineManager
@@ -112,8 +111,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException {
     pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
     Pipeline pipeline1 = pipelineManager
         .getPipeline(ratisContainer1.getPipeline().getId());
-    Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipeline1.getPipelineState());
     pipelineManager.removePipeline(pipeline1.getId());
     for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
       // Assert that the pipeline has been removed from Node2PipelineMap as well
@@ -131,12 +130,12 @@ public void testPipelineCloseWithOpenContainer() throws IOException,
     ContainerID cId2 = ratisContainer2.getContainerInfo().containerID();
 
     pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
-    Assert.assertEquals(
-        pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
-            .getPipelineState(), Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipelineManager.getPipeline(
+            ratisContainer2.getPipeline().getId()).getPipelineState());
     Pipeline pipeline2 = pipelineManager
         .getPipeline(ratisContainer2.getPipeline().getId());
-    Assert.assertEquals(pipeline2.getPipelineState(),
-        Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipeline2.getPipelineState());
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
new file mode 100644
index 0000000000..f685b17cd3
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Package info tests.
+ */
+package org.apache.hadoop.hdds.scm.pipeline;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index 9982da42af..ae55746cc2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -29,7 +29,15 @@
 import java.util.List;
 import java.util.function.Consumer;
 
-public class OzoneTestUtils {
+/**
+ * Helper class for Tests.
+ */
+public final class OzoneTestUtils {
+  /**
+   * Never Constructed.
+   */
+  private OzoneTestUtils() {
+  }
 
   /**
    * Close containers which contain the blocks listed in
@@ -55,7 +63,7 @@ public static boolean closeContainers(
               .getContainer(ContainerID.valueof(
                   blockID.getContainerID())).isOpen());
         } catch (IOException e) {
-          e.printStackTrace();
+          throw new AssertionError("Failed to close the container", e);
         }
       }, omKeyLocationInfoGroups);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 0051ecbe15..c52490fed4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -40,12 +40,11 @@
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
 import org.apache.hadoop.utils.MetadataStore;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -83,8 +82,8 @@ public Map createKeys(int numOfKeys, int keySize)
     storageHandler.createVolume(createVolumeArgs);
 
     BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setAddAcls(new ArrayList<>());
+    bucketArgs.setRemoveAcls(new ArrayList<>());
     bucketArgs.setStorageType(StorageType.DISK);
     storageHandler.createBucket(bucketArgs);
 
@@ -144,9 +143,6 @@ public List getAllBlocks(Set containerIDs)
   public List getAllBlocks(Long containeID) throws IOException {
     List allBlocks = Lists.newArrayList();
     MetadataStore meta = getContainerMetadata(containeID);
-    MetadataKeyFilter filter =
-        (preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
-            .startsWith(OzoneConsts.DELETING_KEY_PREFIX);
     List> kvs =
         meta.getRangeKVs(null, Integer.MAX_VALUE,
             MetadataKeyFilters.getNormalKeyFilter());
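The OzoneTestUtils hunk above combines two common cleanups: a static-only helper class is made final with a private constructor so it can be neither subclassed nor instantiated, and a swallowed exception (e.printStackTrace()) becomes a rethrow so a failure actually fails the test. A sketch of the pattern, with a hypothetical ExampleUtils class:

    /** Static-only helper; hypothetical, mirrors the pattern above. */
    public final class ExampleUtils {

      /** Never constructed. */
      private ExampleUtils() {
      }

      public static int parsePort(String value) {
        try {
          return Integer.parseInt(value);
        } catch (NumberFormatException e) {
          // Rethrowing surfaces the failure to the test runner;
          // e.printStackTrace() would only log it and let the test pass.
          throw new AssertionError("Invalid port: " + value, e);
        }
      }
    }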
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 935423de12..405ce8ed4a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -61,6 +61,8 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Tests Close Container Exception handling by Ozone Client.
  */
@@ -121,7 +123,8 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     // write data more than 1 chunk
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
@@ -141,9 +144,9 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
     Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
 
     // Written the same data twice
-    String dataString = new String(data);
-    dataString.concat(dataString);
-    validateData(keyName, dataString.getBytes());
+    String dataString = new String(data, UTF_8);
+    dataString = dataString.concat(dataString);
+    validateData(keyName, dataString.getBytes(UTF_8));
   }
 
   @Test
@@ -152,7 +155,8 @@ public void testBlockWritesCloseConsistency() throws Exception {
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     // write data more than 1 chunk
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
@@ -184,7 +188,7 @@ public void testMultiBlockWrites() throws Exception {
     // write data more than 1 chunk
     byte[] data =
         ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
-            .getBytes();
+            .getBytes(UTF_8);
     Assert.assertEquals(data.length, 3 * blockSize);
     key.write(data);
 
@@ -199,7 +203,7 @@ public void testMultiBlockWrites() throws Exception {
     // write 1 more block worth of data. It will fail and new block will be
     // allocated
     key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
-        .getBytes());
+        .getBytes(UTF_8));
 
     key.close();
     // read the key from OM again and match the length.The length will still
@@ -232,13 +236,13 @@ public void testMultiBlockWrites2() throws Exception {
     Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
-    byte[] data = dataString.getBytes();
+    byte[] data = dataString.getBytes(UTF_8);
     key.write(data);
 
     // 3 block are completely written to the DataNode in 3 blocks.
     // Data of length half of chunkSize resides in the chunkOutput stream buffer
     String dataString2 =
         ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
-    key.write(dataString2.getBytes());
+    key.write(dataString2.getBytes(UTF_8));
 
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
@@ -257,9 +261,9 @@ public void testMultiBlockWrites2() throws Exception {
     // closeContainerException and remaining data in the chunkOutputStream
     // buffer will be copied into a different allocated block and will be
     // committed.
-    Assert.assertEquals(dataString.concat(dataString2).getBytes().length,
+    Assert.assertEquals(dataString.concat(dataString2).getBytes(UTF_8).length,
         keyInfo.getDataSize());
-    validateData(keyName, dataString.concat(dataString2).getBytes());
+    validateData(keyName, dataString.concat(dataString2).getBytes(UTF_8));
   }
 
   @Test
@@ -274,7 +278,8 @@ public void testMultiBlockWrites3() throws Exception {
     Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
     // write data 3 blocks and one more chunk
     byte[] writtenData =
-        ContainerTestHelper.getFixedLengthString(keyString, keyLen).getBytes();
+        ContainerTestHelper.getFixedLengthString(keyString, keyLen)
+            .getBytes(UTF_8);
     byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
     Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
     key.write(data);
@@ -367,8 +372,8 @@ private void waitForContainerClose(HddsProtos.ReplicationType type,
             .isContainerPresent(cluster, containerID, dn))) {
       for (DatanodeDetails datanodeDetails : datanodes) {
         GenericTestUtils.waitFor(() -> ContainerTestHelper
-            .isContainerClosed(cluster, containerID, datanodeDetails), 500,
-            15 * 1000);
+                .isContainerClosed(cluster, containerID, datanodeDetails),
+            500, 15 * 1000);
         //double check if it's really closed
         // (waitFor also throws an exception)
         Assert.assertTrue(ContainerTestHelper
@@ -395,7 +400,7 @@ public void testDiscardPreallocatedBlocks() throws Exception {
     Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    byte[] data = dataString.getBytes();
+    byte[] data = dataString.getBytes(UTF_8);
     key.write(data);
     List locationInfos =
         new ArrayList<>(groupOutputStream.getLocationInfoList());
@@ -411,7 +416,7 @@ public void testDiscardPreallocatedBlocks() throws Exception {
     waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
     dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    data = dataString.getBytes();
+    data = dataString.getBytes(UTF_8);
     key.write(data);
     Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
@@ -443,7 +448,8 @@ public void testBlockWriteViaRatis() throws Exception {
     String keyName = "ratis";
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     //get the name of a valid container
@@ -462,9 +468,9 @@ public void testBlockWriteViaRatis() throws Exception {
     // updated correctly in OzoneManager once the steam is closed
     key.close();
     OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    String dataString = new String(data);
-    dataString.concat(dataString);
+    String dataString = new String(data, UTF_8);
+    dataString = dataString.concat(dataString);
     Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
-    validateData(keyName, dataString.getBytes());
+    validateData(keyName, dataString.getBytes(UTF_8));
   }
 }
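The dataString.concat(dataString) hunks above fix a genuine bug that ErrorProne reports as ReturnValueIgnored: java.lang.String is immutable, so concat returns a new string and leaves the receiver untouched. Unless the result is assigned, the call is a no-op and the test validated only half the intended data. A self-contained sketch:

    public class ConcatExample {
      public static void main(String[] args) {
        String s = "ab";
        s.concat(s);            // result discarded: s is still "ab"
        s = s.concat(s);        // result kept: s is now "abab"
        System.out.println(s);  // prints "abab"
      }
    }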
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
new file mode 100644
index 0000000000..84eb8dd25b
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Test utils for Ozone.
+ */
+package org.apache.hadoop.ozone;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
index 290e834057..baf93a21e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
@@ -158,6 +159,7 @@ public void testCreateVolumesInLoop() throws IOException {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesByUser() throws IOException {
     testGetVolumesByUser(port);
   }
@@ -167,6 +169,7 @@ public void testGetVolumesByUser() throws IOException {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesOfAnotherUser() throws IOException {
     super.testGetVolumesOfAnotherUser(port);
   }
@@ -177,6 +180,7 @@ public void testGetVolumesOfAnotherUser() throws IOException {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
     super.testGetVolumesOfAnotherUserShouldFail(port);
   }
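The @Ignore annotations above carry a reason string, which JUnit 4 reports alongside each skipped test; a test that simply never runs tends to be forgotten. A minimal sketch with a hypothetical test:

    import org.junit.Ignore;
    import org.junit.Test;

    /** Sketch: skipping a test with a visible reason. */
    public class IgnoredTestExample {

      @Ignore("Blocked on the security work; re-enable afterwards.")
      @Test
      public void needsSecurity() {
        // JUnit lists this test as ignored, together with the reason
        // string above, instead of dropping it from the report entirely.
      }
    }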
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
new file mode 100644
index 0000000000..91a013c845
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * REST client tests.
+ */
+package org.apache.hadoop.ozone.web.client;
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
new file mode 100644
index 0000000000..fba9a39461
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Rest Client Tests.
+ */
+package org.apache.hadoop.ozone.web;
\ No newline at end of file
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
index c6625fdb57..8f68cc403f 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
@@ -26,7 +26,7 @@
 import java.nio.file.DirectoryNotEmptyException;
 import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hdds.protocol.StorageType;
@@ -211,7 +211,7 @@ List getAcls(BucketArgs args, String tag) {
         args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
     List filteredSet = null;
     if (aclStrings != null) {
-      filteredSet = new LinkedList<>();
+      filteredSet = new ArrayList<>();
       for (String s : aclStrings) {
         if (s.startsWith(tag)) {
           filteredSet.add(s.replaceFirst(tag, ""));
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
index 9c115a8028..836c03ab0e 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
@@ -312,5 +312,6 @@ void renameKey(KeyArgs args, String toKeyName)
   /**
    * Closes all the opened resources.
    */
+  @Override
   void close();
 }
diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
index abb61bb8c3..291ffd32c2 100644
--- a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
+++ b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
@@ -37,9 +37,9 @@ public void testErrorGen() {
     OzoneException e = ErrorTable
         .newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
             "localhost");
-    assertEquals(e.getHostID(), "localhost");
-    assertEquals(e.getShortMessage(),
-        ErrorTable.ACCESS_DENIED.getShortMessage());
+    assertEquals("localhost", e.getHostID());
+    assertEquals(ErrorTable.ACCESS_DENIED.getShortMessage(),
+        e.getShortMessage());
   }
 
   @Test
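The LinkedList-to-ArrayList swaps above (and in the volume manager changes later in this patch) follow the usual Java performance guidance: for the dominant append-then-iterate usage, ArrayList keeps elements in one contiguous array, while LinkedList pays a node allocation and a pointer chase per element for no benefit here. A sketch of the access pattern in question, with hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    public final class ListChoiceExample {

      private ListChoiceExample() {
      }

      /** Keep entries that start with a tag, with the tag stripped. */
      public static List<String> filterByTag(List<String> input, String tag) {
        // Append-only, then iterated: ArrayList is the better fit.
        List<String> filtered = new ArrayList<>();
        for (String s : input) {
          if (s.startsWith(tag)) {
            filtered.add(s.substring(tag.length()));
          }
        }
        return filtered;
      }
    }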
diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
new file mode 100644
index 0000000000..4ebe859caa
--- /dev/null
+++ b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests the REST error codes.
+ */
+package org.apache.hadoop.ozone.web;
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index be44bce93d..b99b98e7fc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -257,6 +257,7 @@ private List getUpdatedAclList(List existingAcls,
    * @param bucketName - Name of the bucket.
    * @throws IOException - on Failure.
    */
+  @Override
   public void deleteBucket(String volumeName, String bucketName)
       throws IOException {
     Preconditions.checkNotNull(volumeName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index cb7ca1f893..0bfbc1f498 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -22,7 +22,6 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -117,7 +116,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
     File metaDir = OmUtils.getOmDbDir(conf);
     this.lock = new OzoneManagerLock(conf);
-    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
+    this.openKeyExpireThresholdMS = 1000L * conf.getInt(
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
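The 1000L change above is the classic int-overflow fix: in 1000 * conf.getInt(...), both operands are int, so the multiplication is done in 32-bit arithmetic and can overflow before the result is widened into the long field. Making one operand long forces 64-bit arithmetic. A runnable sketch of the difference:

    public class OverflowExample {
      public static void main(String[] args) {
        int seconds = 25 * 24 * 60 * 60;  // 2,160,000 seconds (25 days)
        long wrong = seconds * 1000;      // int math: 2,160,000,000 exceeds
                                          // Integer.MAX_VALUE (2,147,483,647)
                                          // and wraps before widening
        long right = seconds * 1000L;     // long math: widened first
        System.out.println(wrong);        // -2134967296
        System.out.println(right);        // 2160000000
      }
    }

The (long) cast added to RandomKeyGenerator further down in this patch is the same fix in cast form.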
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index da56850b46..bc24a5067c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -989,6 +989,7 @@ public void setBucketProperty(OmBucketArgs args)
    * @param bucket - Name of the bucket.
    * @throws IOException
    */
+  @Override
   public void deleteBucket(String volume, String bucket) throws IOException {
     Map auditMap = buildAuditMap(volume);
     auditMap.put(OzoneConsts.BUCKET, bucket);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
index acc1634105..9aab823d49 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
@@ -66,6 +66,7 @@ public class ServiceListJSONServlet extends HttpServlet {
 
   private transient OzoneManager om;
 
+  @Override
   public void init() throws ServletException {
     this.om = (OzoneManager) getServletContext()
         .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index c232bf1be7..b948acc2e5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -35,7 +35,7 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -73,7 +73,7 @@ private void addVolumeToOwnerList(String volume, String owner,
     // Get the volume list
     byte[] dbUserKey = metadataManager.getUserKey(owner);
     byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
-    List prevVolList = new LinkedList<>();
+    List prevVolList = new ArrayList<>();
     if (volumeList != null) {
       VolumeList vlist = VolumeList.parseFrom(volumeList);
       prevVolList.addAll(vlist.getVolumeNamesList());
@@ -98,7 +98,7 @@ private void delVolumeFromOwnerList(String volume, String owner,
     // Get the volume list
     byte[] dbUserKey = metadataManager.getUserKey(owner);
     byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
-    List prevVolList = new LinkedList<>();
+    List prevVolList = new ArrayList<>();
     if (volumeList != null) {
       VolumeList vlist = VolumeList.parseFrom(volumeList);
       prevVolList.addAll(vlist.getVolumeNamesList());
@@ -140,7 +140,7 @@ public void createVolume(OmVolumeArgs args) throws IOException {
 
     try(WriteBatch batch = new WriteBatch()) {
       // Write the vol info
-      List metadataList = new LinkedList<>();
+      List metadataList = new ArrayList<>();
       for (Map.Entry entry :
           args.getKeyValueMap().entrySet()) {
         metadataList.add(HddsProtos.KeyValue.newBuilder()
@@ -250,6 +250,7 @@ public void setOwner(String volume, String owner) throws IOException {
    * @param quota - Quota in bytes.
    * @throws IOException
    */
+  @Override
   public void setQuota(String volume, long quota) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -293,6 +294,7 @@ public void setQuota(String volume, long quota) throws IOException {
    * @return VolumeArgs or exception is thrown.
    * @throws IOException
    */
+  @Override
   public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -384,6 +386,7 @@ public void deleteVolume(String volume) throws IOException {
    * @return true if the user has access for the volume, false otherwise
    * @throws IOException
    */
+  @Override
   public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
       throws IOException {
     Preconditions.checkNotNull(volume);
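The @Override annotations added above and elsewhere in this patch are not cosmetic: with the annotation present, the compiler verifies that the method really overrides or implements a supertype method, so a signature typo or an interface rename becomes a compile error instead of a silently never-called extra method. A minimal sketch:

    interface Store {
      void close();
    }

    class DbStore implements Store {
      // If Store.close() were renamed, or this signature drifted,
      // @Override would turn the mismatch into a compile-time error.
      @Override
      public void close() {
        // release handles here
      }
    }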
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
index 177694c2a0..7dd31daa33 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -30,6 +30,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -62,7 +63,7 @@ public void testWriteGroupOutputStream() throws Exception {
       assertEquals(0, groupOutputStream.getByteOffset());
 
       String dataString = RandomStringUtils.randomAscii(500);
-      byte[] data = dataString.getBytes();
+      byte[] data = dataString.getBytes(UTF_8);
       groupOutputStream.write(data, 0, data.length);
       assertEquals(500, groupOutputStream.getByteOffset());
 
@@ -95,7 +96,8 @@ public void testErrorWriteGroupOutputStream() throws Exception {
       assertEquals(0, groupOutputStream.getByteOffset());
 
       // first writes of 100 bytes should succeed
-      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
+      groupOutputStream.write(RandomStringUtils.randomAscii(100)
+          .getBytes(UTF_8));
       assertEquals(100, groupOutputStream.getByteOffset());
 
       // second writes of 500 bytes should fail, as there should be only 400
@@ -104,7 +106,8 @@ public void testErrorWriteGroupOutputStream() throws Exception {
       // other add more informative error code rather than exception, need to
       // change this part.
       exception.expect(Exception.class);
-      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
+      groupOutputStream.write(RandomStringUtils.randomAscii(500)
+          .getBytes(UTF_8));
       assertEquals(100, groupOutputStream.getByteOffset());
     }
   }
@@ -115,7 +118,7 @@ public void testReadGroupInputStream() throws Exception {
       ArrayList inputStreams = new ArrayList<>();
 
       String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
+      byte[] buf = dataString.getBytes(UTF_8);
       int offset = 0;
       for (int i = 0; i < 5; i++) {
         int tempOffset = offset;
@@ -126,12 +129,12 @@ public void testReadGroupInputStream() throws Exception {
               new ByteArrayInputStream(buf, tempOffset, 100);
 
           @Override
-          public void seek(long pos) throws IOException {
+          public synchronized void seek(long pos) throws IOException {
            throw new UnsupportedOperationException();
           }
 
           @Override
-          public long getPos() throws IOException {
+          public synchronized long getPos() throws IOException {
             return pos;
           }
 
@@ -142,12 +145,13 @@ public boolean seekToNewSource(long targetPos)
           }
 
           @Override
-          public int read() throws IOException {
+          public synchronized int read() throws IOException {
             return in.read();
           }
 
           @Override
-          public int read(byte[] b, int off, int len) throws IOException {
+          public synchronized int read(byte[] b, int off, int len)
+              throws IOException {
             int readLen = in.read(b, off, len);
             pos += readLen;
             return readLen;
@@ -162,7 +166,7 @@ public int read(byte[] b, int off, int len) throws IOException {
 
       int len = groupInputStream.read(resBuf, 0, 500);
       assertEquals(500, len);
-      assertEquals(dataString, new String(resBuf));
+      assertEquals(dataString, new String(resBuf, UTF_8));
     }
   }
 
@@ -172,7 +176,7 @@ public void testErrorReadGroupInputStream() throws Exception {
      ArrayList inputStreams = new ArrayList<>();
 
       String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
+      byte[] buf = dataString.getBytes(UTF_8);
       int offset = 0;
       for (int i = 0; i < 5; i++) {
         int tempOffset = offset;
@@ -183,28 +187,29 @@ public void testErrorReadGroupInputStream() throws Exception {
               new ByteArrayInputStream(buf, tempOffset, 100);
 
           @Override
-          public void seek(long pos) throws IOException {
+          public synchronized void seek(long pos) throws IOException {
             throw new UnsupportedOperationException();
           }
 
           @Override
-          public long getPos() throws IOException {
+          public synchronized long getPos() throws IOException {
             return pos;
           }
 
           @Override
-          public boolean seekToNewSource(long targetPos)
+          public synchronized boolean seekToNewSource(long targetPos)
               throws IOException {
             throw new UnsupportedOperationException();
           }
 
           @Override
-          public int read() throws IOException {
+          public synchronized int read() throws IOException {
             return in.read();
           }
 
           @Override
-          public int read(byte[] b, int off, int len) throws IOException {
+          public synchronized int read(byte[] b, int off, int len)
+              throws IOException {
             int readLen = in.read(b, off, len);
             pos += readLen;
             return readLen;
@@ -222,14 +227,14 @@ public int read(byte[] b, int off, int len) throws IOException {
       assertEquals(60, groupInputStream.getRemainingOfIndex(3));
       assertEquals(340, len);
       assertEquals(dataString.substring(0, 340),
-          new String(resBuf).substring(0, 340));
+          new String(resBuf, UTF_8).substring(0, 340));
 
       // read following 300 bytes, but only 200 left
       len = groupInputStream.read(resBuf, 340, 260);
       assertEquals(4, groupInputStream.getCurrentStreamIndex());
       assertEquals(0, groupInputStream.getRemainingOfIndex(4));
       assertEquals(160, len);
-      assertEquals(dataString, new String(resBuf).substring(0, 500));
+      assertEquals(dataString, new String(resBuf, UTF_8).substring(0, 500));
 
       // further read should get EOF
       len = groupInputStream.read(resBuf, 0, 1);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index d72215579d..55843e149e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om;
 
-import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
@@ -49,6 +48,8 @@
 import org.rocksdb.Statistics;
 import org.rocksdb.StatsLevel;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test class for @{@link KeyManagerImpl}.
  * */
@@ -93,11 +94,11 @@ private void setupMocks() throws Exception {
     Mockito.when(metadataManager.getLock())
         .thenReturn(new OzoneManagerLock(conf));
     Mockito.when(metadataManager.getVolumeKey(VOLUME_NAME))
-        .thenReturn(VOLUME_NAME.getBytes());
+        .thenReturn(VOLUME_NAME.getBytes(UTF_8));
     Mockito.when(metadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME))
-        .thenReturn(BUCKET_NAME.getBytes());
+        .thenReturn(BUCKET_NAME.getBytes(UTF_8));
     Mockito.when(metadataManager.getOpenKeyBytes(VOLUME_NAME, BUCKET_NAME,
-        KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes());
+        KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes(UTF_8));
   }
 
   private void setupRocksDb() throws Exception {
@@ -129,11 +130,11 @@ private void setupRocksDb() throws Exception {
     rdbStore = new RDBStore(folder.newFolder(), options, configSet);
     rdbTable = rdbStore.getTable("testTable");
 
-    rdbTable.put(VOLUME_NAME.getBytes(),
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    rdbTable.put(BUCKET_NAME.getBytes(),
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    rdbTable.put(KEY_NAME.getBytes(), keyData.toByteArray());
+    rdbTable.put(VOLUME_NAME.getBytes(UTF_8),
+        RandomStringUtils.random(10).getBytes(UTF_8));
+    rdbTable.put(BUCKET_NAME.getBytes(UTF_8),
+        RandomStringUtils.random(10).getBytes(UTF_8));
+    rdbTable.put(KEY_NAME.getBytes(UTF_8), keyData.toByteArray());
   }
 
   @Test
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 78b6e5d210..6077b329da 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -252,6 +252,7 @@ private class RenameIterator extends OzoneListingIterator {
       LOG.trace("rename from:{} to:{}", srcKey, dstKey);
     }
 
+    @Override
     boolean processKey(String key) throws IOException {
       String newKeyName = dstKey.concat(key.substring(srcKey.length()));
       bucket.renameKey(key, newKeyName);
@@ -370,6 +371,7 @@ && listStatus(f).length != 0) {
       }
     }
 
+    @Override
     boolean processKey(String key) throws IOException {
       if (key.equals("")) {
         LOG.trace("Skipping deleting root directory");
@@ -496,6 +498,7 @@ private class ListStatusIterator extends OzoneListingIterator {
      * @return always returns true
      * @throws IOException
      */
+    @Override
     boolean processKey(String key) throws IOException {
       Path keyPath = new Path(OZONE_URI_DELIMITER + key);
       if (key.equals(getPathKey())) {
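The getBytes(UTF_8) / new String(..., UTF_8) rewrites above address the same ErrorProne warning everywhere in this patch: the no-argument overloads use the JVM's default charset, which varies with the host platform and locale, so a round-trip can pass on one machine and corrupt non-ASCII data on another. Pinning StandardCharsets.UTF_8 makes the conversion deterministic. A sketch:

    import static java.nio.charset.StandardCharsets.UTF_8;

    public class CharsetExample {
      public static void main(String[] args) {
        String original = "ozone-\u00e9";          // non-ASCII content
        byte[] bytes = original.getBytes(UTF_8);   // same bytes everywhere
        String roundTrip = new String(bytes, UTF_8);
        // With the no-arg overloads this can print false on a JVM whose
        // default charset cannot represent \u00e9 (e.g. US-ASCII).
        System.out.println(original.equals(roundTrip)); // true
      }
    }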
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 0eb8dcedde..d32c25aca6 100644
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -176,7 +177,7 @@ public void testOzFsReadWrite() throws IOException {
       byte[] buffer = new byte[stringLen];
       // This read will not change the offset inside the file
       int readBytes = inputStream.read(0, buffer, 0, buffer.length);
      String out = new String(buffer, 0, buffer.length);
-      String out = new String(buffer, 0, buffer.length);
+      String out = new String(buffer, 0, buffer.length, UTF_8);
       assertEquals(data, out);
       assertEquals(readBytes, buffer.length);
       assertEquals(0, inputStream.getPos());
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
new file mode 100644
index 0000000000..51284c2db4
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Ozone FS Contract tests.
+ */
+package org.apache.hadoop.fs.ozone;
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java
index 8e745f27ed..dfafc3a5ac 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java
@@ -49,6 +49,7 @@ public AuthorizationHeaderV2(String auth) throws OS3Exception {
    *
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
   public void parseHeader() throws OS3Exception {
     String[] split = authHeader.split(" ");
     if (split.length != 2) {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java
index 88c64ca4ef..c3f7072121 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java
@@ -60,6 +60,7 @@ public AuthorizationHeaderV4(String header) throws OS3Exception {
    * Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
   public void parseAuthHeader() throws OS3Exception {
     String[] split = authHeader.split(" ");
 
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
index 19699a0163..8db10a8ee2 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
@@ -54,6 +54,7 @@ public class Credential {
    *
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
  public void parseCredential() throws OS3Exception {
     String[] split = credential.split("/");
     if (split.length == 5) {
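The @SuppressWarnings("StringSplitter") annotations above silence an ErrorProne check rather than rewrite working parsers. The check exists because String.split has two surprising behaviors: trailing empty strings are dropped from the result, and the separator argument is a regular expression. Where those semantics are intended, as they appear to be in these header parsers, a named suppression is the documented opt-out. A sketch of the surprises:

    public class SplitExample {
      public static void main(String[] args) {
        String[] a = "a:b:".split(":");
        System.out.println(a.length);   // 2, not 3: trailing empty dropped

        String[] b = "a.b".split("\\."); // the argument is a regex,
        System.out.println(b.length);    // so '.' must be escaped: 2
      }
    }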
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
index de15a407e2..6c85b8177f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
@@ -209,6 +209,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneVolumeName(String s3BucketName) throws IOException {
     if (bucketVolumeMap.get(s3BucketName) == null) {
       throw new IOException("S3_BUCKET_NOT_FOUND");
@@ -217,6 +218,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneBucketName(String s3BucketName) throws IOException {
     if (bucketVolumeMap.get(s3BucketName) == null) {
       throw new IOException("S3_BUCKET_NOT_FOUND");
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
index 5114a471eb..ea574d42ed 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
@@ -63,7 +63,7 @@ public void setup() throws Exception {
 
   @Test
   public void testBucketEndpoint() throws Exception {
     Response response = bucketEndpoint.delete(bucketName);
-    assertEquals(response.getStatus(), HttpStatus.SC_NO_CONTENT);
+    assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus());
   }
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
index b3b1be00f2..c15a1280c3 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
@@ -23,6 +23,8 @@
 import java.io.IOException;
 
 import org.junit.Assert;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.*;
 import org.junit.Test;
 
@@ -40,7 +42,7 @@ public void fromStreamWithNamespace() throws IOException {
             + ".com/doc/2006-03-01/\">key1key2"
             + "key3"
             + "")
-            .getBytes());
+            .getBytes(UTF_8));
 
     //WHEN
     MultiDeleteRequest multiDeleteRequest =
@@ -58,7 +60,7 @@ public void fromStreamWithoutNamespace() throws IOException {
         ("key1key2"
             + "key3"
             + "")
-            .getBytes());
+            .getBytes(UTF_8));
 
     //WHEN
     MultiDeleteRequest multiDeleteRequest =
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
index 2d0504dc73..2426ecc791 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
@@ -37,6 +37,8 @@
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test get object.
 */
@@ -54,15 +56,16 @@ public void get() throws IOException, OS3Exception {
     OzoneBucket bucket = volume.getBucket("b1");
 
     OzoneOutputStream keyStream =
-        bucket.createKey("key1", CONTENT.getBytes().length);
-    keyStream.write(CONTENT.getBytes());
+        bucket.createKey("key1", CONTENT.getBytes(UTF_8).length);
+    keyStream.write(CONTENT.getBytes(UTF_8));
     keyStream.close();
 
     ObjectEndpoint rest = new ObjectEndpoint();
     rest.setClient(client);
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
     rest.setHeaders(headers);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
+    ByteArrayInputStream body =
+        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
 
     //WHEN
     rest.get("b1", "key1", body);
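In the TestObjectHead hunk just below, the old test called the endpoint and discarded the returned Response, so the not-found path could regress without failing the test; the rewrite asserts the 404 status in the non-throwing path and keeps the exception assertions in the catch. A sketch of the shape, with a hypothetical stub endpoint:

    import javax.ws.rs.core.Response;

    import static org.junit.Assert.assertEquals;

    public class NotFoundAssertExample {

      /** Hypothetical stub; real code would hit the REST endpoint. */
      static Response head(String key) {
        return Response.status(404).build();
      }

      public static void main(String[] args) {
        // Assert the status; a call whose result is ignored proves nothing.
        assertEquals(404, head("badKeyName").getStatus());
      }
    }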
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
index 6c166d7bc0..8a8c0158b8 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 
 import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -69,9 +70,9 @@ public void testHeadObject() throws Exception {
     //GIVEN
     String value = RandomStringUtils.randomAlphanumeric(32);
     OzoneOutputStream out = bucket.createKey("key1",
-        value.getBytes().length, ReplicationType.STAND_ALONE,
+        value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
         ReplicationFactor.ONE);
-    out.write(value.getBytes());
+    out.write(value.getBytes(UTF_8));
     out.close();
 
     //WHEN
@@ -79,7 +80,7 @@ public void testHeadObject() throws Exception {
 
     //THEN
     Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(value.getBytes().length,
+    Assert.assertEquals(value.getBytes(UTF_8).length,
         Long.parseLong(response.getHeaderString("Content-Length")));
 
     DateTimeFormatter.RFC_1123_DATE_TIME
@@ -91,7 +92,8 @@ public void testHeadObject() throws Exception {
   public void testHeadFailByBadName() throws Exception {
     //Head an object that doesn't exist.
     try {
-      keyEndpoint.head(bucketName, "badKeyName");
+      Response response = keyEndpoint.head(bucketName, "badKeyName");
+      Assert.assertEquals(404, response.getStatus());
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
       Assert.assertTrue(ex.getErrorMessage().contains("object does not exist"));
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index d73e37e062..184b075e1c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -286,7 +286,7 @@ private Thread getProgressBarThread() {
     long maxValue;
 
     currentValue = () -> numberOfKeysAdded.get();
-    maxValue = numOfVolumes *
-        numOfBuckets *
-        numOfKeys;
+    maxValue = (long) numOfVolumes *
+        numOfBuckets *
+        numOfKeys;
 
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
index 149d65e486..a91e1904ab 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
@@ -88,8 +88,8 @@ public void testRestart() throws Exception {
     String expectedSnapFile =
         storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
             termIndexBeforeRestart.getIndex()).getAbsolutePath();
-    Assert.assertEquals(snapshotInfo.getFile().getPath().toString(),
-        expectedSnapFile);
+    Assert.assertEquals(expectedSnapFile,
+        snapshotInfo.getFile().getPath().toString());
     Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);
 
     // After restart the term index might have progressed to apply pending
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
new file mode 100644
index 0000000000..f7cb075781
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Freon Ozone Load Generator.
+ */
+package org.apache.hadoop.ozone.freon;
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
index 1dc3cab785..88fcb7b687 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
@@ -45,10 +45,10 @@
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.UUID;
 
@@ -182,7 +182,7 @@ public void testOmDB() throws Exception {
     String sql = "SELECT * FROM volumeList";
     ResultSet rs = executeQuery(conn, sql);
     List expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String userNameRs = rs.getString("userName");
       String volumeNameRs = rs.getString("volumeName");
@@ -194,7 +194,7 @@ public void testOmDB() throws Exception {
     sql = "SELECT * FROM volumeInfo";
     rs = executeQuery(conn, sql);
     expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String adName = rs.getString("adminName");
       String ownerName = rs.getString("ownerName");
@@ -208,7 +208,7 @@ public void testOmDB() throws Exception {
     sql = "SELECT * FROM aclInfo";
     rs = executeQuery(conn, sql);
     expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String adName = rs.getString("adminName");
       String ownerName = rs.getString("ownerName");
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000000..595708cd29
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * OM to SQL Converter. Currently broken.
+ */
+package org.apache.hadoop.ozone.om;
\ No newline at end of file
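A closing note on the many new package-info.java files in this patch: they exist, presumably, to satisfy the checkstyle/javadoc lint that expects package-level documentation, and package-info.java is the one place Java allows it. The file contains no class, only a javadoc comment and the package declaration; package-wide annotations could also live here. The minimal shape, for a hypothetical package:

    /**
     * One-line description of what lives in this package.
     */
    package org.example.demo; // hypothetical package name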