HDDS-825. Code cleanup based on messages from ErrorProne.
Contributed by Anu Engineer.
commit a16aa2f60b
parent fcd94eeab8
@@ -296,6 +296,7 @@ public void createPipeline() {
     // For stand alone pipeline, there is no notion called setup pipeline.
   }
 
+  @Override
   public void destroyPipeline() {
     // For stand alone pipeline, there is no notion called destroy pipeline.
   }
@@ -170,6 +170,7 @@ public XceiverClientSpi call() throws Exception {
   /**
    * Close and remove all the cached clients.
    */
+  @Override
   public void close() {
     //closing is done through RemovalListener
     clientCache.invalidateAll();
@@ -100,6 +100,7 @@ private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
   /**
    * {@inheritDoc}
    */
+  @Override
   public void createPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
@@ -110,6 +111,7 @@ public void createPipeline() throws IOException {
   /**
    * {@inheritDoc}
    */
+  @Override
   public void destroyPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
@@ -83,7 +83,7 @@ public ChunkInputStream(
   }
 
   private void initializeChunkOffset() {
-    int tempOffset = 0;
+    long tempOffset = 0;
     for (int i = 0; i < chunks.size(); i++) {
      chunkOffset[i] = tempOffset;
      tempOffset += chunks.get(i).getLen();
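The `int` to `long` change above guards against overflow when chunk lengths are summed: with an `int` accumulator, the compound assignment silently narrows each `long` length back to `int` on every iteration (the pattern ErrorProne reports as a narrowing compound assignment). A minimal sketch of the failure mode, with hypothetical values chosen to force the wrap-around:

```java
public class NarrowingDemo {
  public static void main(String[] args) {
    // Illustrative lengths, not taken from the patch.
    long[] chunkLengths = {Integer.MAX_VALUE, 1024L};

    int badOffset = 0;
    for (long len : chunkLengths) {
      badOffset += len;   // implicit (int) cast on every iteration
    }

    long goodOffset = 0;
    for (long len : chunkLengths) {
      goodOffset += len;  // accumulates in 64 bits
    }

    System.out.println(badOffset);  // negative: the sum wrapped
    System.out.println(goodOffset); // 2147484671: the true total
  }
}
```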
@@ -196,6 +196,17 @@ public static Builder newBuilder(Pipeline pipeline) {
     return new Builder(pipeline);
   }
 
+  @Override
+  public String toString() {
+    return "Pipeline{" +
+        "id=" + id +
+        ", type=" + type +
+        ", factor=" + factor +
+        ", state=" + state +
+        ", nodeStatus=" + nodeStatus +
+        '}';
+  }
+
   /**
    * Builder class for Pipeline.
    */
@@ -68,7 +68,6 @@ public StorageInfo(NodeType type, String cid, long cT)
       throws IOException {
     Preconditions.checkNotNull(type);
     Preconditions.checkNotNull(cid);
-    Preconditions.checkNotNull(cT);
     properties.setProperty(NODE_TYPE, type.name());
     properties.setProperty(CLUSTER_ID, cid);
     properties.setProperty(CREATION_TIME, String.valueOf(cT));
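Dropping `Preconditions.checkNotNull(cT)` is correct because `cT` is a primitive `long`: the call autoboxes it to a `Long` that can never be null, so the check is dead code (roughly what ErrorProne flags as a null check on a non-nullable primitive). A minimal sketch of why the check is a no-op:

```java
import com.google.common.base.Preconditions;

public class PrimitiveCheckDemo {
  public static void main(String[] args) {
    long creationTime = 0L;                   // a primitive can never be null
    Preconditions.checkNotNull(creationTime); // autoboxes to Long; never throws
    System.out.println("check is a no-op for primitives");
  }
}
```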
@@ -24,11 +24,9 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.utils.RocksDBStoreMBean;
-import org.apache.ratis.thirdparty.com.google.common.annotations.
-    VisibleForTesting;
+import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
-
 import org.rocksdb.DBOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
@@ -192,7 +190,6 @@ public void move(byte[] key, Table source, Table dest) throws IOException {
     }
   }
 
-
   @Override
   public void move(byte[] key, byte[] value, Table source,
       Table dest) throws IOException {
@@ -226,7 +223,7 @@ public void move(byte[] sourceKey, byte[] destKey, byte[] value, Table source,
     } catch (RocksDBException rockdbException) {
       LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(sourceKey));
       throw toIOException("Unable to move key: " +
           DFSUtil.bytes2String(sourceKey), rockdbException);
     }
   }
 
@@ -37,7 +37,7 @@ public enum DummyAction implements AuditAction {
   SET_OWNER("SET_OWNER"),
   SET_QUOTA("SET_QUOTA");
 
-  private String action;
+  private final String action;
 
   DummyAction(String action) {
     this.action = action;
@@ -41,7 +41,7 @@ public class TestLeaseManager {
   /**
    * Dummy resource on which leases can be acquired.
    */
-  private final class DummyResource {
+  private static final class DummyResource {
 
     private final String name;
 
@@ -61,6 +61,21 @@ public boolean equals(Object obj) {
       }
       return false;
     }
+
+    /**
+     * Adding to String method to fix the ErrorProne warning that this method
+     * is later used in String functions, which would print out (e.g.
+     * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@
+     * 4488aabb`) instead of useful information.
+     *
+     * @return Name of the Dummy object.
+     */
+    @Override
+    public String toString() {
+      return "DummyResource{" +
+          "name='" + name + '\'' +
+          '}';
+    }
   }
 
   @Test
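The javadoc added above spells out the warning: without a `toString()` override, string concatenation and log formatting fall back to `Object.toString()`, which prints only `ClassName@hexhash`. A minimal sketch of the difference (names and values are illustrative):

```java
public class ToStringDemo {
  static final class DummyResource {
    private final String name = "res1";

    @Override
    public String toString() {
      return "DummyResource{name='" + name + "'}";
    }
  }

  public static void main(String[] args) {
    // With the override: DummyResource{name='res1'}
    // Without it, Object.toString() would print DummyResource@<hex-hash>.
    System.out.println(new DummyResource());
  }
}
```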
@@ -55,7 +55,7 @@ public void testGetLongId() throws Exception {
     List<Future<Integer>> result = executor.invokeAll(tasks);
     assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size());
     for (Future<Integer> r : result) {
-      assertEquals(r.get().intValue(), IDS_PER_THREAD);
+      assertEquals(IDS_PER_THREAD, r.get().intValue());
     }
   }
 
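JUnit's `assertEquals(expected, actual)` takes the expected value first. Swapping the arguments passes or fails identically, but the failure message reports the two values backwards, which is why several assertions in this commit are reordered. A minimal sketch, with illustrative values:

```java
import static org.junit.Assert.assertEquals;

public class AssertOrderDemo {
  // Suppose the code under test returns 7 where 10 was expected.
  static int compute() {
    return 7;
  }

  public static void main(String[] args) {
    // Swapped order would report "expected:<7> but was:<10>",
    // pointing the reader at the wrong value:
    //   assertEquals(compute(), 10);
    // Correct order reports "expected:<10> but was:<7>":
    assertEquals(10, compute());
  }
}
```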
@@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.hadoop.utils;
 
-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
@@ -28,9 +25,9 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,14 +47,14 @@
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.UUID;
 
 import java.util.concurrent.atomic.AtomicInteger;
-
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-
 import static org.junit.runners.Parameterized.Parameters;
 
 /**
@@ -66,27 +63,24 @@
 @RunWith(Parameterized.class)
 public class TestMetadataStore {
 
+  private final static int MAX_GETRANGE_LENGTH = 100;
   private final String storeImpl;
-
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+  private MetadataStore store;
+  private File testDir;
   public TestMetadataStore(String metadataImpl) {
     this.storeImpl = metadataImpl;
   }
 
   @Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
+    return Arrays.asList(new Object[][]{
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
     });
   }
 
-  private MetadataStore store;
-  private File testDir;
-  private final static int MAX_GETRANGE_LENGTH = 100;
-
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
   @Before
   public void init() throws IOException {
     if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
@@ -109,7 +103,7 @@ public void init() throws IOException {
     // Add 20 entries.
     // {a0 : a-value0} to {a9 : a-value9}
     // {b0 : b-value0} to {b9 : b-value9}
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       store.put(getBytes("a" + i), getBytes("a-value" + i));
       store.put(getBytes("b" + i), getBytes("b-value" + i));
     }
@@ -178,7 +172,7 @@ public void testMetaStoreConfigDifferentFromType() throws IOException {
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-    if(storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
+    if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
       dbType = "RocksDB";
     } else {
       dbType = "LevelDB";
@@ -241,7 +235,7 @@ private String getString(byte[] bytes) {
 
   @Test
   public void testGetDelete() throws IOException {
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       byte[] va = store.get(getBytes("a" + i));
       assertEquals("a-value" + i, getString(va));
 
@@ -273,7 +267,7 @@ private String getExpectedValue(String key) {
       return null;
     }
     char[] arr = key.toCharArray();
-    return new StringBuffer().append(arr[0]).append("-value")
+    return new StringBuilder().append(arr[0]).append("-value")
         .append(arr[arr.length - 1]).toString();
   }
 
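`StringBuffer` synchronizes every call even though this builder never escapes the method, so the unsynchronized `StringBuilder` is the idiomatic drop-in replacement for method-local string assembly. A minimal sketch:

```java
public class BuilderDemo {
  public static void main(String[] args) {
    // Method-local assembly: no other thread can ever see the builder, so
    // StringBuffer's per-call synchronization buys nothing here.
    String slow = new StringBuffer().append("a").append("-value").toString();
    String fast = new StringBuilder().append("a").append("-value").toString();
    System.out.println(slow.equals(fast)); // true; only the locking differs
  }
}
```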
@@ -326,14 +320,14 @@ public void testIterateKeys() throws IOException {
       char num = value.charAt(value.length() - 1);
       // each value adds 1
       int i = Character.getNumericValue(num) + 1;
       value = value.substring(0, value.length() - 1) + i;
       result.add(value);
       return true;
     });
 
     assertFalse(result.isEmpty());
-    for (int i=0; i<result.size(); i++) {
-      assertEquals("b-value" + (i+1), result.get(i));
+    for (int i = 0; i < result.size(); i++) {
+      assertEquals("b-value" + (i + 1), result.get(i));
     }
 
     // iterate from a non exist key
@@ -388,7 +382,7 @@ public void testGetRangeKVs() throws IOException {
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
     assertTrue(result.stream().allMatch(entry ->
-        new String(entry.getKey()).startsWith("b")
+        new String(entry.getKey(), UTF_8).startsWith("b")
     ));
     assertEquals(20, filter1.getKeysScannedNum());
     assertEquals(10, filter1.getKeysHintedNum());
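The recurring `UTF_8` additions in this commit address the same issue: `new String(byte[])` and `String.getBytes()` use the JVM's platform default charset, so the same bytes can decode differently from machine to machine. Passing an explicit `StandardCharsets.UTF_8` makes the round-trip deterministic (the behavior ErrorProne flags as an implicit default-charset use). A minimal sketch:

```java
import static java.nio.charset.StandardCharsets.UTF_8;

public class CharsetDemo {
  public static void main(String[] args) {
    byte[] key = "b0".getBytes(UTF_8);           // encode deterministically
    String decoded = new String(key, UTF_8);     // decode the same way everywhere
    System.out.println(decoded.startsWith("b")); // true on every platform
  }
}
```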
@@ -416,7 +410,7 @@ public void testGetRangeKVs() throws IOException {
     assertEquals("b-value2", getString(result.get(0).getValue()));
 
     // If filter is null, no effect.
-    result = store.getRangeKVs(null, 1, null);
+    result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null);
     assertEquals(1, result.size());
     assertEquals("a0", getString(result.get(0).getKey()));
   }
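The `(MetadataKeyFilter[])` cast above resolves a varargs ambiguity: a bare `null` passed to a varargs parameter is treated as a null *array*, not as one null element, and the compiler (and ErrorProne) warn that the intent is unclear. A minimal sketch with a hypothetical varargs method:

```java
public class VarargsNullDemo {
  static int count(String... parts) {
    return parts == null ? -1 : parts.length;
  }

  public static void main(String[] args) {
    System.out.println(count((String[]) null)); // whole array is null -> -1
    System.out.println(count((String) null));   // one null element   -> 1
  }
}
```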
@@ -461,7 +455,7 @@ public void testInvalidStartKey() throws IOException {
     // If startKey is invalid, the returned list should be empty.
     List<Map.Entry<byte[], byte[]>> kvs =
         store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH);
-    assertEquals(kvs.size(), 0);
+    assertEquals(0, kvs.size());
   }
 
   @Test
@@ -504,7 +498,7 @@ public void testBatchWrite() throws IOException {
         .build();
 
     List<String> expectedResult = Lists.newArrayList();
-    for (int i = 0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i));
       expectedResult.add("batch-" + i);
     }
@@ -541,43 +535,44 @@ public void testKeyPrefixFilter() throws IOException {
       new KeyPrefixFilter().addFilter("b0", true).addFilter("b");
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+          "rejected"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-        "rejected"));
 
     try {
       new KeyPrefixFilter().addFilter("b0").addFilter("b", true);
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+          "accepted"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-        "accepted"));
 
     try {
       new KeyPrefixFilter().addFilter("b", true).addFilter("b0");
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+          "rejected"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-        "rejected"));
 
     try {
       new KeyPrefixFilter().addFilter("b").addFilter("b0", true);
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+          "accepted"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-        "accepted"));
 
     MetadataKeyFilter filter1 = new KeyPrefixFilter(true)
         .addFilter("a0")
         .addFilter("a1")
         .addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(2, result.size());
-    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a0")) && result.stream().anyMatch(entry -> new String(
-        entry.getKey()).startsWith("a1")));
+        entry.getKey(), UTF_8).startsWith("a1")));
 
     filter1 = new KeyPrefixFilter(true).addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
@@ -586,7 +581,8 @@ public void testKeyPrefixFilter() throws IOException {
     filter1 = new KeyPrefixFilter().addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
-    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a")));
   }
 }
@@ -29,12 +29,14 @@
 import java.io.File;
 import java.lang.management.ManagementFactory;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the JMX interface for the rocksdb metastore implementation.
  */
 public class TestRocksDBStoreMBean {
 
-  Configuration conf;
+  private Configuration conf;
 
   @Before
   public void init() throws Exception {
@@ -57,7 +59,7 @@ public void testJmxBeans() throws Exception {
         .setCreateIfMissing(true).setDbFile(testDir).build();
 
     for (int i = 0; i < 10; i++) {
-      metadataStore.put("key".getBytes(), "value".getBytes());
+      metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
     }
 
     MBeanServer platformMBeanServer =
@@ -131,7 +131,7 @@ public void builderWithDataWrites() throws Exception {
           RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
       firstTable.put(key, value);
       byte[] temp = firstTable.get(key);
-      Arrays.equals(value, temp);
+      Assert.assertTrue(Arrays.equals(value, temp));
     }
 
     try (Table secondTable = dbStore.getTable("Second")) {
|
|||||||
RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
|
RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
|
||||||
firstTable.put(key, value);
|
firstTable.put(key, value);
|
||||||
byte[] temp = firstTable.get(key);
|
byte[] temp = firstTable.get(key);
|
||||||
Arrays.equals(value, temp);
|
Assert.assertTrue(Arrays.equals(value, temp));
|
||||||
}
|
}
|
||||||
|
|
||||||
try (Table secondTable = dbStore.getTable("Second")) {
|
try (Table secondTable = dbStore.getTable("Second")) {
|
||||||
|
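The two fixes above repair a silent no-op: a bare `Arrays.equals(value, temp);` computes the comparison and discards the boolean, so the test could never fail on a mismatch. Wrapping it in `Assert.assertTrue(...)` makes the result observable (the kind of call ErrorProne reports as an ignored return value). A minimal sketch, assuming JUnit 4 on the classpath:

```java
import java.util.Arrays;
import org.junit.Assert;

public class IgnoredReturnDemo {
  public static void main(String[] args) {
    byte[] written = {1, 2, 3};
    byte[] readBack = {1, 2, 9};

    Arrays.equals(written, readBack);  // boolean result silently dropped
    // Wrapping the call makes a mismatch actually fail the test:
    Assert.assertTrue(Arrays.equals(written, written));
  }
}
```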
@@ -35,9 +35,9 @@
 import org.rocksdb.WriteBatch;
 
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
@@ -112,8 +112,8 @@ public void putGetAndEmpty() throws Exception {
 
   @Test
   public void delete() throws Exception {
-    List<byte[]> deletedKeys = new LinkedList<>();
-    List<byte[]> validKeys = new LinkedList<>();
+    List<byte[]> deletedKeys = new ArrayList<>();
+    List<byte[]> validKeys = new ArrayList<>();
     byte[] value =
         RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
     for (int x = 0; x < 100; x++) {
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* DB test Utils.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.utils;
|
@@ -141,8 +141,8 @@ public BlockData getBlock(Container container, BlockID blockID)
     long bcsId = blockID.getBlockCommitSequenceId();
     Preconditions.checkNotNull(blockID,
         "BlockID cannot be null in GetBlock request");
-    Preconditions.checkNotNull(blockID.getContainerID(),
-        "Container name cannot be null");
+    Preconditions.checkNotNull(container,
+        "Container cannot be null");
 
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
@@ -114,7 +114,7 @@ public StreamDownloader(long containerId, CompletableFuture<Path> response,
       this.containerId = containerId;
       this.outputPath = outputPath;
       try {
-        outputPath = Preconditions.checkNotNull(outputPath);
+        Preconditions.checkNotNull(outputPath, "Output path cannot be null");
         Path parentPath = Preconditions.checkNotNull(outputPath.getParent());
         Files.createDirectories(parentPath);
         stream =
@@ -71,8 +71,8 @@ public ScmTestMock() {
       new HashMap<>();
   private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
   private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new LinkedList<>();
-  private List<SCMCommandProto> scmCommandRequests = new LinkedList<>();
+  private List<CommandStatus> cmdStatusList = new ArrayList<>();
+  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
   /**
    * Returns the number of heartbeats made to this class.
    *
@@ -50,7 +50,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.file.Paths;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -86,9 +86,9 @@ public void setUp() throws Exception {
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    serverAddresses = new LinkedList<>();
-    scmServers = new LinkedList<>();
-    mockServers = new LinkedList<>();
+    serverAddresses = new ArrayList<>();
+    scmServers = new ArrayList<>();
+    mockServers = new ArrayList<>();
     for (int x = 0; x < scmServerCount; x++) {
       int port = SCMTestUtils.getReuseableAddress().getPort();
       String address = "127.0.0.1";
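The many `LinkedList` to `ArrayList` swaps in this commit follow the usual guidance that `ArrayList` is the better default `List`: contiguous storage, O(1) indexed access, and much lower per-element overhead, while these call sites only ever append and iterate. Declaring the variable as the `List` interface keeps such a change mechanical; a minimal sketch with an illustrative value:

```java
import java.util.ArrayList;
import java.util.List;

public class ListDefaultDemo {
  public static void main(String[] args) {
    // Append-and-iterate usage, as at the call sites above.
    List<String> servers = new ArrayList<>();
    servers.add("127.0.0.1:9861"); // hypothetical address
    for (String s : servers) {
      System.out.println(s);
    }
  }
}
```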
@@ -361,8 +361,8 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
   @Test
   public void testDatanodeStateMachineWithInvalidConfiguration()
       throws Exception {
-    LinkedList<Map.Entry<String, String>> confList =
-        new LinkedList<Map.Entry<String, String>>();
+    List<Map.Entry<String, String>> confList =
+        new ArrayList<>();
     confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
 
     // Invalid ozone.scm.names
@@ -49,6 +49,7 @@
 import java.io.IOException;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -160,7 +161,7 @@ private ContainerCommandRequestProto getWriteChunkRequest(
       String datanodeId, Long containerId, Long localId) {
 
     ByteString data = ByteString.copyFrom(
-        UUID.randomUUID().toString().getBytes());
+        UUID.randomUUID().toString().getBytes(UTF_8));
     ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
         .newBuilder()
         .setChunkName(
@@ -61,7 +61,7 @@ public static void setup() {
   /**
    * Dummy report publisher for testing.
    */
-  private class DummyReportPublisher extends ReportPublisher {
+  private static class DummyReportPublisher extends ReportPublisher {
 
     private final long frequency;
     private int getReportCount = 0;
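Marking nested helper classes `static` (here and in `TestLeaseManager.DummyResource` earlier in this commit) drops the hidden reference every inner-class instance keeps to its enclosing object; since these helpers never touch outer state, that reference is pure overhead and can pin the outer instance in memory (what ErrorProne reports as a class that can be static). A minimal sketch:

```java
public class Outer {
  // Non-static inner class: each instance captures a hidden Outer.this.
  private class Inner { }

  // Static nested class: no hidden reference to the enclosing instance.
  private static class Nested { }

  void demo() {
    Inner a = new Inner();   // requires, and pins, this Outer instance
    Nested b = new Nested(); // stands alone
  }
}
```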
@@ -1,19 +1,18 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
 package org.apache.hadoop.ozone.container.common.volume;
@@ -23,7 +22,6 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -35,19 +33,22 @@
 import java.util.Properties;
 import java.util.UUID;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Unit tests for {@link HddsVolume}.
  */
 public class TestHddsVolume {
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static final Configuration CONF = new Configuration();
   private static final String DU_CACHE_FILE = "scmUsed";
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private File rootDir;
   private HddsVolume volume;
   private File versionFile;
@@ -69,9 +70,9 @@ public void testHddsVolumeInitialization() throws Exception {
     // clusterID is not specified and the version file should not be written
     // to disk.
     assertTrue(volume.getClusterID() == null);
-    assertEquals(volume.getStorageType(), StorageType.DEFAULT);
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NOT_FORMATTED);
+    assertEquals(StorageType.DEFAULT, volume.getStorageType());
+    assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
+        volume.getStorageState());
     assertFalse("Version file should not be created when clusterID is not " +
         "known.", versionFile.exists());
 
@@ -84,7 +85,7 @@ public void testHddsVolumeInitialization() throws Exception {
     assertTrue("Volume format should create Version file",
         versionFile.exists());
     assertEquals(volume.getClusterID(), CLUSTER_ID);
-    assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL);
+    assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
   }
 
   @Test
@@ -111,7 +112,7 @@ public void testReadPropertiesFromVersionFile() throws Exception {
   }
 
   @Test
-  public void testShutdown() throws Exception{
+  public void testShutdown() throws Exception {
     // Return dummy value > 0 for scmUsage so that scm cache file is written
     // during shutdown.
     GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
@@ -125,8 +126,7 @@ public void testShutdown() throws Exception{
     volume.shutdown();
 
     // Volume state should be "NON_EXISTENT" when volume is shutdown.
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NON_EXISTENT);
+    assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState());
 
     // Volume should save scmUsed cache file once volume is shutdown
     File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
@@ -139,7 +139,7 @@ public void testShutdown() throws Exception{
       // as usage thread is shutdown.
       volume.getAvailable();
       fail("HddsVolume#shutdown test failed");
-    } catch (Exception ex){
+    } catch (Exception ex) {
       assertTrue(ex instanceof IOException);
       assertTrue(ex.getMessage().contains(
           "Volume Usage thread is not running."));
@@ -22,7 +22,6 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,7 +68,7 @@ private void initializeVolumeSet() throws Exception {
   }
 
   @Rule
-  public Timeout testTimeout = new Timeout(300_000);
+  public Timeout testTimeout = new Timeout(300000);
 
   @Before
   public void setup() throws Exception {
@@ -153,8 +152,7 @@ public void testFailVolume() throws Exception {
     assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
 
     // Failed volume should not exist in VolumeMap
-    Path volume1Path = new Path(volume1);
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
+    assertFalse(volumeSet.getVolumeMap().containsKey(volume1));
   }
 
   @Test
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Tests for Container Volumes.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.container.common.volume;
|
@@ -25,10 +25,9 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -37,12 +36,14 @@
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
@@ -52,6 +53,8 @@
  */
 public class TestBlockManagerImpl {
 
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private OzoneConfiguration config;
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -62,10 +65,6 @@ public class TestBlockManagerImpl {
   private BlockManagerImpl blockManager;
   private BlockID blockID;
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
   @Before
   public void setUp() throws Exception {
     config = new OzoneConfiguration();
@@ -93,7 +92,7 @@ public void setUp() throws Exception {
     blockData = new BlockData(blockID);
     blockData.addMetadata("VOLUME", "ozone");
     blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, 1024);
     chunkList.add(info.getProtoBufMessage());
@@ -124,88 +123,74 @@ public void testPutAndGetBlock() throws Exception {
 
   }
 
 
   @Test
   public void testDeleteBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testDeleteBlock");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteBlock failed");
+      blockManager.getBlock(keyValueContainer, blockID);
+      fail("testDeleteBlock");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
     }
   }
 
   @Test
   public void testListBlock() throws Exception {
-    try {
-      blockManager.putBlock(keyValueContainer, blockData);
-      List<BlockData> listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        blockData.setChunks(chunkList);
-        blockManager.putBlock(keyValueContainer, blockData);
-      }
-
-      listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListBlock failed");
+    blockManager.putBlock(keyValueContainer, blockData);
+    List<BlockData> listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 1);
+
+    for (long i = 2; i <= 10; i++) {
+      blockID = new BlockID(1L, i);
+      blockData = new BlockData(blockID);
+      blockData.addMetadata("VOLUME", "ozone");
+      blockData.addMetadata("OWNER", "hdfs");
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+      ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+          .getLocalID(), 0), 0, 1024);
+      chunkList.add(info.getProtoBufMessage());
+      blockData.setChunks(chunkList);
+      blockManager.putBlock(keyValueContainer, blockData);
     }
+
+    listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 10);
   }
 
   @Test
   public void testGetNoSuchBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        //Since the block has been deleted, we should not be able to find it
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testGetNoSuchBlock failed");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-      }
-    } catch (IOException ex) {
+      //Since the block has been deleted, we should not be able to find it
+      blockManager.getBlock(keyValueContainer, blockID);
       fail("testGetNoSuchBlock failed");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
+      assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
     }
   }
 }
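The restructuring above removes the outer `try { ... } catch (IOException ex) { fail(...); }` wrappers: the test methods already declare `throws Exception`, so letting the exception propagate fails the test anyway while preserving the full stack trace, which `fail("...")` discards. A minimal before/after sketch, with a hypothetical `mightThrow()` standing in for the block-manager calls:

```java
import java.io.IOException;
import static org.junit.Assert.fail;
import org.junit.Test;

public class PropagateDemo {
  // Hypothetical operation standing in for blockManager.putBlock(...) etc.
  void mightThrow() throws IOException { }

  // Before: the root cause is swallowed and replaced by a bare message.
  @Test
  public void oldStyle() throws Exception {
    try {
      mightThrow();
    } catch (IOException ex) {
      fail("test failed");  // the stack trace of 'ex' is lost
    }
  }

  // After: JUnit reports the IOException itself, stack trace intact.
  @Test
  public void newStyle() throws Exception {
    mightThrow();
  }
}
```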
@@ -43,6 +43,7 @@
 import java.util.Arrays;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.*;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -88,7 +89,7 @@ public void setUp() throws Exception {
 
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
-    data = "testing write chunks".getBytes();
+    data = "testing write chunks".getBytes(UTF_8);
     // Creating BlockData
     blockID = new BlockID(1L, 1L);
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
@@ -42,9 +42,9 @@
 import org.junit.runners.Parameterized;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.UUID;
@ -252,7 +252,7 @@ private void createContainerWithBlocks(long containerId, int
|
|||||||
.randomUUID().toString());
|
.randomUUID().toString());
|
||||||
MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
|
MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
|
||||||
|
|
||||||
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
|
List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
|
||||||
ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
|
ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
|
||||||
chunkList.add(info.getProtoBufMessage());
|
chunkList.add(info.getProtoBufMessage());
|
||||||
|
|
||||||
|
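Many hunks in this commit swap LinkedList for ArrayList where the list is only appended to and iterated; ArrayList avoids per-node allocation overhead and gives O(1) indexed access. The swap is invisible to callers that program against the List interface, as this sketch shows:

    import java.util.ArrayList;
    import java.util.LinkedList;
    import java.util.List;

    public class ListChoiceExample {
      public static void main(String[] args) {
        // Both satisfy the List contract, so call sites are unaffected.
        List<Integer> linked = new LinkedList<>();
        List<Integer> array = new ArrayList<>();
        for (int i = 0; i < 1_000; i++) {
          linked.add(i);
          array.add(i);
        }
        // Indexed reads are O(n) on LinkedList but O(1) on ArrayList,
        // and ArrayList stores no per-element node objects.
        System.out.println(linked.get(500) + " " + array.get(500));
      }
    }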
@ -51,12 +51,13 @@
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.List;
-import java.util.LinkedList;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.ratis.util.Preconditions.assertTrue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;

@ -135,7 +136,7 @@ private void addBlocks(int count) throws Exception {
   BlockData blockData = new BlockData(blockID);
   blockData.addMetadata("VOLUME", "ozone");
   blockData.addMetadata("OWNER", "hdfs");
-  List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+  List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
   ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
       .getLocalID(), 0), 0, 1024);
   chunkList.add(info.getProtoBufMessage());
@ -163,8 +164,6 @@ public void testCreateContainer() throws Exception {
   // Check whether containerMetaDataPath and chunksPath exists or not.
   assertTrue(containerMetaDataPath != null);
   assertTrue(chunksPath != null);
-  File containerMetaDataLoc = new File(containerMetaDataPath);
-
   //Check whether container file and container db file exists or not.
   assertTrue(keyValueContainer.getContainerFile().exists(),
       ".Container File does not exist");

@ -190,7 +189,7 @@ public void testContainerImportExport() throws Exception {
   //write one few keys to check the key count after import
   MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
   for (int i = 0; i < numberOfKeysToWrite; i++) {
-    metadataStore.put(("test" + i).getBytes(), "test".getBytes());
+    metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8));
   }
   metadataStore.close();

@ -247,7 +246,7 @@ public void testContainerImportExport() throws Exception {
     container.importContainerData(fis, packer);
   }
   fail("Container is imported twice. Previous files are overwritten");
-} catch (Exception ex) {
+} catch (IOException ex) {
   //all good
 }
@ -226,9 +226,10 @@ public void testVolumeSetInKeyValueHandler() throws Exception{
 VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
 KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset,
     volumeSet, metrics);
-assertEquals(keyValueHandler.getVolumeChoosingPolicyForTesting()
-    .getClass().getName(), "org.apache.hadoop.ozone.container.common" +
-    ".volume.RoundRobinVolumeChoosingPolicy");
+assertEquals("org.apache.hadoop.ozone.container.common" +
+    ".volume.RoundRobinVolumeChoosingPolicy",
+    keyValueHandler.getVolumeChoosingPolicyForTesting()
+    .getClass().getName());
 
 //Set a class which is not of sub class of VolumeChoosingPolicy
 conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
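JUnit's assertEquals takes (expected, actual); with the arguments reversed the check passes or fails the same way, but a failure message labels the values backwards. Several hunks in this commit, like the one above, restore the documented order. A toy illustration, with a hypothetical computePolicyName() helper standing in for the real accessor:

    import static org.junit.Assert.assertEquals;

    public class AssertOrderExample {
      // Hypothetical stand-in for getVolumeChoosingPolicyForTesting().
      static String computePolicyName() {
        return "RoundRobinVolumeChoosingPolicy";
      }

      public static void runTest() {
        String actual = computePolicyName();
        // Expected value first: on failure JUnit prints
        // "expected:<...> but was:<...>" with the labels the right way
        // around, which reversed arguments would garble.
        assertEquals("RoundRobinVolumeChoosingPolicy", actual);
      }
    }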
@ -23,7 +23,6 @@
 import java.io.FileWriter;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;

@ -45,6 +44,8 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the tar/untar for a given container.
  */

@ -161,7 +162,7 @@ public void pack() throws IOException, CompressorException {
 //read the container descriptor only
 try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
   String containerYaml = new String(packer.unpackContainerDescriptor(input),
-      Charset.forName(StandardCharsets.UTF_8.name()));
+      Charset.forName(UTF_8.name()));
   Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml);
 }

@ -177,7 +178,7 @@ public void pack() throws IOException, CompressorException {
 try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
   descriptor =
       new String(packer.unpackContainerData(destinationContainer, input),
-          Charset.forName(StandardCharsets.UTF_8.name()));
+          Charset.forName(UTF_8.name()));
 }
 
 assertExampleMetadataDbIsGood(

@ -204,7 +205,7 @@ private void assertExampleMetadataDbIsGood(Path dbPath)
 
 try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) {
   List<String> strings = IOUtils
-      .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+      .readLines(testFile, Charset.forName(UTF_8.name()));
   Assert.assertEquals(1, strings.size());
   Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0));
 }

@ -222,7 +223,7 @@ private void assertExampleChunkFileIsGood(Path chunkDirPath)
 
 try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) {
   List<String> strings = IOUtils
-      .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+      .readLines(testFile, Charset.forName(UTF_8.name()));
   Assert.assertEquals(1, strings.size());
   Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0));
 }
@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Chunk Manager Checks.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
@ -139,6 +139,7 @@ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
  * @throws IllegalArgumentException If there is no EventHandler for
  *                                  the specific event.
  */
+@Override
 public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
     EVENT_TYPE event, PAYLOAD payload) {

@ -219,7 +220,9 @@ public void processAll(long timeout) {
 try {
   Thread.sleep(100);
 } catch (InterruptedException e) {
-  e.printStackTrace();
+  LOG.warn("Interrupted exception while sleeping.", e);
+  // We ignore this exception for time being. Review? should we
+  // propogate it back to caller?
 }
 
 if (Time.now() > currentTime + timeout) {

@ -229,7 +232,7 @@ public void processAll(long timeout) {
     }
   }
 }
 
+@Override
 public void close() {
 
   isRunning = false;
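Replacing e.printStackTrace() with a logger call routes interruptions through the normal logging pipeline, and the new comment leaves open whether the interrupt should be propagated. One common convention, which this commit does not adopt, is to restore the thread's interrupt flag so callers can still observe the interruption; a hedged sketch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class InterruptExample implements Runnable {
      private static final Logger LOG =
          LoggerFactory.getLogger(InterruptExample.class);

      @Override
      public void run() {
        try {
          Thread.sleep(100);
        } catch (InterruptedException e) {
          // Logging keeps the event visible without writing to stderr.
          LOG.warn("Interrupted while sleeping.", e);
          // Restoring the flag lets callers see the interruption; this
          // is one possible answer to the open question in the comment
          // above, not what the commit itself does.
          Thread.currentThread().interrupt();
        }
      }
    }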
@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.hadoop.hdds.server.events;
 
-import java.util.List;
-import java.util.Objects;
 import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.LeaseManager;

@ -27,6 +24,9 @@
 import org.junit.Before;
 import org.junit.Test;
 
+import java.util.List;
+import java.util.Objects;
+
 /**
  * Test the basic functionality of event watcher.
  */
@ -41,7 +41,7 @@ public class TestEventWatcher {
 private static final TypedEvent<ReplicationCompletedEvent>
     REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
 
-LeaseManager<Long> leaseManager;
+private LeaseManager<Long> leaseManager;
 
 @Before
 public void startLeaseManager() {

@ -56,7 +56,6 @@ public void stopLeaseManager() {
   DefaultMetricsSystem.shutdown();
 }
 
-
 @Test
 public void testEventHandling() throws InterruptedException {
   EventQueue queue = new EventQueue();

@ -180,7 +179,7 @@ public void testMetrics() throws InterruptedException {
 
 queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
 
-Thread.sleep(2200l);
+Thread.sleep(2200L);
 
 //until now: 3 in-progress activities are tracked with three
 // UnderreplicatedEvents. The first one is completed, the remaining two

@ -201,27 +200,29 @@ public void testMetrics() throws InterruptedException {
 }
 
 private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
     createEventWatcher() {
   return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
       REPLICATION_COMPLETED, leaseManager);
 }
 
-private class CommandWatcherExample
+private static class CommandWatcherExample
     extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {
 
-  public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
+  CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
       Event<ReplicationCompletedEvent> completionEvent,
       LeaseManager<Long> leaseManager) {
     super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
   }
 
   @Override
-  protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+  protected void onTimeout(EventPublisher publisher,
+      UnderreplicatedEvent payload) {
     publisher.fireEvent(UNDER_REPLICATED, payload);
   }
 
   @Override
-  protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+  protected void onFinished(EventPublisher publisher,
+      UnderreplicatedEvent payload) {
     //Good job. We did it.
   }

@ -240,13 +241,14 @@ private static class ReplicationCompletedEvent
 
 private final String datanodeId;
 
-public ReplicationCompletedEvent(long id, String containerId,
+ReplicationCompletedEvent(long id, String containerId,
     String datanodeId) {
   this.id = id;
   this.containerId = containerId;
   this.datanodeId = datanodeId;
 }
 
+@Override
 public long getId() {
   return id;
 }

@ -279,11 +281,12 @@ private static class UnderreplicatedEvent
 
 private final String containerId;
 
-public UnderreplicatedEvent(long id, String containerId) {
+UnderreplicatedEvent(long id, String containerId) {
   this.containerId = containerId;
   this.id = id;
 }
 
+@Override
 public long getId() {
   return id;
 }
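Two recurring ErrorProne themes show up above: an inner class that never touches its enclosing instance is made static (dropping the hidden reference to the outer object), and public modifiers are removed from members of private nested classes, where they have no effect. A compact sketch of both, with invented names:

    public class OuterExample {

      // A non-static inner class would keep a hidden reference to its
      // enclosing OuterExample; when that reference is never used it only
      // pins the outer object in memory, so static is preferred.
      private static class Worker {
        private final String name;

        // Package-private constructor: "public" on a member of a private
        // nested class is unreachable from outside anyway, which is why
        // the modifiers were dropped in the hunks above.
        Worker(String name) {
          this.name = name;
        }

        String name() {
          return name;
        }
      }

      public static void main(String[] args) {
        System.out.println(new Worker("example").name());
      }
    }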
@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for Event Watcher.
+ */
+package org.apache.hadoop.hdds.server.events;
@ -19,9 +19,12 @@
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
+/**
+ * Pending Deletes in the block space.
+ */
 public class PendingDeleteStatusList {
 
   private List<PendingDeleteStatus> pendingDeleteStatuses;

@ -29,7 +32,7 @@ public class PendingDeleteStatusList {
 
 public PendingDeleteStatusList(DatanodeDetails datanodeDetails) {
   this.datanodeDetails = datanodeDetails;
-  pendingDeleteStatuses = new LinkedList<>();
+  pendingDeleteStatuses = new ArrayList<>();
 }
 
 public void addPendingDeleteStatus(long dnDeleteTransactionId,

@ -39,6 +42,9 @@ public void addPendingDeleteStatus(long dnDeleteTransactionId,
       containerId));
 }
 
+/**
+ * Status of pending deletes.
+ */
 public static class PendingDeleteStatus {
   private long dnDeleteTransactionId;
   private long scmDeleteTransactionId;
@ -44,6 +44,7 @@ public ChillModePrecheck(Configuration conf) {
   }
 }
 
+@Override
 public boolean check(ScmOps op) throws SCMException {
   if (inChillMode.get() && ChillModeRestrictedOps
       .isRestrictedInChillMode(op)) {
@ -134,6 +134,16 @@ public static ContainerReplicaBuilder newBuilder() {
   return new ContainerReplicaBuilder();
 }
 
+@Override
+public String toString() {
+  return "ContainerReplica{" +
+      "containerID=" + containerID +
+      ", datanodeDetails=" + datanodeDetails +
+      ", placeOfBirth=" + placeOfBirth +
+      ", sequenceId=" + sequenceId +
+      '}';
+}
+
 /**
  * Used for building ContainerReplica instance.
  */

@ -148,12 +158,12 @@ public static class ContainerReplicaBuilder {
 /**
  * Set Container Id.
  *
- * @param containerId ContainerID
+ * @param cID ContainerID
  * @return ContainerReplicaBuilder
  */
 public ContainerReplicaBuilder setContainerID(
-    final ContainerID containerId) {
-  containerID = containerId;
+    final ContainerID cID) {
+  this.containerID = cID;
   return this;
 }
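The new ContainerReplica#toString() follows the usual field-by-field pattern so log statements print something more useful than the default ClassName@hashcode, and the builder change renames a parameter that differed from the field it assigned only by case. A minimal sketch of the toString() idiom with invented fields:

    public class ReplicaInfoExample {
      private final long containerId;
      private final String datanode;

      ReplicaInfoExample(long containerId, String datanode) {
        this.containerId = containerId;
        this.datanode = datanode;
      }

      // Without an override, printing this object yields the opaque
      // "ReplicaInfoExample@1a2b3c"; listing each field mirrors the style
      // of the ContainerReplica#toString() added above.
      @Override
      public String toString() {
        return "ReplicaInfoExample{" +
            "containerId=" + containerId +
            ", datanode=" + datanode +
            '}';
      }

      public static void main(String[] args) {
        System.out.println(new ReplicaInfoExample(42L, "dn-1"));
      }
    }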
@ -27,7 +27,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.stream.Collectors;

@ -102,7 +102,7 @@ public Configuration getConf() {
  * @return list of datanodes chosen.
  * @throws SCMException SCM exception.
  */
+@Override
 public List<DatanodeDetails> chooseDatanodes(
     List<DatanodeDetails> excludedNodes,
     int nodesRequired, final long sizeRequired) throws SCMException {

@ -167,7 +167,7 @@ private boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
 public List<DatanodeDetails> getResultSet(
     int nodesRequired, List<DatanodeDetails> healthyNodes)
     throws SCMException {
-  List<DatanodeDetails> results = new LinkedList<>();
+  List<DatanodeDetails> results = new ArrayList<>();
   for (int x = 0; x < nodesRequired; x++) {
     // invoke the choose function defined in the derived classes.
     DatanodeDetails nodeId = chooseNode(healthyNodes);
@ -83,6 +83,7 @@ public List<DatanodeDetails> chooseDatanodes(
  * @param healthyNodes - all healthy datanodes.
  * @return one randomly chosen datanode that from two randomly chosen datanode
  */
+@Override
 public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
   DatanodeDetails selectedNode =
       healthyNodes.get(getRand().nextInt(healthyNodes.size()));
@ -23,7 +23,7 @@
  * DatanodeMetric acts as the basis for all the metric that is used in
  * comparing 2 datanodes.
  */
-public interface DatanodeMetric<T, S> extends Comparable<T> {
+public interface DatanodeMetric<T, S> {
 
 /**
  * Some syntactic sugar over Comparable interface. This makes code easier to

@ -87,5 +87,4 @@ public interface DatanodeMetric<T, S> extends Comparable<T> {
  */
 void subtract(T value);
 
-
 }

@ -136,7 +136,6 @@ public void subtract(Long subValue) {
  * @throws ClassCastException if the specified object's type prevents it
  *                            from being compared to this object.
  */
-@Override
 public int compareTo(Long o) {
   return Long.compare(this.value, o);
 }
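Dropping extends Comparable<T> resolves ErrorProne's ComparableType complaint: Comparable's contract is that a type orders itself against its own type, while these metrics declared an ordering against their value type instead. One way to keep the comparison without claiming the contract, sketched with a hypothetical wrapper:

    // ErrorProne's ComparableType check expects "class X implements
    // Comparable<X>"; a metric wrapper comparing itself to its raw value
    // violates that, so the ordering is better expressed as a plain method.
    public class MetricExample {
      private final long value;

      MetricExample(long value) {
        this.value = value;
      }

      // A named comparison method carries the same information as the old
      // compareTo(Long) without implementing Comparable.
      int compareToValue(long other) {
        return Long.compare(value, other);
      }

      public static void main(String[] args) {
        System.out.println(new MetricExample(10L).compareToValue(20L)); // -1
      }
    }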
@ -23,7 +23,7 @@
 /**
  * SCM Node Metric that is used in the placement classes.
  */
 public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
   private SCMNodeStat stat;
 
 /**

@ -191,7 +191,7 @@ public void subtract(SCMNodeStat value) {
  * @throws ClassCastException if the specified object's type prevents it
  *                            from being compared to this object.
  */
-@Override
+//@Override
 public int compareTo(SCMNodeStat o) {
   if (isEqual(o)) {
     return 0;
|
|||||||
/**
|
/**
|
||||||
* @return the total configured capacity of the node.
|
* @return the total configured capacity of the node.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public LongMetric getCapacity() {
|
public LongMetric getCapacity() {
|
||||||
return capacity;
|
return capacity;
|
||||||
}
|
}
|
||||||
@ -59,6 +60,7 @@ public LongMetric getCapacity() {
|
|||||||
/**
|
/**
|
||||||
* @return the total SCM used space on the node.
|
* @return the total SCM used space on the node.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public LongMetric getScmUsed() {
|
public LongMetric getScmUsed() {
|
||||||
return scmUsed;
|
return scmUsed;
|
||||||
}
|
}
|
||||||
@ -66,6 +68,7 @@ public LongMetric getScmUsed() {
|
|||||||
/**
|
/**
|
||||||
* @return the total remaining space available on the node.
|
* @return the total remaining space available on the node.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public LongMetric getRemaining() {
|
public LongMetric getRemaining() {
|
||||||
return remaining;
|
return remaining;
|
||||||
}
|
}
|
||||||
@ -77,12 +80,9 @@ public LongMetric getRemaining() {
|
|||||||
* @param newUsed in bytes
|
* @param newUsed in bytes
|
||||||
* @param newRemaining in bytes
|
* @param newRemaining in bytes
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
public void set(long newCapacity, long newUsed, long newRemaining) {
|
public void set(long newCapacity, long newUsed, long newRemaining) {
|
||||||
Preconditions.checkNotNull(newCapacity, "Capacity cannot be null");
|
|
||||||
Preconditions.checkNotNull(newUsed, "used cannot be null");
|
|
||||||
Preconditions.checkNotNull(newRemaining, "remaining cannot be null");
|
|
||||||
|
|
||||||
Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
|
Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
|
||||||
"negative.");
|
"negative.");
|
||||||
Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +
|
Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +
|
||||||
@ -101,6 +101,7 @@ public void set(long newCapacity, long newUsed, long newRemaining) {
|
|||||||
* @param stat Nodestat.
|
* @param stat Nodestat.
|
||||||
* @return SCMNodeStat
|
* @return SCMNodeStat
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public SCMNodeStat add(NodeStat stat) {
|
public SCMNodeStat add(NodeStat stat) {
|
||||||
this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
|
this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
|
||||||
this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
|
this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
|
||||||
@ -114,6 +115,7 @@ public SCMNodeStat add(NodeStat stat) {
|
|||||||
* @param stat SCMNodeStat.
|
* @param stat SCMNodeStat.
|
||||||
* @return Modified SCMNodeStat
|
* @return Modified SCMNodeStat
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public SCMNodeStat subtract(NodeStat stat) {
|
public SCMNodeStat subtract(NodeStat stat) {
|
||||||
this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
|
this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
|
||||||
this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
|
this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
|
||||||
|
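The removed Preconditions.checkNotNull calls took primitive longs; auto-boxing means the argument is a fresh, never-null Long, so those checks were dead code, which is exactly what ErrorProne reports. The range checks via checkArgument are the ones that can actually fire, as this sketch shows:

    import com.google.common.base.Preconditions;

    public class PreconditionExample {
      private long capacity;

      // checkNotNull(long) can never fail: the primitive is boxed into a
      // non-null Long before the check runs. Validating the range with
      // checkArgument is the meaningful guard.
      public void set(long newCapacity) {
        Preconditions.checkArgument(newCapacity >= 0,
            "Capacity cannot be negative.");
        this.capacity = newCapacity;
      }
    }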
@ -48,12 +48,13 @@ public ReplicationActivityStatus(){
   replicationStatusListener = new ReplicationStatusListener();
   chillModeStatusListener = new ChillModeStatusListener();
 }
+@Override
 public boolean isReplicationEnabled() {
   return replicationEnabled.get();
 }
 
 @VisibleForTesting
+@Override
 public void setReplicationEnabled(boolean enabled) {
   replicationEnabled.set(enabled);
 }
@ -98,6 +98,7 @@ public void start() {
   threadFactory.newThread(this).start();
 }
 
+@Override
 public void run() {
 
   while (running) {

@ -168,6 +169,7 @@ public void run() {
 
 } else if (deficit < 0) {
   //TODO: too many replicas. Not handled yet.
+  LOG.debug("Too many replicas is not handled yet.");
 }
 
 } catch (Exception e) {
@ -22,8 +22,8 @@
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;

@ -39,7 +39,7 @@
 */
 public class CommandQueue {
   // This list is used as default return value.
-  private static final List<SCMCommand> DEFAULT_LIST = new LinkedList<>();
+  private static final List<SCMCommand> DEFAULT_LIST = new ArrayList<>();
   private final Map<UUID, Commands> commandMap;
   private final Lock lock;
   private long commandsInQueue;

@ -136,7 +136,7 @@ private static class Commands {
  * Constructs a Commands class.
  */
 Commands() {
-  commands = new LinkedList<>();
+  commands = new ArrayList<>();
   updateTime = 0;
   readTime = 0;
 }

@ -182,7 +182,7 @@ public void add(SCMCommand command) {
  */
 public List<SCMCommand> getCommands() {
   List<SCMCommand> temp = this.commands;
-  this.commands = new LinkedList<>();
+  this.commands = new ArrayList<>();
   readTime = Time.monotonicNow();
   return temp;
 }
@ -144,6 +144,8 @@ public NodeStateManager(Configuration conf, EventPublisher eventPublisher) {
 executorService = HadoopExecutors.newScheduledThreadPool(1,
     new ThreadFactoryBuilder().setDaemon(true)
         .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
+//BUG:BUG TODO: The return value is ignored, if an exception is thrown in
+// the executing funtion, it will be ignored.
 executorService.schedule(this, heartbeatCheckerIntervalMs,
     TimeUnit.MILLISECONDS);
 }

@ -331,7 +333,7 @@ public List<DatanodeDetails> getDeadNodes() {
  * @return list of nodes
  */
 public List<DatanodeDetails> getNodes(NodeState state) {
-  List<DatanodeDetails> nodes = new LinkedList<>();
+  List<DatanodeDetails> nodes = new ArrayList<>();
   nodeStateMap.getNodes(state).forEach(
       uuid -> {
         try {

@ -352,7 +354,7 @@ public List<DatanodeDetails> getNodes(NodeState state) {
  * @return all the managed nodes
  */
 public List<DatanodeDetails> getAllNodes() {
-  List<DatanodeDetails> nodes = new LinkedList<>();
+  List<DatanodeDetails> nodes = new ArrayList<>();
   nodeStateMap.getAllNodes().forEach(
       uuid -> {
         try {

@ -613,6 +615,8 @@ public void run() {
 
 if (!Thread.currentThread().isInterrupted() &&
     !executorService.isShutdown()) {
+  //BUGBUG: The return future needs to checked here to make sure the
+  // exceptions are handled correctly.
   executorService.schedule(this, heartbeatCheckerIntervalMs,
       TimeUnit.MILLISECONDS);
 } else {
@ -59,6 +59,7 @@ public Set<ContainerID> getContainers(UUID datanode) {
  * @param datanodeID -- Datanode UUID
  * @param containerIDs - List of ContainerIDs.
  */
+@Override
 public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
     throws SCMException {
   super.insertNewDatanode(datanodeID, containerIDs);

@ -84,6 +85,7 @@ public void setContainersForDatanode(UUID datanodeID,
 }
 
 @VisibleForTesting
+@Override
 public int size() {
   return dn2ObjectMap.size();
 }
@ -172,7 +172,7 @@ public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException {
 public List<UUID> getNodes(NodeState state) {
   lock.readLock().lock();
   try {
-    return new LinkedList<>(stateMap.get(state));
+    return new ArrayList<>(stateMap.get(state));
   } finally {
     lock.readLock().unlock();
   }

@ -186,7 +186,7 @@ public List<UUID> getNodes(NodeState state) {
 public List<UUID> getAllNodes() {
   lock.readLock().lock();
   try {
-    return new LinkedList<>(nodeMap.keySet());
+    return new ArrayList<>(nodeMap.keySet());
   } finally {
     lock.readLock().unlock();
   }
@ -30,6 +30,7 @@
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
 import org.apache.hadoop.utils.MetadataStoreBuilder;
 import org.slf4j.Logger;

@ -94,7 +95,8 @@ private void initializePipelineState() throws IOException {
   return;
 }
 List<Map.Entry<byte[], byte[]>> pipelines =
-    pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
+    pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE,
+        (MetadataKeyFilters.MetadataKeyFilter[])null);
 
 for (Map.Entry<byte[], byte[]> entry : pipelines) {
   Pipeline pipeline = Pipeline.getFromProtobuf(
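Passing a bare null to a varargs parameter is ambiguous: the compiler treats it as a null array, not as a one-element array holding null, so the hunk adds an explicit array cast to state the intent. A self-contained illustration with a stand-in scan() method, not the real MetadataStore API:

    public class VarargsNullExample {
      // Stand-in for a trailing varargs parameter like the key filters
      // of getSequentialRangeKVs.
      static int scan(String prefix, int limit, String... filters) {
        return filters == null ? 0 : filters.length;
      }

      public static void main(String[] args) {
        // A bare null binds to the array itself; the cast makes that
        // reading explicit instead of relying on overload resolution.
        System.out.println(scan("p", 10, (String[]) null)); // prints 0
        // Casting to the element type instead passes one null element.
        System.out.println(scan("p", 10, (String) null));   // prints 1
      }
    }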
@ -58,7 +58,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;

@ -354,7 +353,7 @@ public boolean forceExitChillMode() throws IOException {
  */
 public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) {
   Preconditions.checkNotNull(state, "Node Query set cannot be null");
-  return new LinkedList<>(queryNodeState(state));
+  return new ArrayList<>(queryNodeState(state));
 }
 
 @VisibleForTesting
@ -89,7 +89,7 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.Collectors;

@ -225,7 +225,7 @@ public static SCMRegisteredResponseProto getRegisteredResponse(
 @Override
 public SCMHeartbeatResponseProto sendHeartbeat(
     SCMHeartbeatRequestProto heartbeat) throws IOException {
-  List<SCMCommandProto> cmdResponses = new LinkedList<>();
+  List<SCMCommandProto> cmdResponses = new ArrayList<>();
   for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) {
     cmdResponses.add(getCommandResponse(cmd));
   }
@ -42,7 +42,7 @@
 import static org.junit.Assert.assertTrue;
 
 /**
- * Unit tests for {@link HddsServerUtil}
+ * Unit tests for {@link HddsServerUtil}.
  */
 public class TestHddsServerUtils {
   public static final Logger LOG = LoggerFactory.getLogger(

@ -58,6 +58,7 @@ public class TestHddsServerUtils {
  * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
  */
 @Test
+@SuppressWarnings("StringSplitter")
 public void testGetDatanodeAddressWithPort() {
   final String scmHost = "host123:100";
   final Configuration conf = new OzoneConfiguration();

@ -78,8 +79,8 @@ public void testGetDatanodeAddressWithoutPort() {
 conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
 final InetSocketAddress address =
     HddsServerUtil.getScmAddressForDataNodes(conf);
-assertEquals(address.getHostName(), scmHost);
-assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+assertEquals(scmHost, address.getHostName());
+assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
 }
 
 /**

@ -93,8 +94,8 @@ public void testDatanodeAddressFallbackToClientNoPort() {
 conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
 final InetSocketAddress address =
     HddsServerUtil.getScmAddressForDataNodes(conf);
-assertEquals(address.getHostName(), scmHost);
-assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+assertEquals(scmHost, address.getHostName());
+assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
 }
 
 /**

@ -103,6 +104,7 @@ public void testDatanodeAddressFallbackToClientNoPort() {
  * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored.
  */
 @Test
+@SuppressWarnings("StringSplitter")
 public void testDatanodeAddressFallbackToClientWithPort() {
   final String scmHost = "host123:100";
   final Configuration conf = new OzoneConfiguration();

@ -124,8 +126,8 @@ public void testDatanodeAddressFallbackToScmNamesNoPort() {
 conf.set(OZONE_SCM_NAMES, scmHost);
 final InetSocketAddress address =
     HddsServerUtil.getScmAddressForDataNodes(conf);
-assertEquals(address.getHostName(), scmHost);
-assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+assertEquals(scmHost, address.getHostName());
+assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
 }
 
 /**

@ -134,6 +136,7 @@ public void testDatanodeAddressFallbackToScmNamesNoPort() {
  * defined by OZONE_SCM_NAMES should be ignored.
  */
 @Test
+@SuppressWarnings("StringSplitter")
 public void testDatanodeAddressFallbackToScmNamesWithPort() {
   final String scmHost = "host123:100";
   final Configuration conf = new OzoneConfiguration();

@ -141,7 +144,7 @@ public void testDatanodeAddressFallbackToScmNamesWithPort() {
 final InetSocketAddress address =
     HddsServerUtil.getScmAddressForDataNodes(conf);
 assertEquals(address.getHostName(), scmHost.split(":")[0]);
-assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
 }
 
 /**
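The @SuppressWarnings("StringSplitter") annotations address ErrorProne's warning that String.split() has surprising semantics (trailing empty strings are silently dropped) and that Guava's Splitter is usually safer; here the inputs are simple enough that suppression was chosen over rewriting. A small demonstration of the behavior being flagged:

    public class SplitExample {
      @SuppressWarnings("StringSplitter")
      public static void main(String[] args) {
        String mapping = "volume/bucket";
        String[] parts = mapping.split("/");
        System.out.println(parts[0] + " " + parts[1]); // volume bucket
        // The surprise ErrorProne warns about: trailing empty strings
        // are dropped, so this prints 2, not 4.
        System.out.println("a,b,,".split(",").length);
      }
    }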
@ -48,7 +48,7 @@
 
 import java.io.File;
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;

@ -378,12 +378,9 @@ public void testScmDetectStaleAndDeadNode() throws IOException,
  * Check for NPE when datanodeDetails is passed null for sendHeartbeat.
  *
  * @throws IOException
- * @throws InterruptedException
- * @throws TimeoutException
  */
 @Test
-public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException,
-    InterruptedException, TimeoutException {
+public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException {
   try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
     nodeManager.processHeartbeat(null);
   } catch (NullPointerException npe) {

@ -588,7 +585,7 @@ private void heartbeatNodeSet(SCMNodeManager manager,
  */
 private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int
     count) {
-  List<DatanodeDetails> list = new LinkedList<>();
+  List<DatanodeDetails> list = new ArrayList<>();
   for (int x = 0; x < count; x++) {
     DatanodeDetails datanodeDetails = TestUtils
         .createRandomDatanodeAndRegister(nodeManager);

@ -943,7 +940,7 @@ public void testScmNodeReportUpdate() throws IOException,
 }
 
 @Test
-public void testHandlingSCMCommandEvent() {
+public void testHandlingSCMCommandEvent() throws IOException {
   OzoneConfiguration conf = getConf();
   conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
       100, TimeUnit.MILLISECONDS);

@ -974,6 +971,7 @@ public void testHandlingSCMCommandEvent() {
     .assertEquals(command.get(0).getClass(), CloseContainerCommand.class);
 } catch (IOException e) {
   e.printStackTrace();
+  throw e;
 }
 }
@ -150,22 +150,22 @@ public void testProcessNodeReportCheckOneNode() throws IOException {
     path, reportCapacity, reportScmUsed, reportRemaining, null);
 StorageReportResult result =
     map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
-Assert.assertEquals(result.getStatus(),
-    SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
+    result.getStatus());
 StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
     NodeReportProto.newBuilder();
 StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage();
 reportList.add(srb);
 result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-Assert.assertEquals(result.getStatus(),
-    SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
+    result.getStatus());
 
 reportList.add(TestUtils
     .createStorageReport(UUID.randomUUID(), path, reportCapacity,
         reportCapacity, 0, null));
 result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-Assert.assertEquals(result.getStatus(),
-    SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
+Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE,
+    result.getStatus());
 // Mark a disk failed
 StorageReportProto srb2 = StorageReportProto.newBuilder()
     .setStorageUuid(UUID.randomUUID().toString())

@ -174,8 +174,8 @@ public void testProcessNodeReportCheckOneNode() throws IOException {
 reportList.add(srb2);
 nrb.addAllStorageReport(reportList);
 result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
-Assert.assertEquals(result.getStatus(),
-    SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE);
+Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus
+    .FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus());
 
 }
@ -51,7 +51,7 @@ private void generateData() {
 for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
   TreeSet<ContainerID> currentSet = new TreeSet<>();
   for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
-    long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
+    long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
     currentSet.add(new ContainerID(currentCnIndex));
   }
   testData.put(UUID.randomUUID(), currentSet);

@ -115,8 +115,8 @@ public void testProcessReportCheckOneNode() throws SCMException {
 map.insertNewDatanode(key, values);
 Assert.assertTrue(map.isKnownDatanode(key));
 ReportResult result = map.processReport(key, values);
-Assert.assertEquals(result.getStatus(),
-    ReportResult.ReportStatus.ALL_IS_WELL);
+Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL,
+    result.getStatus());
 }
 
 @Test
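The (long) cast above addresses ErrorProne's integer-overflow warning: dnIndex * CONTAINER_COUNT is evaluated in 32-bit arithmetic before any widening to long happens. A sketch of the difference; note that casting the parenthesized product, as the hunk does, widens only after the int multiply has run, while widening one operand makes the multiplication itself 64-bit:

    public class OverflowExample {
      public static void main(String[] args) {
        int dnIndex = 300_000;
        int containerCount = 100_000;
        // int * int wraps around before the implicit widening to long.
        long wrong = dnIndex * containerCount;
        // Widening an operand first keeps the whole product in 64 bits.
        long right = (long) dnIndex * containerCount;
        System.out.println(wrong + " vs " + right);
      }
    }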
@ -19,9 +19,8 @@
 
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

@ -30,20 +29,18 @@
  * Tests that test Metrics that support placement.
  */
 public class TestDatanodeMetrics {
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
   @Test
   public void testSCMNodeMetric() {
     SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
     assertEquals((long) stat.getCapacity().get(), 100L);
-    assertEquals((long) stat.getScmUsed().get(), 10L);
-    assertEquals((long) stat.getRemaining().get(), 90L);
+    assertEquals(10L, (long) stat.getScmUsed().get());
+    assertEquals(90L, (long) stat.getRemaining().get());
     SCMNodeMetric metric = new SCMNodeMetric(stat);
 
     SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals((long) stat.getCapacity().get(), 100L);
+    assertEquals(100L, (long) stat.getCapacity().get());
     assertEquals(10L, (long) stat.getScmUsed().get());
     assertEquals(90L, (long) stat.getRemaining().get());
 
     SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
     assertTrue(metric.isEqual(newMetric.get()));
@ -124,6 +124,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException {
|
|||||||
* @return String - Ozone Volume name.
|
* @return String - Ozone Volume name.
|
||||||
* @throws IOException - Throws if the s3Bucket does not exist.
|
* @throws IOException - Throws if the s3Bucket does not exist.
|
||||||
*/
|
*/
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
||||||
String mapping = getOzoneBucketMapping(s3BucketName);
|
String mapping = getOzoneBucketMapping(s3BucketName);
|
||||||
return mapping.split("/")[0];
|
return mapping.split("/")[0];
|
||||||
@ -136,6 +137,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
|||||||
* @return String - Ozone bucket Name.
|
* @return String - Ozone bucket Name.
|
||||||
* @throws IOException - Throws if the s3bucket does not exist.
|
* @throws IOException - Throws if the s3bucket does not exist.
|
||||||
*/
|
*/
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public String getOzoneBucketName(String s3BucketName) throws IOException {
|
public String getOzoneBucketName(String s3BucketName) throws IOException {
|
||||||
String mapping = getOzoneBucketMapping(s3BucketName);
|
String mapping = getOzoneBucketMapping(s3BucketName);
|
||||||
return mapping.split("/")[1];
|
return mapping.split("/")[1];
|
||||||
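The @SuppressWarnings("StringSplitter") annotations above acknowledge ErrorProne's warning that String.split has surprising corner cases: its argument is a regex, and it drops trailing empty strings, so splitting "vol/" on "/" yields one element while "/bucket" yields two. Here the input is a "volume/bucket" mapping with both parts known to be present, so the suppression is reasonable. A minimal sketch of the corner case, with Guava's Splitter as the alternative ErrorProne usually suggests (assuming Guava is on the classpath, as it is elsewhere in this codebase):

    import com.google.common.base.Splitter;

    import java.util.List;

    public class SplitExample {
      public static void main(String[] args) {
        // String.split drops trailing empty strings:
        System.out.println("vol/".split("/").length);    // 1, not 2
        System.out.println("/bucket".split("/").length); // 2 ("" and "bucket")

        // Guava's Splitter keeps every field and takes a literal, not a regex.
        List<String> parts = Splitter.on('/').splitToList("vol/");
        System.out.println(parts); // [vol, ]
      }
    }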
|
@ -610,6 +610,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
||||||
String mapping = getOzoneBucketMapping(s3BucketName);
|
String mapping = getOzoneBucketMapping(s3BucketName);
|
||||||
return mapping.split("/")[0];
|
return mapping.split("/")[0];
|
||||||
@ -617,6 +618,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public String getOzoneBucketName(String s3BucketName) throws IOException {
|
public String getOzoneBucketName(String s3BucketName) throws IOException {
|
||||||
String mapping = getOzoneBucketMapping(s3BucketName);
|
String mapping = getOzoneBucketMapping(s3BucketName);
|
||||||
return mapping.split("/")[1];
|
return mapping.split("/")[1];
|
||||||
|
@ -40,11 +40,12 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* This test class verifies the parsing of SCM endpoint config settings. The
|
* This test class verifies the parsing of SCM endpoint config settings. The
|
||||||
* parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
|
* parsing logic is in
|
||||||
|
* {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
|
||||||
*/
|
*/
|
||||||
public class TestHddsClientUtils {
|
public class TestHddsClientUtils {
|
||||||
@Rule
|
@Rule
|
||||||
public Timeout timeout = new Timeout(300_000);
|
public Timeout timeout = new Timeout(300000);
|
||||||
|
|
||||||
@Rule
|
@Rule
|
||||||
public ExpectedException thrown = ExpectedException.none();
|
public ExpectedException thrown = ExpectedException.none();
|
||||||
@ -114,13 +115,14 @@ public void testBlockClientFallbackToClientNoPort() {
|
|||||||
final String scmHost = "host123";
|
final String scmHost = "host123";
|
||||||
final Configuration conf = new OzoneConfiguration();
|
final Configuration conf = new OzoneConfiguration();
|
||||||
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
|
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
|
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
|
||||||
conf);
|
conf);
|
||||||
assertEquals(address.getHostName(), scmHost);
|
assertEquals(scmHost, address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public void testBlockClientFallbackToClientWithPort() {
|
public void testBlockClientFallbackToClientWithPort() {
|
||||||
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
|
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
|
||||||
// fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
|
// fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
|
||||||
@ -132,8 +134,8 @@ public void testBlockClientFallbackToClientWithPort() {
|
|||||||
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
|
conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
|
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
|
||||||
conf);
|
conf);
|
||||||
assertEquals(address.getHostName(), scmHost.split(":")[0]);
|
assertEquals(scmHost.split(":")[0], address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -143,13 +145,14 @@ public void testBlockClientFallbackToScmNamesNoPort() {
|
|||||||
final String scmHost = "host456";
|
final String scmHost = "host456";
|
||||||
final Configuration conf = new OzoneConfiguration();
|
final Configuration conf = new OzoneConfiguration();
|
||||||
conf.set(OZONE_SCM_NAMES, scmHost);
|
conf.set(OZONE_SCM_NAMES, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
|
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
|
||||||
conf);
|
conf);
|
||||||
assertEquals(address.getHostName(), scmHost);
|
assertEquals(scmHost, address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public void testBlockClientFallbackToScmNamesWithPort() {
|
public void testBlockClientFallbackToScmNamesWithPort() {
|
||||||
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
|
// When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
|
||||||
// are undefined it should fallback to OZONE_SCM_NAMES.
|
// are undefined it should fallback to OZONE_SCM_NAMES.
|
||||||
@ -159,10 +162,10 @@ public void testBlockClientFallbackToScmNamesWithPort() {
|
|||||||
final String scmHost = "host456:200";
|
final String scmHost = "host456:200";
|
||||||
final Configuration conf = new OzoneConfiguration();
|
final Configuration conf = new OzoneConfiguration();
|
||||||
conf.set(OZONE_SCM_NAMES, scmHost);
|
conf.set(OZONE_SCM_NAMES, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
|
final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
|
||||||
conf);
|
conf);
|
||||||
assertEquals(address.getHostName(), scmHost.split(":")[0]);
|
assertEquals(scmHost.split(":")[0], address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -172,12 +175,13 @@ public void testClientFallbackToScmNamesNoPort() {
|
|||||||
final String scmHost = "host456";
|
final String scmHost = "host456";
|
||||||
final Configuration conf = new OzoneConfiguration();
|
final Configuration conf = new OzoneConfiguration();
|
||||||
conf.set(OZONE_SCM_NAMES, scmHost);
|
conf.set(OZONE_SCM_NAMES, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
|
final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
|
||||||
assertEquals(address.getHostName(), scmHost);
|
assertEquals(scmHost, address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@SuppressWarnings("StringSplitter")
|
||||||
public void testClientFallbackToScmNamesWithPort() {
|
public void testClientFallbackToScmNamesWithPort() {
|
||||||
// When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
|
// When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
|
||||||
// to OZONE_SCM_NAMES.
|
// to OZONE_SCM_NAMES.
|
||||||
@ -187,9 +191,9 @@ public void testClientFallbackToScmNamesWithPort() {
|
|||||||
final String scmHost = "host456:300";
|
final String scmHost = "host456:300";
|
||||||
final Configuration conf = new OzoneConfiguration();
|
final Configuration conf = new OzoneConfiguration();
|
||||||
conf.set(OZONE_SCM_NAMES, scmHost);
|
conf.set(OZONE_SCM_NAMES, scmHost);
|
||||||
final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
|
final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
|
||||||
assertEquals(address.getHostName(), scmHost.split(":")[0]);
|
assertEquals(scmHost.split(":")[0], address.getHostName());
|
||||||
assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
|
assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -34,6 +34,7 @@
|
|||||||
/**
|
/**
|
||||||
* This helper class keeps a map of all users and their permissions.
|
* This helper class keeps a map of all users and their permissions.
|
||||||
*/
|
*/
|
||||||
|
@SuppressWarnings("ProtocolBufferOrdinal")
|
||||||
public class OmOzoneAclMap {
|
public class OmOzoneAclMap {
|
||||||
// per Acl Type user:rights map
|
// per Acl Type user:rights map
|
||||||
private ArrayList<Map<String, OzoneAclRights>> aclMaps;
|
private ArrayList<Map<String, OzoneAclRights>> aclMaps;
|
||||||
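The @SuppressWarnings("ProtocolBufferOrdinal") annotation addresses ErrorProne's check against calling ordinal() on protobuf-generated enums: ordinal() reflects Java declaration order, which can silently change when the .proto file is edited, whereas getNumber() returns the stable field number declared in the .proto. A hedged sketch of the distinction, where SomeProtoEnum is a hypothetical stand-in for a generated enum:

    // Hypothetical stand-in for a protobuf-generated enum; real generated
    // code also carries a getNumber() backed by the .proto tag numbers.
    enum SomeProtoEnum {
      READ(1), WRITE(2);

      private final int number;
      SomeProtoEnum(int number) { this.number = number; }
      public int getNumber() { return number; }
    }

    public class ProtoEnumExample {
      public static void main(String[] args) {
        // Stable: tied to the tag number declared in the .proto file.
        System.out.println(SomeProtoEnum.WRITE.getNumber()); // 2
        // Fragile: tied to Java declaration order; this is what ErrorProne
        // flags on real protobuf enums.
        System.out.println(SomeProtoEnum.WRITE.ordinal());   // 1
      }
    }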
|
@ -18,7 +18,7 @@
|
|||||||
package org.apache.hadoop.ozone.web.response;
|
package org.apache.hadoop.ozone.web.response;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.LinkedList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import org.apache.hadoop.hdds.protocol.StorageType;
|
import org.apache.hadoop.hdds.protocol.StorageType;
|
||||||
@ -85,7 +85,7 @@ public BucketInfo(String volumeName, String bucketName) {
|
|||||||
* Default constructor for BucketInfo.
|
* Default constructor for BucketInfo.
|
||||||
*/
|
*/
|
||||||
public BucketInfo() {
|
public BucketInfo() {
|
||||||
acls = new LinkedList<OzoneAcl>();
|
acls = new ArrayList<>();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
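Replacing LinkedList with ArrayList follows ErrorProne's JdkObsolete check: LinkedList allocates a node per element and has O(n) positional access, while ArrayList is almost always faster for the append-and-iterate pattern these ACL lists follow. The diamond operator also replaces the redundant explicit type argument. A minimal sketch of the swap:

    import java.util.ArrayList;
    import java.util.List;

    public class ListChoiceExample {
      public static void main(String[] args) {
        // Before: new LinkedList<OzoneAcl>() - one heap node per element,
        // O(n) get(i). After: ArrayList with the diamond operator.
        List<String> acls = new ArrayList<>();
        acls.add("user:bilbo:r");
        System.out.println(acls.get(0)); // O(1) positional access
      }
    }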
@ -318,7 +318,7 @@ public void setKeyCount(long keyCount) {
|
|||||||
* for the Json serialization.
|
* for the Json serialization.
|
||||||
*/
|
*/
|
||||||
@JsonFilter(BUCKET_INFO)
|
@JsonFilter(BUCKET_INFO)
|
||||||
class MixIn {
|
static class MixIn {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
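Making MixIn static follows ErrorProne's ClassCanBeStatic check: a non-static inner class silently captures a reference to its enclosing instance, which costs memory and can pin the outer object alive, while a marker class used only as a Jackson mix-in needs no such reference. A minimal sketch of the difference:

    public class NestedClassExample {
      // Non-static: every Inner carries a hidden NestedClassExample.this.
      class Inner { }

      // Static: no implicit outer reference; preferred when outer state is
      // never used, as with the @JsonFilter mix-in above.
      static class Nested { }

      public static void main(String[] args) {
        // A static nested class needs no enclosing instance.
        Nested n = new Nested();
        // An inner class does: note the awkward qualified-new syntax.
        Inner i = new NestedClassExample().new Inner();
        System.out.println(n + " " + i);
      }
    }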
|
@ -18,17 +18,19 @@
|
|||||||
|
|
||||||
package org.apache.hadoop.ozone.web;
|
package org.apache.hadoop.ozone.web;
|
||||||
|
|
||||||
|
|
||||||
import org.apache.hadoop.ozone.OzoneAcl;
|
|
||||||
import org.apache.hadoop.hdds.protocol.StorageType;
|
import org.apache.hadoop.hdds.protocol.StorageType;
|
||||||
import org.apache.hadoop.ozone.web.response.BucketInfo;
|
import org.apache.hadoop.ozone.OzoneAcl;
|
||||||
import org.apache.hadoop.ozone.OzoneConsts;
|
import org.apache.hadoop.ozone.OzoneConsts;
|
||||||
|
import org.apache.hadoop.ozone.web.response.BucketInfo;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.LinkedList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.junit.Assert.assertNotNull;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test Ozone Bucket Info operation.
|
* Test Ozone Bucket Info operation.
|
||||||
*/
|
*/
|
||||||
@ -38,7 +40,7 @@ public void testBucketInfoJson() throws IOException {
|
|||||||
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
||||||
String bucketInfoString = bucketInfo.toJsonString();
|
String bucketInfoString = bucketInfo.toJsonString();
|
||||||
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
||||||
assert(bucketInfo.equals(newBucketInfo));
|
assertEquals(bucketInfo, newBucketInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -46,7 +48,7 @@ public void testBucketInfoDBString() throws IOException {
|
|||||||
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
||||||
String bucketInfoString = bucketInfo.toDBString();
|
String bucketInfoString = bucketInfo.toDBString();
|
||||||
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
||||||
assert(bucketInfo.equals(newBucketInfo));
|
assertEquals(bucketInfo, newBucketInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -54,18 +56,17 @@ public void testBucketInfoAddAcls() throws IOException {
|
|||||||
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
||||||
String bucketInfoString = bucketInfo.toDBString();
|
String bucketInfoString = bucketInfo.toDBString();
|
||||||
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
||||||
assert(bucketInfo.equals(newBucketInfo));
|
assertEquals(bucketInfo, newBucketInfo);
|
||||||
List<OzoneAcl> aclList = new LinkedList<>();
|
List<OzoneAcl> aclList = new ArrayList<>();
|
||||||
|
|
||||||
aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
|
aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
|
||||||
aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
|
aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
|
||||||
newBucketInfo.setAcls(aclList);
|
newBucketInfo.setAcls(aclList);
|
||||||
|
|
||||||
assert(newBucketInfo.getAcls() != null);
|
assertNotNull(newBucketInfo.getAcls());
|
||||||
assert(newBucketInfo.getAcls().size() == 2);
|
assertEquals(2, newBucketInfo.getAcls().size());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testBucketInfoVersionAndType() throws IOException {
|
public void testBucketInfoVersionAndType() throws IOException {
|
||||||
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
|
||||||
@ -75,7 +76,7 @@ public void testBucketInfoVersionAndType() throws IOException {
|
|||||||
String bucketInfoString = bucketInfo.toDBString();
|
String bucketInfoString = bucketInfo.toDBString();
|
||||||
|
|
||||||
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
|
||||||
assert(bucketInfo.equals(newBucketInfo));
|
assertEquals(bucketInfo, newBucketInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
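Swapping the assert keyword for JUnit's assertEquals/assertNotNull matches ErrorProne's guidance for tests: Java assert statements are disabled unless the JVM runs with -ea, so the original checks were silently skipped in a default test run, and JUnit assertions additionally produce a diff-style failure message. A minimal sketch of the behavioral gap:

    import static org.junit.Assert.assertEquals;

    public class AssertExample {
      public static void main(String[] args) {
        // Without the -ea JVM flag this line is a no-op, even though 1 != 2.
        assert 1 == 2 : "never seen in a default run";

        // JUnit's assertion always executes, and on failure reports
        // "expected:<...> but was:<...>".
        assertEquals(1, 1);
        System.out.println("JUnit assertion ran unconditionally");
      }
    }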
|
@ -1,19 +1,18 @@
|
|||||||
/**
|
/**
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* contributor license agreements. See the NOTICE file distributed with this
|
||||||
* distributed with this work for additional information
|
* work for additional information regarding copyright ownership. The ASF
|
||||||
* regarding copyright ownership. The ASF licenses this file
|
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||||
* to you under the Apache License, Version 2.0 (the
|
* "License"); you may not use this file except in compliance with the License.
|
||||||
* "License"); you may not use this file except in compliance
|
* You may obtain a copy of the License at
|
||||||
* with the License. You may obtain a copy of the License at
|
* <p>
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
*
|
* <p>
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
* See the License for the specific language governing permissions and
|
* License for the specific language governing permissions and limitations under
|
||||||
* limitations under the License.
|
* the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package org.apache.hadoop.ozone.web;
|
package org.apache.hadoop.ozone.web;
|
||||||
@ -82,29 +81,29 @@ public void testParseQuota() {
|
|||||||
@Test
|
@Test
|
||||||
public void testVerifyQuota() {
|
public void testVerifyQuota() {
|
||||||
OzoneQuota qt = OzoneQuota.parseQuota("10TB");
|
OzoneQuota qt = OzoneQuota.parseQuota("10TB");
|
||||||
assertEquals(qt.getSize(), 10);
|
assertEquals(10, qt.getSize());
|
||||||
assertEquals(qt.getUnit(), OzoneQuota.Units.TB);
|
assertEquals(OzoneQuota.Units.TB, qt.getUnit());
|
||||||
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L * 1024L));
|
assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes());
|
||||||
|
|
||||||
qt = OzoneQuota.parseQuota("10MB");
|
qt = OzoneQuota.parseQuota("10MB");
|
||||||
assertEquals(qt.getSize(), 10);
|
assertEquals(10, qt.getSize());
|
||||||
assertEquals(qt.getUnit(), OzoneQuota.Units.MB);
|
assertEquals(OzoneQuota.Units.MB, qt.getUnit());
|
||||||
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L));
|
assertEquals(10L * (1024L * 1024L), qt.sizeInBytes());
|
||||||
|
|
||||||
qt = OzoneQuota.parseQuota("10GB");
|
qt = OzoneQuota.parseQuota("10GB");
|
||||||
assertEquals(qt.getSize(), 10);
|
assertEquals(10, qt.getSize());
|
||||||
assertEquals(qt.getUnit(), OzoneQuota.Units.GB);
|
assertEquals(OzoneQuota.Units.GB, qt.getUnit());
|
||||||
assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L));
|
assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes());
|
||||||
|
|
||||||
qt = OzoneQuota.parseQuota("10BYTES");
|
qt = OzoneQuota.parseQuota("10BYTES");
|
||||||
assertEquals(qt.getSize(), 10);
|
assertEquals(10, qt.getSize());
|
||||||
assertEquals(qt.getUnit(), OzoneQuota.Units.BYTES);
|
assertEquals(OzoneQuota.Units.BYTES, qt.getUnit());
|
||||||
assertEquals(qt.sizeInBytes(), 10L);
|
assertEquals(10L, qt.sizeInBytes());
|
||||||
|
|
||||||
OzoneQuota emptyQuota = new OzoneQuota();
|
OzoneQuota emptyQuota = new OzoneQuota();
|
||||||
assertEquals(emptyQuota.sizeInBytes(), -1L);
|
assertEquals(-1L, emptyQuota.sizeInBytes());
|
||||||
assertEquals(emptyQuota.getSize(), 0);
|
assertEquals(0, emptyQuota.getSize());
|
||||||
assertEquals(emptyQuota.getUnit(), OzoneQuota.Units.UNDEFINED);
|
assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -23,8 +23,7 @@
|
|||||||
import org.apache.hadoop.hdds.scm.container.ContainerID;
|
import org.apache.hadoop.hdds.scm.container.ContainerID;
|
||||||
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
|
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
|
||||||
import org.apache.hadoop.hdds.scm.container.ContainerManager;
|
import org.apache.hadoop.hdds.scm.container.ContainerManager;
|
||||||
import org.apache.hadoop.hdds.scm.container.common.helpers
|
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
|
||||||
.ContainerWithPipeline;
|
|
||||||
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
|
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
|
||||||
import org.apache.hadoop.ozone.MiniOzoneCluster;
|
import org.apache.hadoop.ozone.MiniOzoneCluster;
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
@ -36,11 +35,12 @@
|
|||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.concurrent.TimeoutException;
|
import java.util.concurrent.TimeoutException;
|
||||||
|
|
||||||
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
|
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
|
||||||
.ReplicationFactor.THREE;
|
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
|
||||||
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
|
|
||||||
.ReplicationType.RATIS;
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests for Pipeline Closing.
|
||||||
|
*/
|
||||||
public class TestPipelineClose {
|
public class TestPipelineClose {
|
||||||
|
|
||||||
private static MiniOzoneCluster cluster;
|
private static MiniOzoneCluster cluster;
|
||||||
@ -88,7 +88,6 @@ public static void shutdown() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testPipelineCloseWithClosedContainer() throws IOException {
|
public void testPipelineCloseWithClosedContainer() throws IOException {
|
||||||
Set<ContainerID> set = pipelineManager
|
Set<ContainerID> set = pipelineManager
|
||||||
@ -112,8 +111,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException {
|
|||||||
pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
|
pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
|
||||||
Pipeline pipeline1 = pipelineManager
|
Pipeline pipeline1 = pipelineManager
|
||||||
.getPipeline(ratisContainer1.getPipeline().getId());
|
.getPipeline(ratisContainer1.getPipeline().getId());
|
||||||
Assert.assertEquals(pipeline1.getPipelineState(),
|
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
|
||||||
Pipeline.PipelineState.CLOSED);
|
pipeline1.getPipelineState());
|
||||||
pipelineManager.removePipeline(pipeline1.getId());
|
pipelineManager.removePipeline(pipeline1.getId());
|
||||||
for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
|
for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
|
||||||
// Assert that the pipeline has been removed from Node2PipelineMap as well
|
// Assert that the pipeline has been removed from Node2PipelineMap as well
|
||||||
@ -131,12 +130,12 @@ public void testPipelineCloseWithOpenContainer() throws IOException,
|
|||||||
|
|
||||||
ContainerID cId2 = ratisContainer2.getContainerInfo().containerID();
|
ContainerID cId2 = ratisContainer2.getContainerInfo().containerID();
|
||||||
pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
|
pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
|
||||||
Assert.assertEquals(
|
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
|
||||||
pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
|
pipelineManager.getPipeline(
|
||||||
.getPipelineState(), Pipeline.PipelineState.CLOSED);
|
ratisContainer2.getPipeline().getId()).getPipelineState());
|
||||||
Pipeline pipeline2 = pipelineManager
|
Pipeline pipeline2 = pipelineManager
|
||||||
.getPipeline(ratisContainer2.getPipeline().getId());
|
.getPipeline(ratisContainer2.getPipeline().getId());
|
||||||
Assert.assertEquals(pipeline2.getPipelineState(),
|
Assert.assertEquals(Pipeline.PipelineState.CLOSED,
|
||||||
Pipeline.PipelineState.CLOSED);
|
pipeline2.getPipelineState());
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Package info tests.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdds.scm.pipeline;
|
@ -29,7 +29,15 @@
|
|||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.function.Consumer;
|
import java.util.function.Consumer;
|
||||||
|
|
||||||
public class OzoneTestUtils {
|
/**
|
||||||
|
* Helper class for Tests.
|
||||||
|
*/
|
||||||
|
public final class OzoneTestUtils {
|
||||||
|
/**
|
||||||
|
* Never Constructed.
|
||||||
|
*/
|
||||||
|
private OzoneTestUtils() {
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Close containers which contain the blocks listed in
|
* Close containers which contain the blocks listed in
|
||||||
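Declaring OzoneTestUtils final with a private constructor is the standard fix for the utility-class checks: a class holding only static helpers should be neither instantiable nor subclassable. A minimal sketch of the pattern, using a hypothetical helper:

    /** Hypothetical static-helper holder following the utility-class pattern. */
    public final class TestUtilsExample {

      /** Never constructed. */
      private TestUtilsExample() {
      }

      public static int doubleIt(int v) {
        return 2 * v;
      }

      public static void main(String[] args) {
        // Callers use the statics directly; instantiation is impossible.
        System.out.println(TestUtilsExample.doubleIt(21)); // 42
      }
    }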
@ -55,7 +63,7 @@ public static boolean closeContainers(
|
|||||||
.getContainer(ContainerID.valueof(
|
.getContainer(ContainerID.valueof(
|
||||||
blockID.getContainerID())).isOpen());
|
blockID.getContainerID())).isOpen());
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
e.printStackTrace();
|
throw new AssertionError("Failed to close the container", e);
|
||||||
}
|
}
|
||||||
}, omKeyLocationInfoGroups);
|
}, omKeyLocationInfoGroups);
|
||||||
}
|
}
|
||||||
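Replacing e.printStackTrace() with throw new AssertionError(...) matters inside a test helper invoked from a Consumer: printing swallows the failure and lets the test pass vacuously, while rethrowing as AssertionError fails the test and preserves the original IOException as the cause. A minimal sketch of the pattern:

    import java.io.IOException;
    import java.util.function.Consumer;

    public class FailFastExample {
      private static void mayThrow() throws IOException {
        throw new IOException("container still open");
      }

      public static void main(String[] args) {
        Consumer<String> checker = id -> {
          try {
            mayThrow();
          } catch (IOException e) {
            // printStackTrace() here would let the test pass despite the
            // error; AssertionError propagates it, cause attached.
            throw new AssertionError("Failed to close the container", e);
          }
        };
        checker.accept("container-1");
      }
    }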
|
@ -40,12 +40,11 @@
|
|||||||
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
|
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters;
|
import org.apache.hadoop.utils.MetadataKeyFilters;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
|
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
|
||||||
import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
|
|
||||||
import org.apache.hadoop.utils.MetadataStore;
|
import org.apache.hadoop.utils.MetadataStore;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.OutputStream;
|
import java.io.OutputStream;
|
||||||
import java.util.LinkedList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
@ -83,8 +82,8 @@ public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
|
|||||||
storageHandler.createVolume(createVolumeArgs);
|
storageHandler.createVolume(createVolumeArgs);
|
||||||
|
|
||||||
BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
|
BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
|
||||||
bucketArgs.setAddAcls(new LinkedList<>());
|
bucketArgs.setAddAcls(new ArrayList<>());
|
||||||
bucketArgs.setRemoveAcls(new LinkedList<>());
|
bucketArgs.setRemoveAcls(new ArrayList<>());
|
||||||
bucketArgs.setStorageType(StorageType.DISK);
|
bucketArgs.setStorageType(StorageType.DISK);
|
||||||
storageHandler.createBucket(bucketArgs);
|
storageHandler.createBucket(bucketArgs);
|
||||||
|
|
||||||
@ -144,9 +143,6 @@ public List<Long> getAllBlocks(Set<Long> containerIDs)
|
|||||||
public List<Long> getAllBlocks(Long containeID) throws IOException {
|
public List<Long> getAllBlocks(Long containeID) throws IOException {
|
||||||
List<Long> allBlocks = Lists.newArrayList();
|
List<Long> allBlocks = Lists.newArrayList();
|
||||||
MetadataStore meta = getContainerMetadata(containeID);
|
MetadataStore meta = getContainerMetadata(containeID);
|
||||||
MetadataKeyFilter filter =
|
|
||||||
(preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
|
|
||||||
.startsWith(OzoneConsts.DELETING_KEY_PREFIX);
|
|
||||||
List<Map.Entry<byte[], byte[]>> kvs =
|
List<Map.Entry<byte[], byte[]>> kvs =
|
||||||
meta.getRangeKVs(null, Integer.MAX_VALUE,
|
meta.getRangeKVs(null, Integer.MAX_VALUE,
|
||||||
MetadataKeyFilters.getNormalKeyFilter());
|
MetadataKeyFilters.getNormalKeyFilter());
|
||||||
|
@ -61,6 +61,8 @@
|
|||||||
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
|
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
|
||||||
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
|
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
|
||||||
|
|
||||||
|
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Tests Close Container Exception handling by Ozone Client.
|
* Tests Close Container Exception handling by Ozone Client.
|
||||||
*/
|
*/
|
||||||
@ -121,7 +123,8 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
|
|||||||
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
||||||
// write data more than 1 chunk
|
// write data more than 1 chunk
|
||||||
byte[] data = ContainerTestHelper
|
byte[] data = ContainerTestHelper
|
||||||
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
|
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
|
||||||
|
.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
|
|
||||||
Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
|
Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
|
||||||
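The getBytes(UTF_8) changes in this file address ErrorProne's DefaultCharset check: String.getBytes() and new String(byte[]) use the JVM's platform default charset, so the same data can encode differently across machines and locales. Passing StandardCharsets.UTF_8 pins the encoding. A minimal sketch:

    import static java.nio.charset.StandardCharsets.UTF_8;

    public class CharsetExample {
      public static void main(String[] args) {
        String key = "ozone-key-\u00e9"; // non-ASCII char encodes differently

        // Platform-dependent: 12 bytes under UTF-8, 11 under ISO-8859-1.
        byte[] defaultBytes = key.getBytes();

        // Deterministic everywhere: UTF-8 encodes \u00e9 as two bytes.
        byte[] utf8Bytes = key.getBytes(UTF_8);

        System.out.println(defaultBytes.length + " vs " + utf8Bytes.length);
        System.out.println(new String(utf8Bytes, UTF_8)); // round-trips exactly
      }
    }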
@ -141,9 +144,9 @@ public void testBlockWritesWithFlushAndClose() throws Exception {
|
|||||||
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
|
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
|
||||||
|
|
||||||
// Written the same data twice
|
// Written the same data twice
|
||||||
String dataString = new String(data);
|
String dataString = new String(data, UTF_8);
|
||||||
dataString.concat(dataString);
|
dataString = dataString.concat(dataString);
|
||||||
validateData(keyName, dataString.getBytes());
|
validateData(keyName, dataString.getBytes(UTF_8));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
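The dataString.concat(dataString) fix above is ErrorProne's ReturnValueIgnored check at work: String is immutable, so concat returns a new string and leaves the receiver untouched; discarding the result meant the test validated only half the data it intended to. A minimal sketch:

    public class ConcatExample {
      public static void main(String[] args) {
        String data = "abc";

        // Bug: the concatenated string is computed and thrown away.
        data.concat(data);
        System.out.println(data); // still "abc"

        // Fix: reassign the returned value.
        data = data.concat(data);
        System.out.println(data); // "abcabc"
      }
    }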
@ -152,7 +155,8 @@ public void testBlockWritesCloseConsistency() throws Exception {
|
|||||||
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
||||||
// write data more than 1 chunk
|
// write data more than 1 chunk
|
||||||
byte[] data = ContainerTestHelper
|
byte[] data = ContainerTestHelper
|
||||||
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
|
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
|
||||||
|
.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
|
|
||||||
Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
|
Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
|
||||||
@ -184,7 +188,7 @@ public void testMultiBlockWrites() throws Exception {
|
|||||||
// write data more than 1 chunk
|
// write data more than 1 chunk
|
||||||
byte[] data =
|
byte[] data =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
|
ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
|
||||||
.getBytes();
|
.getBytes(UTF_8);
|
||||||
Assert.assertEquals(data.length, 3 * blockSize);
|
Assert.assertEquals(data.length, 3 * blockSize);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
|
|
||||||
@ -199,7 +203,7 @@ public void testMultiBlockWrites() throws Exception {
|
|||||||
// write 1 more block worth of data. It will fail and a new block will be
|
// write 1 more block worth of data. It will fail and a new block will be
|
||||||
// allocated
|
// allocated
|
||||||
key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
|
key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
|
||||||
.getBytes());
|
.getBytes(UTF_8));
|
||||||
|
|
||||||
key.close();
|
key.close();
|
||||||
// read the key from OM again and match the length. The length will still
|
// read the key from OM again and match the length. The length will still
|
||||||
@ -232,13 +236,13 @@ public void testMultiBlockWrites2() throws Exception {
|
|||||||
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
|
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
|
||||||
String dataString =
|
String dataString =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
|
ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
|
||||||
byte[] data = dataString.getBytes();
|
byte[] data = dataString.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
// 3 blocks are completely written to the DataNode.
|
// 3 blocks are completely written to the DataNode.
|
||||||
// Data of length half of chunkSize resides in the chunkOutput stream buffer
|
// Data of length half of chunkSize resides in the chunkOutput stream buffer
|
||||||
String dataString2 =
|
String dataString2 =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
|
ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
|
||||||
key.write(dataString2.getBytes());
|
key.write(dataString2.getBytes(UTF_8));
|
||||||
//get the name of a valid container
|
//get the name of a valid container
|
||||||
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
|
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
|
||||||
.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
|
.setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
|
||||||
@ -257,9 +261,9 @@ public void testMultiBlockWrites2() throws Exception {
|
|||||||
// closeContainerException and remaining data in the chunkOutputStream
|
// closeContainerException and remaining data in the chunkOutputStream
|
||||||
// buffer will be copied into a different allocated block and will be
|
// buffer will be copied into a different allocated block and will be
|
||||||
// committed.
|
// committed.
|
||||||
Assert.assertEquals(dataString.concat(dataString2).getBytes().length,
|
Assert.assertEquals(dataString.concat(dataString2).getBytes(UTF_8).length,
|
||||||
keyInfo.getDataSize());
|
keyInfo.getDataSize());
|
||||||
validateData(keyName, dataString.concat(dataString2).getBytes());
|
validateData(keyName, dataString.concat(dataString2).getBytes(UTF_8));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -274,7 +278,8 @@ public void testMultiBlockWrites3() throws Exception {
|
|||||||
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
|
Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
|
||||||
// write data 3 blocks and one more chunk
|
// write data 3 blocks and one more chunk
|
||||||
byte[] writtenData =
|
byte[] writtenData =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, keyLen).getBytes();
|
ContainerTestHelper.getFixedLengthString(keyString, keyLen)
|
||||||
|
.getBytes(UTF_8);
|
||||||
byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
|
byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
|
||||||
Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
|
Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
@ -367,8 +372,8 @@ private void waitForContainerClose(HddsProtos.ReplicationType type,
|
|||||||
.isContainerPresent(cluster, containerID, dn))) {
|
.isContainerPresent(cluster, containerID, dn))) {
|
||||||
for (DatanodeDetails datanodeDetails : datanodes) {
|
for (DatanodeDetails datanodeDetails : datanodes) {
|
||||||
GenericTestUtils.waitFor(() -> ContainerTestHelper
|
GenericTestUtils.waitFor(() -> ContainerTestHelper
|
||||||
.isContainerClosed(cluster, containerID, datanodeDetails), 500,
|
.isContainerClosed(cluster, containerID, datanodeDetails),
|
||||||
15 * 1000);
|
500, 15 * 1000);
|
||||||
//double check if it's really closed
|
//double check if it's really closed
|
||||||
// (waitFor also throws an exception)
|
// (waitFor also throws an exception)
|
||||||
Assert.assertTrue(ContainerTestHelper
|
Assert.assertTrue(ContainerTestHelper
|
||||||
@ -395,7 +400,7 @@ public void testDiscardPreallocatedBlocks() throws Exception {
|
|||||||
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
|
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
|
||||||
String dataString =
|
String dataString =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
|
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
|
||||||
byte[] data = dataString.getBytes();
|
byte[] data = dataString.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
List<OmKeyLocationInfo> locationInfos =
|
List<OmKeyLocationInfo> locationInfos =
|
||||||
new ArrayList<>(groupOutputStream.getLocationInfoList());
|
new ArrayList<>(groupOutputStream.getLocationInfoList());
|
||||||
@ -411,7 +416,7 @@ public void testDiscardPreallocatedBlocks() throws Exception {
|
|||||||
waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
|
waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
|
||||||
dataString =
|
dataString =
|
||||||
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
|
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
|
||||||
data = dataString.getBytes();
|
data = dataString.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
|
Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
|
||||||
|
|
||||||
@ -443,7 +448,8 @@ public void testBlockWriteViaRatis() throws Exception {
|
|||||||
String keyName = "ratis";
|
String keyName = "ratis";
|
||||||
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
|
||||||
byte[] data = ContainerTestHelper
|
byte[] data = ContainerTestHelper
|
||||||
.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
|
.getFixedLengthString(keyString, chunkSize + chunkSize / 2)
|
||||||
|
.getBytes(UTF_8);
|
||||||
key.write(data);
|
key.write(data);
|
||||||
|
|
||||||
//get the name of a valid container
|
//get the name of a valid container
|
||||||
@ -462,9 +468,9 @@ public void testBlockWriteViaRatis() throws Exception {
|
|||||||
// updated correctly in OzoneManager once the stream is closed
|
// updated correctly in OzoneManager once the stream is closed
|
||||||
key.close();
|
key.close();
|
||||||
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
|
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
|
||||||
String dataString = new String(data);
|
String dataString = new String(data, UTF_8);
|
||||||
dataString.concat(dataString);
|
dataString = dataString.concat(dataString);
|
||||||
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
|
Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
|
||||||
validateData(keyName, dataString.getBytes());
|
validateData(keyName, dataString.getBytes(UTF_8));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Test utils for Ozone.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone;
|
@ -23,6 +23,7 @@
|
|||||||
import org.apache.hadoop.ozone.TestOzoneHelper;
|
import org.apache.hadoop.ozone.TestOzoneHelper;
|
||||||
import org.apache.log4j.Level;
|
import org.apache.log4j.Level;
|
||||||
import org.apache.log4j.Logger;
|
import org.apache.log4j.Logger;
|
||||||
|
import org.junit.Ignore;
|
||||||
import org.junit.Rule;
|
import org.junit.Rule;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
@ -158,6 +159,7 @@ public void testCreateVolumesInLoop() throws IOException {
|
|||||||
*
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
|
@Ignore("Test is ignored for time being, to be enabled after security.")
|
||||||
public void testGetVolumesByUser() throws IOException {
|
public void testGetVolumesByUser() throws IOException {
|
||||||
testGetVolumesByUser(port);
|
testGetVolumesByUser(port);
|
||||||
}
|
}
|
||||||
@ -167,6 +169,7 @@ public void testGetVolumesByUser() throws IOException {
|
|||||||
*
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
|
@Ignore("Test is ignored for time being, to be enabled after security.")
|
||||||
public void testGetVolumesOfAnotherUser() throws IOException {
|
public void testGetVolumesOfAnotherUser() throws IOException {
|
||||||
super.testGetVolumesOfAnotherUser(port);
|
super.testGetVolumesOfAnotherUser(port);
|
||||||
}
|
}
|
||||||
@ -177,6 +180,7 @@ public void testGetVolumesOfAnotherUser() throws IOException {
|
|||||||
*
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
|
@Ignore("Test is ignored for time being, to be enabled after security.")
|
||||||
public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
|
public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
|
||||||
super.testGetVolumesOfAnotherUserShouldFail(port);
|
super.testGetVolumesOfAnotherUserShouldFail(port);
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* REST client tests.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.web.client;
|
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Rest Client Tests.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.web;
|
@ -26,7 +26,7 @@
|
|||||||
import java.nio.file.DirectoryNotEmptyException;
|
import java.nio.file.DirectoryNotEmptyException;
|
||||||
import java.nio.file.FileAlreadyExistsException;
|
import java.nio.file.FileAlreadyExistsException;
|
||||||
import java.nio.file.NoSuchFileException;
|
import java.nio.file.NoSuchFileException;
|
||||||
import java.util.LinkedList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import org.apache.hadoop.hdds.protocol.StorageType;
|
import org.apache.hadoop.hdds.protocol.StorageType;
|
||||||
@ -211,7 +211,7 @@ List<String> getAcls(BucketArgs args, String tag) {
|
|||||||
args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
|
args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
|
||||||
List<String> filteredSet = null;
|
List<String> filteredSet = null;
|
||||||
if (aclStrings != null) {
|
if (aclStrings != null) {
|
||||||
filteredSet = new LinkedList<>();
|
filteredSet = new ArrayList<>();
|
||||||
for (String s : aclStrings) {
|
for (String s : aclStrings) {
|
||||||
if (s.startsWith(tag)) {
|
if (s.startsWith(tag)) {
|
||||||
filteredSet.add(s.replaceFirst(tag, ""));
|
filteredSet.add(s.replaceFirst(tag, ""));
|
||||||
|
@ -312,5 +312,6 @@ void renameKey(KeyArgs args, String toKeyName)
|
|||||||
/**
|
/**
|
||||||
* Closes all the opened resources.
|
* Closes all the opened resources.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
void close();
|
void close();
|
||||||
}
|
}
|
||||||
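Adding @Override where a method implements an interface member, as with close() here, follows ErrorProne's MissingOverride check: the annotation turns an accidental signature mismatch into a compile error instead of a silently unrelated extra method. A minimal sketch:

    import java.io.Closeable;

    public class OverrideExample implements Closeable {
      // With @Override, renaming or mistyping this method (e.g. "clsoe")
      // becomes a compile-time error rather than an unused extra method.
      @Override
      public void close() {
        System.out.println("resources released");
      }

      public static void main(String[] args) {
        try (OverrideExample e = new OverrideExample()) {
          System.out.println("using resource");
        }
      }
    }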
|
@ -37,9 +37,9 @@ public void testErrorGen() {
|
|||||||
OzoneException e = ErrorTable
|
OzoneException e = ErrorTable
|
||||||
.newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
|
.newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
|
||||||
"localhost");
|
"localhost");
|
||||||
assertEquals(e.getHostID(), "localhost");
|
assertEquals("localhost", e.getHostID());
|
||||||
assertEquals(e.getShortMessage(),
|
assertEquals(ErrorTable.ACCESS_DENIED.getShortMessage(),
|
||||||
ErrorTable.ACCESS_DENIED.getShortMessage());
|
e.getShortMessage());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -0,0 +1,22 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* Tests the REST error codes.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.web;
|
@ -257,6 +257,7 @@ private List<OzoneAcl> getUpdatedAclList(List<OzoneAcl> existingAcls,
|
|||||||
* @param bucketName - Name of the bucket.
|
* @param bucketName - Name of the bucket.
|
||||||
* @throws IOException - on Failure.
|
* @throws IOException - on Failure.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public void deleteBucket(String volumeName, String bucketName)
|
public void deleteBucket(String volumeName, String bucketName)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
Preconditions.checkNotNull(volumeName);
|
Preconditions.checkNotNull(volumeName);
|
||||||
|
@ -22,7 +22,6 @@
|
|||||||
import org.apache.commons.lang3.StringUtils;
|
import org.apache.commons.lang3.StringUtils;
|
||||||
import org.apache.hadoop.hdds.client.BlockID;
|
import org.apache.hadoop.hdds.client.BlockID;
|
||||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||||
import org.apache.hadoop.hdds.server.ServerUtils;
|
|
||||||
import org.apache.hadoop.hdfs.DFSUtil;
|
import org.apache.hadoop.hdfs.DFSUtil;
|
||||||
import org.apache.hadoop.ozone.OmUtils;
|
import org.apache.hadoop.ozone.OmUtils;
|
||||||
import org.apache.hadoop.ozone.OzoneConsts;
|
import org.apache.hadoop.ozone.OzoneConsts;
|
||||||
@ -117,7 +116,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
|
|||||||
public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
|
public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
|
||||||
File metaDir = OmUtils.getOmDbDir(conf);
|
File metaDir = OmUtils.getOmDbDir(conf);
|
||||||
this.lock = new OzoneManagerLock(conf);
|
this.lock = new OzoneManagerLock(conf);
|
||||||
this.openKeyExpireThresholdMS = 1000 * conf.getInt(
|
this.openKeyExpireThresholdMS = 1000L * conf.getInt(
|
||||||
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
|
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
|
||||||
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
|
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
|
||||||
|
|
||||||
|
@ -989,6 +989,7 @@ public void setBucketProperty(OmBucketArgs args)
|
|||||||
* @param bucket - Name of the bucket.
|
* @param bucket - Name of the bucket.
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public void deleteBucket(String volume, String bucket) throws IOException {
|
public void deleteBucket(String volume, String bucket) throws IOException {
|
||||||
Map<String, String> auditMap = buildAuditMap(volume);
|
Map<String, String> auditMap = buildAuditMap(volume);
|
||||||
auditMap.put(OzoneConsts.BUCKET, bucket);
|
auditMap.put(OzoneConsts.BUCKET, bucket);
|
||||||
|
@ -66,6 +66,7 @@ public class ServiceListJSONServlet extends HttpServlet {
|
|||||||
|
|
||||||
private transient OzoneManager om;
|
private transient OzoneManager om;
|
||||||
|
|
||||||
|
@Override
|
||||||
public void init() throws ServletException {
|
public void init() throws ServletException {
|
||||||
this.om = (OzoneManager) getServletContext()
|
this.om = (OzoneManager) getServletContext()
|
||||||
.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
|
.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
|
||||||
|
@ -35,7 +35,7 @@
|
|||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.LinkedList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
@ -73,7 +73,7 @@ private void addVolumeToOwnerList(String volume, String owner,
|
|||||||
// Get the volume list
|
// Get the volume list
|
||||||
byte[] dbUserKey = metadataManager.getUserKey(owner);
|
byte[] dbUserKey = metadataManager.getUserKey(owner);
|
||||||
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
|
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
|
||||||
List<String> prevVolList = new LinkedList<>();
|
List<String> prevVolList = new ArrayList<>();
|
||||||
if (volumeList != null) {
|
if (volumeList != null) {
|
||||||
VolumeList vlist = VolumeList.parseFrom(volumeList);
|
VolumeList vlist = VolumeList.parseFrom(volumeList);
|
||||||
prevVolList.addAll(vlist.getVolumeNamesList());
|
prevVolList.addAll(vlist.getVolumeNamesList());
|
||||||
@ -98,7 +98,7 @@ private void delVolumeFromOwnerList(String volume, String owner,
|
|||||||
// Get the volume list
|
// Get the volume list
|
||||||
byte[] dbUserKey = metadataManager.getUserKey(owner);
|
byte[] dbUserKey = metadataManager.getUserKey(owner);
|
||||||
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
|
byte[] volumeList = metadataManager.getUserTable().get(dbUserKey);
|
||||||
List<String> prevVolList = new LinkedList<>();
|
List<String> prevVolList = new ArrayList<>();
|
||||||
if (volumeList != null) {
|
if (volumeList != null) {
|
||||||
VolumeList vlist = VolumeList.parseFrom(volumeList);
|
VolumeList vlist = VolumeList.parseFrom(volumeList);
|
||||||
prevVolList.addAll(vlist.getVolumeNamesList());
|
prevVolList.addAll(vlist.getVolumeNamesList());
|
||||||
@ -140,7 +140,7 @@ public void createVolume(OmVolumeArgs args) throws IOException {
|
|||||||
|
|
||||||
try(WriteBatch batch = new WriteBatch()) {
|
try(WriteBatch batch = new WriteBatch()) {
|
||||||
// Write the vol info
|
// Write the vol info
|
||||||
List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
|
List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
|
||||||
for (Map.Entry<String, String> entry :
|
for (Map.Entry<String, String> entry :
|
||||||
args.getKeyValueMap().entrySet()) {
|
args.getKeyValueMap().entrySet()) {
|
||||||
metadataList.add(HddsProtos.KeyValue.newBuilder()
|
metadataList.add(HddsProtos.KeyValue.newBuilder()
|
||||||
@@ -250,6 +250,7 @@ public void setOwner(String volume, String owner) throws IOException {
    * @param quota - Quota in bytes.
    * @throws IOException
    */
+  @Override
   public void setQuota(String volume, long quota) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -293,6 +294,7 @@ public void setQuota(String volume, long quota) throws IOException {
    * @return VolumeArgs or exception is thrown.
    * @throws IOException
    */
+  @Override
   public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -384,6 +386,7 @@ public void deleteVolume(String volume) throws IOException {
    * @return true if the user has access for the volume, false otherwise
    * @throws IOException
    */
+  @Override
   public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
       throws IOException {
     Preconditions.checkNotNull(volume);
@@ -30,6 +30,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -62,7 +63,7 @@ public void testWriteGroupOutputStream() throws Exception {
     assertEquals(0, groupOutputStream.getByteOffset());
 
     String dataString = RandomStringUtils.randomAscii(500);
-    byte[] data = dataString.getBytes();
+    byte[] data = dataString.getBytes(UTF_8);
     groupOutputStream.write(data, 0, data.length);
     assertEquals(500, groupOutputStream.getByteOffset());
 
@@ -95,7 +96,8 @@ public void testErrorWriteGroupOutputStream() throws Exception {
     assertEquals(0, groupOutputStream.getByteOffset());
 
     // first writes of 100 bytes should succeed
-    groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
+    groupOutputStream.write(RandomStringUtils.randomAscii(100)
+        .getBytes(UTF_8));
     assertEquals(100, groupOutputStream.getByteOffset());
 
     // second writes of 500 bytes should fail, as there should be only 400
@@ -104,7 +106,8 @@ public void testErrorWriteGroupOutputStream() throws Exception {
     // other add more informative error code rather than exception, need to
     // change this part.
     exception.expect(Exception.class);
-    groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
+    groupOutputStream.write(RandomStringUtils.randomAscii(500)
+        .getBytes(UTF_8));
     assertEquals(100, groupOutputStream.getByteOffset());
   }
 }
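
Note: every getBytes() and new String(byte[]) change in these tests fixes ErrorProne's DefaultCharset warning. Without an explicit charset both calls use the JVM's platform default, so the same test could pass on a UTF-8 Linux host and fail on a machine with a different default encoding. The portable pattern:

    import static java.nio.charset.StandardCharsets.UTF_8;

    byte[] data = "some text".getBytes(UTF_8);    // explicit encoding
    String round = new String(data, UTF_8);       // decode with the same charset
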
@@ -115,7 +118,7 @@ public void testReadGroupInputStream() throws Exception {
     ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
 
     String dataString = RandomStringUtils.randomAscii(500);
-    byte[] buf = dataString.getBytes();
+    byte[] buf = dataString.getBytes(UTF_8);
     int offset = 0;
     for (int i = 0; i < 5; i++) {
       int tempOffset = offset;
@@ -126,12 +129,12 @@ public void testReadGroupInputStream() throws Exception {
           new ByteArrayInputStream(buf, tempOffset, 100);
 
       @Override
-      public void seek(long pos) throws IOException {
+      public synchronized void seek(long pos) throws IOException {
         throw new UnsupportedOperationException();
       }
 
       @Override
-      public long getPos() throws IOException {
+      public synchronized long getPos() throws IOException {
         return pos;
       }
 
@@ -142,12 +145,13 @@ public boolean seekToNewSource(long targetPos)
       }
 
       @Override
-      public int read() throws IOException {
+      public synchronized int read() throws IOException {
         return in.read();
       }
 
       @Override
-      public int read(byte[] b, int off, int len) throws IOException {
+      public synchronized int read(byte[] b, int off, int len)
+          throws IOException {
         int readLen = in.read(b, off, len);
         pos += readLen;
         return readLen;
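
Note: adding synchronized to these overrides in the test's anonymous ChunkInputStream subclass likely addresses ErrorProne's UnsynchronizedOverridesSynchronized check: when the superclass method is synchronized, an unsynchronized override silently drops the locking the base class appears to promise. A minimal sketch with a hypothetical base class:

    class Base {
      synchronized int next() { return 0; }
    }

    class Sub extends Base {
      @Override
      synchronized int next() {   // keep the monitor the base method acquired
        return 1;
      }
    }
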
@@ -162,7 +166,7 @@ public int read(byte[] b, int off, int len) throws IOException {
     int len = groupInputStream.read(resBuf, 0, 500);
 
     assertEquals(500, len);
-    assertEquals(dataString, new String(resBuf));
+    assertEquals(dataString, new String(resBuf, UTF_8));
   }
 }
 
@@ -172,7 +176,7 @@ public void testErrorReadGroupInputStream() throws Exception {
     ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
 
     String dataString = RandomStringUtils.randomAscii(500);
-    byte[] buf = dataString.getBytes();
+    byte[] buf = dataString.getBytes(UTF_8);
     int offset = 0;
     for (int i = 0; i < 5; i++) {
       int tempOffset = offset;
@@ -183,28 +187,29 @@ public void testErrorReadGroupInputStream() throws Exception {
           new ByteArrayInputStream(buf, tempOffset, 100);
 
       @Override
-      public void seek(long pos) throws IOException {
+      public synchronized void seek(long pos) throws IOException {
         throw new UnsupportedOperationException();
       }
 
       @Override
-      public long getPos() throws IOException {
+      public synchronized long getPos() throws IOException {
         return pos;
       }
 
       @Override
-      public boolean seekToNewSource(long targetPos)
+      public synchronized boolean seekToNewSource(long targetPos)
           throws IOException {
         throw new UnsupportedOperationException();
       }
 
       @Override
-      public int read() throws IOException {
+      public synchronized int read() throws IOException {
         return in.read();
       }
 
       @Override
-      public int read(byte[] b, int off, int len) throws IOException {
+      public synchronized int read(byte[] b, int off, int len)
+          throws IOException {
         int readLen = in.read(b, off, len);
         pos += readLen;
         return readLen;
@@ -222,14 +227,14 @@ public int read(byte[] b, int off, int len) throws IOException {
     assertEquals(60, groupInputStream.getRemainingOfIndex(3));
     assertEquals(340, len);
     assertEquals(dataString.substring(0, 340),
-        new String(resBuf).substring(0, 340));
+        new String(resBuf, UTF_8).substring(0, 340));
 
     // read following 300 bytes, but only 200 left
     len = groupInputStream.read(resBuf, 340, 260);
     assertEquals(4, groupInputStream.getCurrentStreamIndex());
     assertEquals(0, groupInputStream.getRemainingOfIndex(4));
     assertEquals(160, len);
-    assertEquals(dataString, new String(resBuf).substring(0, 500));
+    assertEquals(dataString, new String(resBuf, UTF_8).substring(0, 500));
 
     // further read should get EOF
     len = groupInputStream.read(resBuf, 0, 1);
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om;
 
-import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
@@ -49,6 +48,8 @@
 import org.rocksdb.Statistics;
 import org.rocksdb.StatsLevel;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test class for @{@link KeyManagerImpl}.
  * */
@@ -93,11 +94,11 @@ private void setupMocks() throws Exception {
     Mockito.when(metadataManager.getLock())
         .thenReturn(new OzoneManagerLock(conf));
     Mockito.when(metadataManager.getVolumeKey(VOLUME_NAME))
-        .thenReturn(VOLUME_NAME.getBytes());
+        .thenReturn(VOLUME_NAME.getBytes(UTF_8));
     Mockito.when(metadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME))
-        .thenReturn(BUCKET_NAME.getBytes());
+        .thenReturn(BUCKET_NAME.getBytes(UTF_8));
     Mockito.when(metadataManager.getOpenKeyBytes(VOLUME_NAME, BUCKET_NAME,
-        KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes());
+        KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes(UTF_8));
   }
 
   private void setupRocksDb() throws Exception {
@@ -129,11 +130,11 @@ private void setupRocksDb() throws Exception {
 
     rdbStore = new RDBStore(folder.newFolder(), options, configSet);
     rdbTable = rdbStore.getTable("testTable");
-    rdbTable.put(VOLUME_NAME.getBytes(),
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    rdbTable.put(BUCKET_NAME.getBytes(),
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    rdbTable.put(KEY_NAME.getBytes(), keyData.toByteArray());
+    rdbTable.put(VOLUME_NAME.getBytes(UTF_8),
+        RandomStringUtils.random(10).getBytes(UTF_8));
+    rdbTable.put(BUCKET_NAME.getBytes(UTF_8),
+        RandomStringUtils.random(10).getBytes(UTF_8));
+    rdbTable.put(KEY_NAME.getBytes(UTF_8), keyData.toByteArray());
   }
 
   @Test
@@ -252,6 +252,7 @@ private class RenameIterator extends OzoneListingIterator {
       LOG.trace("rename from:{} to:{}", srcKey, dstKey);
     }
 
+    @Override
     boolean processKey(String key) throws IOException {
       String newKeyName = dstKey.concat(key.substring(srcKey.length()));
       bucket.renameKey(key, newKeyName);
@@ -370,6 +371,7 @@ && listStatus(f).length != 0) {
       }
     }
 
+    @Override
     boolean processKey(String key) throws IOException {
       if (key.equals("")) {
         LOG.trace("Skipping deleting root directory");
@@ -496,6 +498,7 @@ private class ListStatusIterator extends OzoneListingIterator {
      * @return always returns true
      * @throws IOException
      */
+    @Override
     boolean processKey(String key) throws IOException {
       Path keyPath = new Path(OZONE_URI_DELIMITER + key);
       if (key.equals(getPathKey())) {
@@ -51,6 +51,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -176,7 +177,7 @@ public void testOzFsReadWrite() throws IOException {
     byte[] buffer = new byte[stringLen];
     // This read will not change the offset inside the file
     int readBytes = inputStream.read(0, buffer, 0, buffer.length);
-    String out = new String(buffer, 0, buffer.length);
+    String out = new String(buffer, 0, buffer.length, UTF_8);
     assertEquals(data, out);
     assertEquals(readBytes, buffer.length);
     assertEquals(0, inputStream.getPos());
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Ozone FS Contract tests.
+ */
+package org.apache.hadoop.fs.ozone;
@@ -49,6 +49,7 @@ public AuthorizationHeaderV2(String auth) throws OS3Exception {
    *
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
   public void parseHeader() throws OS3Exception {
     String[] split = authHeader.split(" ");
     if (split.length != 2) {
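
Note: @SuppressWarnings("StringSplitter") acknowledges ErrorProne's StringSplitter warning instead of changing behavior. String.split has surprising edge cases, and the suggested Guava Splitter replacement handles them differently, so suppressing is the conservative fix for parsing code like this. The edge cases being accepted:

    String[] a = "a,b,,".split(",");   // length 2: trailing empty strings dropped
    String[] b = "".split(",");        // length 1: [""], not an empty array
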
@@ -60,6 +60,7 @@ public AuthorizationHeaderV4(String header) throws OS3Exception {
    * Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
   public void parseAuthHeader() throws OS3Exception {
     String[] split = authHeader.split(" ");
 
@@ -54,6 +54,7 @@ public class Credential {
    *
    * @throws OS3Exception
    */
+  @SuppressWarnings("StringSplitter")
   public void parseCredential() throws OS3Exception {
     String[] split = credential.split("/");
     if (split.length == 5) {
@@ -209,6 +209,7 @@ public String getOzoneBucketMapping(String s3BucketName) throws IOException {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneVolumeName(String s3BucketName) throws IOException {
     if (bucketVolumeMap.get(s3BucketName) == null) {
       throw new IOException("S3_BUCKET_NOT_FOUND");
@@ -217,6 +218,7 @@ public String getOzoneVolumeName(String s3BucketName) throws IOException {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneBucketName(String s3BucketName) throws IOException {
     if (bucketVolumeMap.get(s3BucketName) == null) {
       throw new IOException("S3_BUCKET_NOT_FOUND");
@@ -63,7 +63,7 @@ public void setup() throws Exception {
   @Test
   public void testBucketEndpoint() throws Exception {
     Response response = bucketEndpoint.delete(bucketName);
-    assertEquals(response.getStatus(), HttpStatus.SC_NO_CONTENT);
+    assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus());
 
   }
 
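
Note: swapping the assertEquals arguments follows the JUnit convention that the expected value comes first. The order does not affect whether the test passes, but a wrong order inverts the failure message. With the fixed order a failure reads correctly:

    // On failure: java.lang.AssertionError: expected:<204> but was:<500>
    assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus());
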
@@ -23,6 +23,8 @@
 import java.io.IOException;
 
 import org.junit.Assert;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.*;
 import org.junit.Test;
 
@@ -40,7 +42,7 @@ public void fromStreamWithNamespace() throws IOException {
             + ".com/doc/2006-03-01/\"><Object>key1</Object><Object>key2"
             + "</Object><Object>key3"
             + "</Object></Delete>")
-            .getBytes());
+            .getBytes(UTF_8));
 
     //WHEN
     MultiDeleteRequest multiDeleteRequest =
@@ -58,7 +60,7 @@ public void fromStreamWithoutNamespace() throws IOException {
         ("<Delete><Object>key1</Object><Object>key2"
             + "</Object><Object>key3"
             + "</Object></Delete>")
-            .getBytes());
+            .getBytes(UTF_8));
 
     //WHEN
     MultiDeleteRequest multiDeleteRequest =
@@ -37,6 +37,8 @@
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test get object.
  */
@@ -54,15 +56,16 @@ public void get() throws IOException, OS3Exception {
     OzoneBucket bucket =
         volume.getBucket("b1");
     OzoneOutputStream keyStream =
-        bucket.createKey("key1", CONTENT.getBytes().length);
-    keyStream.write(CONTENT.getBytes());
+        bucket.createKey("key1", CONTENT.getBytes(UTF_8).length);
+    keyStream.write(CONTENT.getBytes(UTF_8));
     keyStream.close();
 
     ObjectEndpoint rest = new ObjectEndpoint();
     rest.setClient(client);
     HttpHeaders headers = Mockito.mock(HttpHeaders.class);
     rest.setHeaders(headers);
-    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
+    ByteArrayInputStream body =
+        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
 
     //WHEN
     rest.get("b1", "key1", body);
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 
 import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -69,9 +70,9 @@ public void testHeadObject() throws Exception {
     //GIVEN
     String value = RandomStringUtils.randomAlphanumeric(32);
     OzoneOutputStream out = bucket.createKey("key1",
-        value.getBytes().length, ReplicationType.STAND_ALONE,
+        value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
         ReplicationFactor.ONE);
-    out.write(value.getBytes());
+    out.write(value.getBytes(UTF_8));
     out.close();
 
     //WHEN
@@ -79,7 +80,7 @@ public void testHeadObject() throws Exception {
 
     //THEN
     Assert.assertEquals(200, response.getStatus());
-    Assert.assertEquals(value.getBytes().length,
+    Assert.assertEquals(value.getBytes(UTF_8).length,
         Long.parseLong(response.getHeaderString("Content-Length")));
 
     DateTimeFormatter.RFC_1123_DATE_TIME
@@ -91,7 +92,8 @@ public void testHeadObject() throws Exception {
   public void testHeadFailByBadName() throws Exception {
     //Head an object that doesn't exist.
     try {
-      keyEndpoint.head(bucketName, "badKeyName");
+      Response response = keyEndpoint.head(bucketName, "badKeyName");
+      Assert.assertEquals(404, response.getStatus());
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
       Assert.assertTrue(ex.getErrorMessage().contains("object does not exist"));
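
Note: capturing the Response here likely answers an ErrorProne warning about an ignored return value. As previously written, a head() call that reported the error through a status code rather than an exception would have let the test pass without checking anything; now both the throwing and non-throwing paths are asserted:

    // before: return value silently discarded
    keyEndpoint.head(bucketName, "badKeyName");
    // after: the non-throwing path is verified too
    Response response = keyEndpoint.head(bucketName, "badKeyName");
    Assert.assertEquals(404, response.getStatus());
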
@@ -286,7 +286,7 @@ private Thread getProgressBarThread() {
       long maxValue;
 
       currentValue = () -> numberOfKeysAdded.get();
-      maxValue = numOfVolumes *
+      maxValue = (long) numOfVolumes *
           numOfBuckets *
           numOfKeys;
 
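
Note: the (long) cast fixes a genuine arithmetic bug: numOfVolumes * numOfBuckets * numOfKeys is evaluated entirely in int arithmetic and can wrap around before the result is widened to long. Casting the first operand promotes the whole multiplication chain to 64 bits:

    int volumes = 100_000, buckets = 1_000, keys = 1_000;
    long wrong = volumes * buckets * keys;           // overflows int, then widens
    long right = (long) volumes * buckets * keys;    // 100_000_000_000
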
@@ -88,8 +88,8 @@ public void testRestart() throws Exception {
     String expectedSnapFile =
         storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
             termIndexBeforeRestart.getIndex()).getAbsolutePath();
-    Assert.assertEquals(snapshotInfo.getFile().getPath().toString(),
-        expectedSnapFile);
+    Assert.assertEquals(expectedSnapFile,
+        snapshotInfo.getFile().getPath().toString());
     Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);
 
     // After restart the term index might have progressed to apply pending
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Freon Ozone Load Generator.
+ */
+package org.apache.hadoop.ozone.freon;
@@ -45,10 +45,10 @@
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.UUID;
 
@@ -182,7 +182,7 @@ public void testOmDB() throws Exception {
     String sql = "SELECT * FROM volumeList";
     ResultSet rs = executeQuery(conn, sql);
     List<String> expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String userNameRs = rs.getString("userName");
       String volumeNameRs = rs.getString("volumeName");
@@ -194,7 +194,7 @@ public void testOmDB() throws Exception {
     sql = "SELECT * FROM volumeInfo";
     rs = executeQuery(conn, sql);
     expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String adName = rs.getString("adminName");
       String ownerName = rs.getString("ownerName");
@@ -208,7 +208,7 @@ public void testOmDB() throws Exception {
     sql = "SELECT * FROM aclInfo";
     rs = executeQuery(conn, sql);
     expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+        new ArrayList<>(Arrays.asList(volumeName0, volumeName1));
     while (rs.next()) {
       String adName = rs.getString("adminName");
       String ownerName = rs.getString("ownerName");
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * OM to SQL Converter. Currently broken.
+ */
+package org.apache.hadoop.ozone.om;