HDDS-719. Remove Ozone dependencies on Apache Hadoop 3.2.0. Contributed by Arpit Agarwal.

Arpit Agarwal 2018-10-24 16:17:05 -07:00
parent 021caaa55e
commit 244afaba4a
7 changed files with 139 additions and 20 deletions

View File

@@ -41,9 +41,11 @@
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.file.Paths;
+import java.util.Calendar;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.TimeZone;

 import static org.apache.hadoop.hdfs.DFSConfigKeys
     .DFS_DATANODE_DNS_INTERFACE_KEY;
@@ -69,6 +71,8 @@ public final class HddsUtils {
   public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
   public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
       "OzoneScmServiceInstance";

+  private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC");
+
   private static final int NO_PORT = -1;
@@ -391,4 +395,11 @@ public static ObjectName registerWithJmxProperties(
     }
   }

+  /**
+   * Get the current UTC time in milliseconds.
+   * @return the current UTC time in milliseconds.
+   */
+  public static long getUtcTime() {
+    return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
+  }
 }
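
A note on the new helper: epoch milliseconds do not depend on the time zone, so Calendar.getInstance(UTC_ZONE).getTimeInMillis() returns the same value as System.currentTimeMillis(); the point of adding the method is to stop HDDS code from calling org.apache.hadoop.util.Time.getUtcTime(), which is not present in every Hadoop version HDDS builds against. A minimal standalone check (the class name UtcTimeCheck is illustrative, not part of the patch):

import java.util.Calendar;
import java.util.TimeZone;

public class UtcTimeCheck {
  public static void main(String[] args) {
    // A Calendar's internal value is epoch milliseconds; the zone only
    // affects how that value is split into fields, not the value itself.
    long viaCalendar =
        Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
    long viaSystem = System.currentTimeMillis();
    // The two calls run a few microseconds apart, so allow a small delta.
    System.out.println(Math.abs(viaSystem - viaCalendar) <= 5); // true
  }
}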

View File

@@ -18,6 +18,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
@@ -36,7 +37,6 @@
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -465,7 +465,7 @@ private UniqueId() {}
    * @return unique long value
    */
   public static synchronized long next() {
-    long utcTime = Time.getUtcTime();
+    long utcTime = HddsUtils.getUtcTime();
     if ((utcTime & 0xFFFF000000000000L) == 0) {
       return utcTime << Short.SIZE | (offset++ & 0x0000FFFF);
     }
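
For context, UniqueId.next() packs the UTC timestamp and a counter into one long: the millisecond clock occupies the upper 48 bits (the guard (utcTime & 0xFFFF000000000000L) == 0 checks that it still fits) and a wrapping 16-bit offset occupies the lower 16, so up to 65536 distinct IDs can be minted per millisecond. A self-contained sketch of the same layout (names are illustrative; System.currentTimeMillis() stands in for HddsUtils.getUtcTime()):

public final class UniqueIdSketch {
  private static short offset = 0;

  private UniqueIdSketch() { }

  public static synchronized long next() {
    long utcTime = System.currentTimeMillis(); // stand-in for HddsUtils.getUtcTime()
    // Millisecond timestamps fit in 48 bits until roughly the year 10889.
    if ((utcTime & 0xFFFF000000000000L) == 0) {
      // Upper 48 bits: time; lower 16 bits: wrapping counter.
      return utcTime << Short.SIZE | (offset++ & 0x0000FFFF);
    }
    throw new IllegalStateException("Timestamp no longer fits in 48 bits");
  }

  public static void main(String[] args) {
    System.out.println(next());
    System.out.println(next()); // differs only in the low 16 bits
  }
}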

View File

@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import java.lang.reflect.Field;
+
+/**
+ * This class includes some functions copied from Mockito's
+ * Whitebox class for portability reasons.
+ *
+ * Whitebox methods are accessed differently in different
+ * versions of Hadoop. Specifically the availability of the class
+ * changed from Apache Hadoop 3.1.0 to Hadoop 3.2.0.
+ *
+ * Duplicating the test code is ugly but it allows building
+ * HDDS portably.
+ */
+public final class HddsWhiteboxTestUtils {
+
+  /**
+   * Private constructor to disallow construction.
+   */
+  private HddsWhiteboxTestUtils() {
+  }
+
+  /**
+   * Get the field of the target object.
+   * @param target target object
+   * @param field field name
+   * @return the field of the object
+   */
+  public static Object getInternalState(Object target, String field) {
+    Class<?> c = target.getClass();
+    try {
+      Field f = getFieldFromHierarchy(c, field);
+      f.setAccessible(true);
+      return f.get(target);
+    } catch (Exception e) {
+      throw new RuntimeException(
+          "Unable to get internal state on a private field.", e);
+    }
+  }
+
+  /**
+   * Set the field of the target object.
+   * @param target target object
+   * @param field field name
+   * @param value value to set
+   */
+  public static void setInternalState(
+      Object target, String field, Object value) {
+    Class<?> c = target.getClass();
+    try {
+      Field f = getFieldFromHierarchy(c, field);
+      f.setAccessible(true);
+      f.set(target, value);
+    } catch (Exception e) {
+      throw new RuntimeException(
+          "Unable to set internal state on a private field.", e);
+    }
+  }
+
+  private static Field getFieldFromHierarchy(Class<?> clazz, String field) {
+    Field f = getField(clazz, field);
+    while (f == null && clazz != Object.class) {
+      clazz = clazz.getSuperclass();
+      f = getField(clazz, field);
+    }
+    if (f == null) {
+      throw new RuntimeException(
+          "You want me to set value to this field: '" + field +
+              "' on this class: '" + clazz.getSimpleName() +
+              "' but this field is not declared within hierarchy " +
+              "of this class!");
+    }
+    return f;
+  }
+
+  private static Field getField(Class<?> clazz, String field) {
+    try {
+      return clazz.getDeclaredField(field);
+    } catch (NoSuchFieldException e) {
+      return null;
+    }
+  }
+}
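
Hypothetical usage of the new helper, reading and then replacing a private field by name (the Service class and its endpoint field are invented for illustration):

import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;

public class WhiteboxDemo {
  static class Service {
    private String endpoint = "http://localhost:9862";
  }

  public static void main(String[] args) {
    Service svc = new Service();
    // Read the private field through reflection.
    Object before = HddsWhiteboxTestUtils.getInternalState(svc, "endpoint");
    // Overwrite it, e.g. to point a test at a stub server.
    HddsWhiteboxTestUtils.setInternalState(svc, "endpoint", "http://stub:9862");
    System.out.println(before + " -> "
        + HddsWhiteboxTestUtils.getInternalState(svc, "endpoint"));
  }
}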

View File

@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -42,7 +43,6 @@
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -591,11 +591,11 @@ public static BlockID getTestBlockID(long containerID) {
     // Add 2ms delay so that localID based on UtcTime
     // won't collide.
     sleep(2);
-    return new BlockID(containerID, Time.getUtcTime());
+    return new BlockID(containerID, HddsUtils.getUtcTime());
   }

   public static long getTestContainerID() {
-    return Time.getUtcTime();
+    return HddsUtils.getUtcTime();
   }

   public static boolean isContainerClosed(MiniOzoneCluster cluster,
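
The 2ms sleep above exists because getUtcTime() has millisecond resolution: two calls within the same millisecond return the same value, which would produce colliding local IDs. A small demonstration of the effect (using System.currentTimeMillis() as a stand-in for the helper):

public class MillisCollisionDemo {
  public static void main(String[] args) throws InterruptedException {
    // Back-to-back reads usually land in the same millisecond...
    long first = System.currentTimeMillis();
    long second = System.currentTimeMillis();
    System.out.println("same ms without sleep: " + (first == second));
    // ...while sleeping 2ms guarantees a strictly larger timestamp.
    Thread.sleep(2);
    long third = System.currentTimeMillis();
    System.out.println("distinct after sleep:  " + (third > second));
  }
}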

View File

@@ -21,6 +21,7 @@
 import java.io.IOException;

+import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -68,8 +69,8 @@ public void shutdown() {
   @Test
   public void testVolumeOps() throws IOException {
     VolumeManager volumeManager =
-        (VolumeManager) org.apache.hadoop.test.Whitebox
-            .getInternalState(ozoneManager, "volumeManager");
+        (VolumeManager) HddsWhiteboxTestUtils.getInternalState(
+            ozoneManager, "volumeManager");
     VolumeManager mockVm = Mockito.spy(volumeManager);

     Mockito.doNothing().when(mockVm).createVolume(null);
@@ -79,7 +80,7 @@ public void testVolumeOps() throws IOException {
     Mockito.doNothing().when(mockVm).setOwner(null, null);
     Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(
+    HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "volumeManager", mockVm);

     doVolumeOps();
@@ -100,7 +101,7 @@ public void testVolumeOps() throws IOException {
     Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
     Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager,
+    HddsWhiteboxTestUtils.setInternalState(ozoneManager,
         "volumeManager", mockVm);

     doVolumeOps();
@@ -124,8 +125,8 @@ public void testVolumeOps() throws IOException {
   @Test
   public void testBucketOps() throws IOException {
     BucketManager bucketManager =
-        (BucketManager) org.apache.hadoop.test.Whitebox
-            .getInternalState(ozoneManager, "bucketManager");
+        (BucketManager) HddsWhiteboxTestUtils.getInternalState(
+            ozoneManager, "bucketManager");
     BucketManager mockBm = Mockito.spy(bucketManager);

     Mockito.doNothing().when(mockBm).createBucket(null);
@@ -134,7 +135,7 @@ public void testBucketOps() throws IOException {
     Mockito.doNothing().when(mockBm).setBucketProperty(null);
     Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(
+    HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "bucketManager", mockBm);

     doBucketOps();
@@ -153,7 +154,7 @@ public void testBucketOps() throws IOException {
     Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
     Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(
+    HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "bucketManager", mockBm);

     doBucketOps();
@@ -174,7 +175,7 @@ public void testBucketOps() throws IOException {
   @Test
   public void testKeyOps() throws IOException {
-    KeyManager bucketManager = (KeyManager) org.apache.hadoop.test.Whitebox
+    KeyManager bucketManager = (KeyManager) HddsWhiteboxTestUtils
         .getInternalState(ozoneManager, "keyManager");
     KeyManager mockKm = Mockito.spy(bucketManager);
@@ -183,7 +184,7 @@ public void testKeyOps() throws IOException {
     Mockito.doReturn(null).when(mockKm).lookupKey(null);
     Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(
+    HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "keyManager", mockKm);

     doKeyOps();
@@ -201,7 +202,7 @@ public void testKeyOps() throws IOException {
     Mockito.doThrow(exception).when(mockKm).listKeys(
         null, null, null, null, 0);

-    org.apache.hadoop.test.Whitebox.setInternalState(
+    HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "keyManager", mockKm);

     doKeyOps();
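
The pattern applied throughout this test, in miniature: pull a private collaborator out of the object under test, wrap it in a Mockito spy, stub what the test needs, and push the spy back into the private field. A self-contained sketch under invented names (Owner, Greeter, and the greeter field are not from the patch):

import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.mockito.Mockito;

public class SpyInjectionSketch {
  static class Greeter {
    String greet() { return "hello"; }
  }

  static class Owner {
    private Greeter greeter = new Greeter();
    String run() { return greeter.greet(); }
  }

  public static void main(String[] args) {
    Owner owner = new Owner();
    // Extract the real collaborator, spy it, stub one method.
    Greeter real =
        (Greeter) HddsWhiteboxTestUtils.getInternalState(owner, "greeter");
    Greeter spy = Mockito.spy(real);
    Mockito.doReturn("stubbed").when(spy).greet();
    // Inject the spy back; the object under test now calls the stub.
    HddsWhiteboxTestUtils.setInternalState(owner, "greeter", spy);
    System.out.println(owner.run()); // prints "stubbed"
  }
}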

View File

@@ -23,6 +23,8 @@
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import java.io.IOException;
@@ -32,6 +34,8 @@
 public class ITestOzoneContractGetFileStatus
     extends AbstractContractGetFileStatusTest {

+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);

   @BeforeClass
   public static void createCluster() throws IOException {
@@ -50,7 +54,7 @@ protected AbstractFSContract createContract(Configuration conf) {
   @Override
   public void teardown() throws Exception {
-    getLogger().info("FS details {}", getFileSystem());
+    LOG.info("FS details {}", getFileSystem());
     super.teardown();
   }

View File

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.genesis;

+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine
@@ -32,7 +33,6 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.util.Time;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Scope;
@@ -115,7 +115,7 @@ public void initialize() throws IOException {
     // Create containers
     for (int x = 0; x < INIT_CONTAINERS; x++) {
-      long containerID = Time.getUtcTime() + x;
+      long containerID = HddsUtils.getUtcTime() + x;
       ContainerCommandRequestProto req = getCreateContainerCommand(containerID);
       dispatcher.dispatch(req);
       containers.add(containerID);
@@ -123,7 +123,7 @@ public void initialize() throws IOException {
     }

     for (int x = 0; x < INIT_KEYS; x++) {
-      keys.add(Time.getUtcTime()+x);
+      keys.add(HddsUtils.getUtcTime()+x);
     }

     for (int x = 0; x < INIT_CHUNKS; x++) {