HDFS-13405. Ozone: Rename HDSL to HDDS.

Contributed by Ajay Kumar, Elek Marton, Mukul Kumar Singh, Shashikant Banerjee and Anu Engineer.
Anu Engineer 2018-04-05 11:24:39 -07:00 committed by Owen O'Malley
parent 792ac4d08b
commit 8b832f3c35
486 changed files with 3922 additions and 2721 deletions
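
The rename is mechanical but wide: Java packages move from org.apache.hadoop.scm and org.apache.hadoop.hdsl to org.apache.hadoop.hdds.scm and org.apache.hadoop.hdds, the HdslProtos protobuf bundle becomes HddsProtos, Maven artifacts move from hadoop-hdsl-* to hadoop-hdds-*, and hdsl.* configuration keys become hdds.*. As a minimal orientation sketch, a typical import block before and after (names taken from the hunks below):

    // Before (HDSL naming):
    // import org.apache.hadoop.scm.XceiverClientManager;
    // import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
    // import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;

    // After (HDDS naming):
    import org.apache.hadoop.hdds.scm.XceiverClientManager;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;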

View File

@ -51,7 +51,7 @@
<exclude>**/file:/**</exclude>
<exclude>**/SecurityAuth.audit*</exclude>
<exclude>hadoop-ozone/**</exclude>
<exclude>hadoop-hdsl/**</exclude>
<exclude>hadoop-hdds/**</exclude>
<exclude>hadoop-cblock/**</exclude>
</excludes>
</fileSet>

View File

@ -37,7 +37,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-server-framework</artifactId>
<artifactId>hadoop-hdds-server-framework</artifactId>
</dependency>
<dependency>
@ -47,12 +47,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-common</artifactId>
<artifactId>hadoop-hdds-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-client</artifactId>
<artifactId>hadoop-hdds-client</artifactId>
</dependency>
<dependency>
@ -132,7 +132,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
</param>
<param>
${basedir}/../../hadoop-hdsl/common/src/main/proto/
${basedir}/../../hadoop-hdds/common/src/main/proto/
</param>
<param>${basedir}/src/main/proto</param>
</imports>

View File

@ -37,24 +37,24 @@
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.client.ContainerOperationClient;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.scm.protocolPB
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.utils.LevelDBStore;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
import static org.apache.hadoop.hdsl.server.ServerUtils
import static org.apache.hadoop.hdds.server.ServerUtils
.updateRPCListenAddress;
import org.iq80.leveldb.DBIterator;
import org.slf4j.Logger;

View File

@ -36,8 +36,8 @@
.DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_PORT_DEFAULT;
import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
/**
* Generic stateless utility functions for CBlock components.

View File

@ -25,7 +25,7 @@
import org.apache.hadoop.cblock.CBlockConfigKeys;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;

View File

@ -21,9 +21,9 @@
import com.google.common.primitives.Longs;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;

View File

@ -39,7 +39,7 @@
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.Closeable;
import java.io.IOException;

View File

@ -23,8 +23,8 @@
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.jscsi.target.storage.IStorageModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.jscsi.target.Configuration;
import org.jscsi.target.Target;
import org.jscsi.target.TargetServer;

View File

@ -26,8 +26,8 @@
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.DiskBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;
import org.iq80.leveldb.Options;

View File

@ -31,9 +31,9 @@
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.client.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.security.UserGroupInformation;
import org.jscsi.target.Configuration;
@ -47,14 +47,14 @@
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
/**
* This class runs the target server process.

View File

@ -22,10 +22,10 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger;

View File

@ -24,8 +24,8 @@
import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger;

View File

@ -22,10 +22,10 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
import org.apache.hadoop.utils.LevelDBStore;

View File

@ -38,7 +38,7 @@
import org.apache.hadoop.cblock.exception.CBlockException;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -19,7 +19,7 @@
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
/**
*

View File

@ -19,7 +19,7 @@
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.cblock.proto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.util.HashMap;
import java.util.List;

View File

@ -25,7 +25,7 @@
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.HashMap;

View File

@ -25,10 +25,10 @@
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -187,8 +187,8 @@ public void run() {
ContainerDescriptor container = null;
try {
Pipeline pipeline = storageClient.createContainer(
HdslProtos.ReplicationType.STAND_ALONE,
HdslProtos.ReplicationFactor.ONE,
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE,
KeyUtil.getContainerName(volume.getUserName(),
volume.getVolumeName(), containerIdx), cblockId);

View File

@ -27,7 +27,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.cblock;
import "hdsl.proto";
import "hdds.proto";
import "CBlockServiceProtocol.proto";
/**
* This message is sent from CBlock client side to CBlock server to
@ -69,7 +69,7 @@ message ContainerIDProto {
required string containerID = 1;
required uint64 index = 2;
// making pipeline optional to be compatible with existing tests
optional hadoop.hdsl.Pipeline pipeline = 3;
optional hadoop.hdds.Pipeline pipeline = 3;
}

View File

@ -23,13 +23,13 @@
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;

View File

@ -24,21 +24,21 @@
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;

View File

@ -19,8 +19,8 @@
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.After;
import org.junit.Before;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.cblock;
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.Test;

View File

@ -25,16 +25,16 @@
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;

View File

@ -29,7 +29,7 @@
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
/**
* Test the resource generation of Dynamic Provisioner.

View File

@ -19,7 +19,7 @@
import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;

View File

@ -18,12 +18,12 @@
package org.apache.hadoop.cblock.util;
import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.ArrayList;
@ -88,7 +88,7 @@ public List<ContainerInfo> listContainer(String startName,
ContainerInfo container = new ContainerInfo.Builder()
.setContainerName(containerDescriptor.getContainerID())
.setPipeline(containerDescriptor.getPipeline())
.setState(HdslProtos.LifeCycleState.ALLOCATED)
.setState(HddsProtos.LifeCycleState.ALLOCATED)
.build();
containerList.add(container);
return containerList;
@ -134,8 +134,8 @@ public long getContainerSize(Pipeline pipeline) throws IOException {
}
@Override
public Pipeline createContainer(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor replicationFactor, String containerId,
public Pipeline createContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor replicationFactor, String containerId,
String owner) throws IOException {
int contId = currentContainerId.getAndIncrement();
ContainerLookUpService.addContainer(Long.toString(contId));
@ -153,8 +153,8 @@ public Pipeline createContainer(HdslProtos.ReplicationType type,
* @throws IOException
*/
@Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName)
public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException {
return null;
}
@ -168,8 +168,8 @@ public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
* @throws IOException
*/
@Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool)
public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException {
return null;
}
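
In tests, the mock above stands in for a live SCM. A hedged usage sketch against the renamed ScmClient interface (the no-argument constructor is an assumption; only the overridden methods appear in this hunk):

    // Hypothetical test wiring; createContainer signature as shown above.
    ScmClient scmClient = new MockStorageClient();
    Pipeline pipeline = scmClient.createContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        "container-1", "test-owner");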

View File

@ -32,7 +32,7 @@
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

View File

@ -21,7 +21,7 @@
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.apache.hadoop.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;

View File

@ -596,8 +596,8 @@ function hadoop_bootstrap
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
HDSL_DIR=${HDSL_DIR:-"share/hadoop/hdsl"}
HDSL_LIB_JARS_DIR=${HDSL_LIB_JARS_DIR:-"share/hadoop/hdsl/lib"}
HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"}

View File

@ -219,7 +219,7 @@
</profile>
<profile>
<id>hdsl</id>
<id>hdds</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
@ -231,11 +231,11 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-server-scm</artifactId>
<artifactId>hadoop-hdds-server-scm</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-tools</artifactId>
<artifactId>hadoop-hdds-tools</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -243,7 +243,7 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-container-service</artifactId>
<artifactId>hadoop-hdds-container-service</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -251,7 +251,7 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-tools</artifactId>
<artifactId>hadoop-hdds-tools</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>

View File

@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true

View File

@ -27,7 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@ -19,24 +19,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl</artifactId>
<artifactId>hadoop-hdds</artifactId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-hdsl-client</artifactId>
<artifactId>hadoop-hdds-client</artifactId>
<version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop HDSL Client libraries</description>
<name>Apache Hadoop HDSL Client</name>
<description>Apache Hadoop Distributed Data Store Client libraries</description>
<name>Apache HDDS Client</name>
<packaging>jar</packaging>
<properties>
<hadoop.component>hdsl</hadoop.component>
<hadoop.component>hdds</hadoop.component>
<is.hadoop.component>true</is.hadoop.component>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-common</artifactId>
<artifactId>hadoop-hdds-common</artifactId>
<scope>provided</scope>
</dependency>

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@ -28,19 +28,19 @@
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.List;
import java.util.concurrent.Semaphore;
/**
@ -69,7 +69,7 @@ public XceiverClient(Pipeline pipeline, Configuration config) {
this.pipeline = pipeline;
this.config = config;
this.semaphore =
new Semaphore(OzoneClientUtils.getMaxOutstandingRequests(config));
new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
}
@Override
@ -186,7 +186,7 @@ public void createPipeline(String pipelineID, List<DatanodeDetails> datanodes)
* @return - Stand Alone as the type.
*/
@Override
public HdslProtos.ReplicationType getPipelineType() {
return HdslProtos.ReplicationType.STAND_ALONE;
public HddsProtos.ReplicationType getPipelineType() {
return HddsProtos.ReplicationType.STAND_ALONE;
}
}

View File

@ -15,19 +15,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
@ -24,8 +24,8 @@
import io.netty.handler.codec.protobuf.ProtobufEncoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import java.util.concurrent.Semaphore;

View File

@ -16,33 +16,32 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.Callable;
package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import static org.apache.hadoop.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
.ReplicationType.RATIS;
/**
@ -186,24 +185,24 @@ public boolean isUseRatis() {
* Returns hard coded 3 as replication factor.
* @return 3
*/
public HdslProtos.ReplicationFactor getFactor() {
public HddsProtos.ReplicationFactor getFactor() {
if(isUseRatis()) {
return HdslProtos.ReplicationFactor.THREE;
return HddsProtos.ReplicationFactor.THREE;
}
return HdslProtos.ReplicationFactor.ONE;
return HddsProtos.ReplicationFactor.ONE;
}
/**
* Returns the default replication type.
* @return Ratis or Standalone
*/
public HdslProtos.ReplicationType getType() {
public HddsProtos.ReplicationType getType() {
// TODO : Fix me and make Ratis default before release.
// TODO: Remove this as replication factor and type are pipeline properties
if(isUseRatis()) {
return HdslProtos.ReplicationType.RATIS;
return HddsProtos.ReplicationType.RATIS;
}
return HdslProtos.ReplicationType.STAND_ALONE;
return HddsProtos.ReplicationType.STAND_ALONE;
}
/**
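
The selection logic above reduces to a single switch on the Ratis flag; a compact restatement of what the renamed getType() and getFactor() return:

    // Mirrors XceiverClientManager.getType()/getFactor() as shown above.
    HddsProtos.ReplicationType type = isUseRatis()
        ? HddsProtos.ReplicationType.RATIS
        : HddsProtos.ReplicationType.STAND_ALONE;
    HddsProtos.ReplicationFactor factor = isUseRatis()
        ? HddsProtos.ReplicationFactor.THREE
        : HddsProtos.ReplicationFactor.ONE;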

View File

@ -15,10 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;

View File

@ -16,17 +16,19 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ratis.RatisHelper;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftClientReply;
@ -60,7 +62,7 @@ public static XceiverClientRatis newXceiverClientRatis(
ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
final int maxOutstandingRequests =
OzoneClientUtils.getMaxOutstandingRequests(ozoneConf);
HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
return new XceiverClientRatis(pipeline,
SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests);
}
@ -98,8 +100,8 @@ public void createPipeline(String clusterId, List<DatanodeDetails> datanodes)
* @return - Ratis
*/
@Override
public HdslProtos.ReplicationType getPipelineType() {
return HdslProtos.ReplicationType.RATIS;
public HddsProtos.ReplicationType getPipelineType() {
return HddsProtos.ReplicationType.RATIS;
}
private void reinitialize(List<DatanodeDetails> datanodes, RaftGroup group)

View File

@ -15,20 +15,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.client;
package org.apache.hadoop.hdds.scm.client;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -37,8 +39,10 @@
import java.util.List;
import java.util.UUID;
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.ALLOCATED;
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
.ALLOCATED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
.OPEN;
/**
* This class provides the client-facing APIs of container operations.
@ -189,8 +193,8 @@ private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
* @inheritDoc
*/
@Override
public Pipeline createContainer(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor,
public Pipeline createContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor,
String containerId, String owner) throws IOException {
XceiverClientSpi client = null;
try {
@ -229,8 +233,8 @@ public Pipeline createContainer(HdslProtos.ReplicationType type,
* @throws IOException
*/
@Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName)
public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException {
return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
poolName);
@ -240,8 +244,8 @@ public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
* Creates a specified replication pipeline.
*/
@Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool)
public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException {
return storageContainerLocationClient.createReplicationPipeline(type,
factor, nodePool);
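
Callers of the renamed client API now pass HddsProtos enums throughout. A hedged wiring sketch (the ContainerOperationClient constructor arguments are assumed from context, not shown in this hunk):

    // Hypothetical setup; the XceiverClientManager and the location-protocol
    // translator are constructed elsewhere in the codebase.
    ScmClient scmClient = new ContainerOperationClient(
        storageContainerLocationClient, xceiverClientManager);
    Pipeline pipeline = scmClient.createContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        "container-1", "owner-1");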

View File

@ -16,7 +16,20 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
package org.apache.hadoop.hdds.scm.client;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.ParseException;
import java.time.Instant;
@ -25,21 +38,6 @@
import java.time.format.DateTimeFormatter;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.ScmConfigKeys;
import com.google.common.base.Preconditions;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility methods for Ozone and Container Clients.
*
@ -49,14 +47,14 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public final class OzoneClientUtils {
public final class HddsClientUtils {
private static final Logger LOG = LoggerFactory.getLogger(
OzoneClientUtils.class);
HddsClientUtils.class);
private static final int NO_PORT = -1;
private OzoneClientUtils() {
private HddsClientUtils() {
}
/**
@ -69,55 +67,28 @@ private OzoneClientUtils() {
return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
});
/**
* Returns the cache value to be used for list calls.
* @param conf Configuration object
* @return list cache size
* Convert time in millisecond to a human readable format required in ozone.
* @return a human readable string for the input time
*/
public static int getListCacheSize(Configuration conf) {
return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
public static String formatDateTime(long millis) {
ZonedDateTime dateTime = ZonedDateTime.ofInstant(
Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone());
return DATE_FORMAT.get().format(dateTime);
}
/**
* @return a default instance of {@link CloseableHttpClient}.
* Convert time in ozone date format to millisecond.
* @return time in milliseconds
*/
public static CloseableHttpClient newHttpClient() {
return OzoneClientUtils.newHttpClient(new OzoneConfiguration());
public static long formatDateTime(String date) throws ParseException {
Preconditions.checkNotNull(date, "Date string should not be null.");
return ZonedDateTime.parse(date, DATE_FORMAT.get())
.toInstant().getEpochSecond();
}
/**
* Returns a {@link CloseableHttpClient} configured by given configuration.
* If conf is null, returns a default instance.
*
* @param conf configuration
* @return a {@link CloseableHttpClient} instance.
*/
public static CloseableHttpClient newHttpClient(Configuration conf) {
long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) {
socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
return client;
}
/**
* verifies that bucket name / volume name is a valid DNS name.
@ -199,23 +170,53 @@ public static void verifyResourceName(String resName)
}
/**
* Convert time in millisecond to a human readable format required in ozone.
* @return a human readable string for the input time
* Returns the cache value to be used for list calls.
* @param conf Configuration object
* @return list cache size
*/
public static String formatDateTime(long millis) {
ZonedDateTime dateTime = ZonedDateTime.ofInstant(
Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone());
return DATE_FORMAT.get().format(dateTime);
public static int getListCacheSize(Configuration conf) {
return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
}
/**
* Convert time in ozone date format to millisecond.
* @return time in milliseconds
* @return a default instance of {@link CloseableHttpClient}.
*/
public static long formatDateTime(String date) throws ParseException {
Preconditions.checkNotNull(date, "Date string should not be null.");
return ZonedDateTime.parse(date, DATE_FORMAT.get())
.toInstant().getEpochSecond();
public static CloseableHttpClient newHttpClient() {
return HddsClientUtils.newHttpClient(new Configuration());
}
/**
* Returns a {@link CloseableHttpClient} configured by given configuration.
* If conf is null, returns a default instance.
*
* @param conf configuration
* @return a {@link CloseableHttpClient} instance.
*/
public static CloseableHttpClient newHttpClient(Configuration conf) {
long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) {
socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
return client;
}
/**
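
A short usage sketch of the renamed utility class (method names and signatures taken from the hunk above; conf is an org.apache.hadoop.conf.Configuration):

    // Render a timestamp in the Ozone date format and parse it back
    // (the String overload throws ParseException).
    String rendered = HddsClientUtils.formatDateTime(System.currentTimeMillis());
    long parsed = HddsClientUtils.formatDateTime(rendered);

    // Build an HTTP client honoring the ozone.client.* timeout settings.
    try (CloseableHttpClient http = HddsClientUtils.newHttpClient(conf)) {
      // issue requests...
    }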

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.client;
package org.apache.hadoop.hdds.scm.client;
/**
* Client facing classes for the container operations.

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
/**
* Classes for different type of container service client.

View File

@ -16,7 +16,15 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.storage;
package org.apache.hadoop.hdds.scm.storage;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkResponseProto;
import java.io.EOFException;
import java.io.IOException;
@ -25,14 +33,6 @@
import java.util.Arrays;
import java.util.List;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadChunkResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.XceiverClientManager;
/**
* An {@link InputStream} used by the REST service in combination with the
* SCMClient to read the value of a key from a sequence

View File

@ -16,24 +16,24 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.storage;
package org.apache.hadoop.hdds.scm.storage;
import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.putKey;
import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.writeChunk;
import com.google.protobuf.ByteString;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.UUID;
import com.google.protobuf.ByteString;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey;
import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
.writeChunk;
/**
* An {@link OutputStream} used by the REST service in combination with the

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.storage;
package org.apache.hadoop.hdds.scm.storage;
/**
* Low level IO streams to upload/download chunks from container service.

View File

@ -16,6 +16,6 @@
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.hdsl.protocol.proto"/>
<Package name="org.apache.hadoop.hdds.protocol.proto"/>
</Match>
</FindBugsFilter>

View File

@ -19,22 +19,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl</artifactId>
<artifactId>hadoop-hdds</artifactId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-hdsl-common</artifactId>
<artifactId>hadoop-hdds-common</artifactId>
<version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop HDSL Common utilities</description>
<name>Apache Hadoop HDSL Common</name>
<description>Apache Hadoop Distributed Data Store Common</description>
<name>Apache HDDS Common</name>
<packaging>jar</packaging>
<properties>
<hadoop.component>hdsl</hadoop.component>
<hadoop.component>hdds</hadoop.component>
<is.hadoop.component>true</is.hadoop.component>
</properties>
<dependencies>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
@ -109,7 +108,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<includes>
<include>StorageContainerLocationProtocol.proto</include>
<include>DatanodeContainerProtocol.proto</include>
<include>hdsl.proto</include>
<include>hdds.proto</include>
<include>ScmBlockLocationProtocol.proto</include>
</includes>
</source>

View File

@ -0,0 +1,6 @@
package org.apache.hadoop.hdds;
public class HddsConfigKeys {
private HddsConfigKeys() {
}
}

View File

@ -16,35 +16,34 @@
* limitations under the License.
*/
package org.apache.hadoop.hdsl;
import java.net.InetSocketAddress;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.scm.ScmConfigKeys;
package org.apache.hadoop.hdds;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.net.HostAndPort;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
/**
* HDSL specific stateless utility functions.
* HDDS specific stateless utility functions.
*/
public class HdslUtils {
public class HddsUtils {
private static final Logger LOG = LoggerFactory.getLogger(HdslUtils.class);
private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
/**
* The service ID of the solitary Ozone SCM service.
@ -55,7 +54,7 @@ public class HdslUtils {
private static final int NO_PORT = -1;
private HdslUtils() {
private HddsUtils() {
}
/**
@ -233,7 +232,7 @@ public static Collection<InetSocketAddress> getSCMAddresses(
return addresses;
}
public static boolean isHdslEnabled(Configuration conf) {
public static boolean isHddsEnabled(Configuration conf) {
String securityEnabled =
conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
package org.apache.hadoop.hdds.client;
import org.apache.hadoop.ozone.OzoneConsts;

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
package org.apache.hadoop.hdds.client;
/**
* The replication factor to be used while writing key into ozone.

View File

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
package org.apache.hadoop.hdds.client;
/**
* The replication type to be used while writing key into ozone.

View File

@ -16,8 +16,8 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
package org.apache.hadoop.hdds.client;
/**
* Base property types for HDSL containers and replications.
* Base property types for HDDS containers and replications.
*/

View File

@ -16,11 +16,11 @@
* limitations under the License.
*/
package org.apache.hadoop.hdsl.conf;
package org.apache.hadoop.hdds.conf;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Unmarshaller;
@ -28,8 +28,9 @@
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
/**
* Configuration for ozone.

View File

@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdsl.conf;
package org.apache.hadoop.hdds.conf;

View File

@ -16,8 +16,8 @@
* limitations under the License.
*/
package org.apache.hadoop.hdsl;
package org.apache.hadoop.hdds;
/**
* Generic HDSL specific configurator and helper classes.
* Generic HDDS specific configurator and helper classes.
*/

View File

@ -16,12 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.hdsl.protocol;
package org.apache.hadoop.hdds.protocol;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.util.UUID;
@ -222,7 +222,7 @@ public int getOzoneRestPort() {
* @return DatanodeDetails
*/
public static DatanodeDetails getFromProtoBuf(
HdslProtos.DatanodeDetailsProto datanodeDetailsProto) {
HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
DatanodeDetails.Builder builder = newBuilder();
builder.setUuid(datanodeDetailsProto.getUuid());
if (datanodeDetailsProto.hasIpAddress()) {
@ -251,11 +251,11 @@ public static DatanodeDetails getFromProtoBuf(
/**
* Returns a DatanodeDetails protobuf message from a datanode ID.
* @return Hdsl.DatanodeDetailsProto
* @return HddsProtos.DatanodeDetailsProto
*/
public HdslProtos.DatanodeDetailsProto getProtoBufMessage() {
HdslProtos.DatanodeDetailsProto.Builder builder =
HdslProtos.DatanodeDetailsProto.newBuilder()
public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
HddsProtos.DatanodeDetailsProto.Builder builder =
HddsProtos.DatanodeDetailsProto.newBuilder()
.setUuid(getUuidString());
if (ipAddress != null) {
builder.setIpAddress(ipAddress);

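The rename keeps the protobuf round trip symmetric. A hedged sketch, assuming the builder exposes a build() method (which this hunk does not show):

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    public class DatanodeDetailsRoundTrip {
      public static void main(String[] args) {
        // Build a minimal DatanodeDetails; other builder setters omitted.
        DatanodeDetails original = DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .build();
        // Serialize to the renamed HddsProtos message and parse it back.
        HddsProtos.DatanodeDetailsProto proto = original.getProtoBufMessage();
        DatanodeDetails restored = DatanodeDetails.getFromProtoBuf(proto);
        System.out.println(restored.getUuidString());
      }
    }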

@ -17,6 +17,6 @@
*/
/**
* This package contains HDSL protocol related classes.
* This package contains HDDS protocol related classes.
*/
package org.apache.hadoop.hdsl.protocol;
package org.apache.hadoop.hdds.protocol;


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -121,18 +121,18 @@ public final class ScmConfigKeys {
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
public static final String HDSL_REST_HTTP_ADDRESS_KEY =
"hdsl.rest.http-address";
public static final String HDSL_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
public static final String HDSL_REST_CSRF_ENABLED_KEY =
"hdsl.rest.rest-csrf.enabled";
public static final boolean HDSL_REST_CSRF_ENABLED_DEFAULT = false;
public static final String HDSL_REST_NETTY_HIGH_WATERMARK =
"hdsl.rest.netty.high.watermark";
public static final int HDSL_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
public static final int HDSL_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
public static final String HDSL_REST_NETTY_LOW_WATERMARK =
"hdsl.rest.netty.low.watermark";
public static final String HDDS_REST_HTTP_ADDRESS_KEY =
"hdds.rest.http-address";
public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
public static final String HDDS_REST_CSRF_ENABLED_KEY =
"hdds.rest.rest-csrf.enabled";
public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
"hdds.rest.netty.high.watermark";
public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
public static final String HDDS_REST_NETTY_LOW_WATERMARK =
"hdds.rest.netty.low.watermark";
public static final String OZONE_SCM_HANDLER_COUNT_KEY =
"ozone.scm.handler.count.key";

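Deployments that set the old hdsl.rest.* keys must switch to the hdds.rest.* names. A small sketch of resolving the renamed keys (the class name is illustrative):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class RestConfigDump {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        String restAddress = conf.get(
            ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY,
            ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_DEFAULT);
        boolean csrfEnabled = conf.getBoolean(
            ScmConfigKeys.HDDS_REST_CSRF_ENABLED_KEY,
            ScmConfigKeys.HDDS_REST_CSRF_ENABLED_DEFAULT);
        System.out.println(restAddress + " (csrf=" + csrfEnabled + ")");
      }
    }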

@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
/**
* ScmInfo wraps the result returned from SCM#getScmInfo which


@ -16,16 +16,16 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.Closeable;
import java.io.IOException;
@ -125,5 +125,5 @@ public abstract void createPipeline(String pipelineID,
*
* @return - {Stand_Alone, Ratis or Chained}
*/
public abstract HdslProtos.ReplicationType getPipelineType();
public abstract HddsProtos.ReplicationType getPipelineType();
}
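
A hedged usage sketch of the renamed return type; RATIS is assumed to be the enum constant behind the "Ratis" choice named in the javadoc above:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;

    public final class PipelineTypeCheck {
      // True when the client speaks the Ratis transport.
      static boolean isRatis(XceiverClientSpi client) {
        return client.getPipelineType() == HddsProtos.ReplicationType.RATIS;
      }
    }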


@ -15,13 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.client;
package org.apache.hadoop.hdds.scm.client;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException;
import java.util.EnumSet;
@ -111,8 +111,8 @@ List<ContainerInfo> listContainer(String startName, String prefixName,
* @return Pipeline
* @throws IOException - in case of error.
*/
Pipeline createContainer(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor replicationFactor, String containerId,
Pipeline createContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor replicationFactor, String containerId,
String owner) throws IOException;
/**
@ -123,8 +123,8 @@ Pipeline createContainer(HdslProtos.ReplicationType type,
* @return A set of nodes that meet the requested criteria.
* @throws IOException
*/
HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses,
HdslProtos.QueryScope queryScope, String poolName) throws IOException;
HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
HddsProtos.QueryScope queryScope, String poolName) throws IOException;
/**
* Creates a specified replication pipeline.
@ -133,7 +133,7 @@ HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses,
* @param nodePool - Set of machines.
* @throws IOException
*/
Pipeline createReplicationPipeline(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool)
Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException;
}
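
A caller-side sketch against the renamed enums (STAND_ALONE and ONE are assumed constants; the container name and owner are placeholder values):

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    public final class CreateContainerSketch {
      static Pipeline createStandalone(ScmClient client) throws IOException {
        return client.createContainer(
            HddsProtos.ReplicationType.STAND_ALONE,
            HddsProtos.ReplicationFactor.ONE,
            "demo-container", "demo-owner");
      }
    }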


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.client;
package org.apache.hadoop.hdds.scm.client;
/**
* This package contains classes for the client of the storage container


@ -16,7 +16,7 @@
*
*/
package org.apache.hadoop.ozone.scm.container.ContainerStates;
package org.apache.hadoop.hdds.scm.container;
import com.google.common.base.Preconditions;
import org.apache.commons.math3.util.MathUtils;


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
/**
* Allocated block wraps the result returned from SCM#allocateBlock which


@ -16,13 +16,13 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.util.Time;
import java.util.Comparator;
@ -32,7 +32,7 @@
*/
public class ContainerInfo
implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
private HdslProtos.LifeCycleState state;
private HddsProtos.LifeCycleState state;
private Pipeline pipeline;
// Bytes allocated by SCM for clients.
private long allocatedBytes;
@ -48,7 +48,7 @@ public class ContainerInfo
ContainerInfo(
long containerID,
final String containerName,
HdslProtos.LifeCycleState state,
HddsProtos.LifeCycleState state,
Pipeline pipeline,
long allocatedBytes,
long usedBytes,
@ -73,7 +73,7 @@ public class ContainerInfo
public ContainerInfo() {
}
public static ContainerInfo fromProtobuf(HdslProtos.SCMContainerInfo info) {
public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
ContainerInfo.Builder builder = new ContainerInfo.Builder();
builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline()));
builder.setAllocatedBytes(info.getAllocatedBytes());
@ -95,11 +95,11 @@ public String getContainerName() {
return containerName;
}
public HdslProtos.LifeCycleState getState() {
public HddsProtos.LifeCycleState getState() {
return state;
}
public void setState(HdslProtos.LifeCycleState state) {
public void setState(HddsProtos.LifeCycleState state) {
this.state = state;
}
@ -156,9 +156,9 @@ public void allocate(long size) {
allocatedBytes += size;
}
public HdslProtos.SCMContainerInfo getProtobuf() {
HdslProtos.SCMContainerInfo.Builder builder =
HdslProtos.SCMContainerInfo.newBuilder();
public HddsProtos.SCMContainerInfo getProtobuf() {
HddsProtos.SCMContainerInfo.Builder builder =
HddsProtos.SCMContainerInfo.newBuilder();
builder.setPipeline(getPipeline().getProtobufMessage());
builder.setAllocatedBytes(getAllocatedBytes());
builder.setUsedBytes(getUsedBytes());
@ -268,7 +268,7 @@ public int compareTo(ContainerInfo o) {
* Builder class for ContainerInfo.
*/
public static class Builder {
private HdslProtos.LifeCycleState state;
private HddsProtos.LifeCycleState state;
private Pipeline pipeline;
private long allocated;
private long used;
@ -284,7 +284,7 @@ public Builder setContainerID(long id) {
return this;
}
public Builder setState(HdslProtos.LifeCycleState lifeCycleState) {
public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
this.state = lifeCycleState;
return this;
}
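
Since getProtobuf() and fromProtobuf() now speak HddsProtos.SCMContainerInfo, a round trip stays a two-liner. A sketch, assuming the info carries a pipeline, which getProtobuf() serializes:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

    public final class ContainerInfoRoundTrip {
      static ContainerInfo copyOf(ContainerInfo info) {
        HddsProtos.SCMContainerInfo proto = info.getProtobuf();
        return ContainerInfo.fromProtobuf(proto);
      }
    }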


@ -15,10 +15,9 @@
* the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
import static org.apache.hadoop.hdsl.protocol.proto
import static org.apache.hadoop.hdds.protocol.proto
.ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
/**


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
@ -29,8 +29,8 @@
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException;
import java.util.ArrayList;
@ -83,14 +83,14 @@ public Pipeline(String containerName, PipelineChannel pipelineChannel) {
* @param pipeline - ProtoBuf definition for the pipeline.
* @return Pipeline Object
*/
public static Pipeline getFromProtoBuf(HdslProtos.Pipeline pipeline) {
public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
Preconditions.checkNotNull(pipeline);
PipelineChannel pipelineChannel =
PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
return new Pipeline(pipeline.getContainerName(), pipelineChannel);
}
public HdslProtos.ReplicationFactor getFactor() {
public HddsProtos.ReplicationFactor getFactor() {
return pipelineChannel.getFactor();
}
@ -143,9 +143,9 @@ public List<String> getDatanodeHosts() {
* @return Protobuf message
*/
@JsonIgnore
public HdslProtos.Pipeline getProtobufMessage() {
HdslProtos.Pipeline.Builder builder =
HdslProtos.Pipeline.newBuilder();
public HddsProtos.Pipeline getProtobufMessage() {
HddsProtos.Pipeline.Builder builder =
HddsProtos.Pipeline.newBuilder();
builder.setContainerName(this.containerName);
builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
return builder.build();
@ -194,7 +194,7 @@ public void setData(byte[] data) {
*
* @return - LifeCycleStates.
*/
public HdslProtos.LifeCycleState getLifeCycleState() {
public HddsProtos.LifeCycleState getLifeCycleState() {
return pipelineChannel.getLifeCycleState();
}
@ -212,7 +212,7 @@ public String getPipelineName() {
*
* @return type - Standalone, Ratis, Chained.
*/
public HdslProtos.ReplicationType getType() {
public HddsProtos.ReplicationType getType() {
return pipelineChannel.getType();
}
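
The same round-trip pattern holds for Pipeline with the renamed message type:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    public final class PipelineRoundTrip {
      static Pipeline copyOf(Pipeline pipeline) {
        HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
        return Pipeline.getFromProtoBuf(proto);
      }
    }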


@ -15,15 +15,15 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import java.util.Map;
import java.util.TreeMap;
@ -82,9 +82,9 @@ public void addMember(DatanodeDetails datanodeDetails) {
}
@JsonIgnore
public HdslProtos.PipelineChannel getProtobufMessage() {
HdslProtos.PipelineChannel.Builder builder =
HdslProtos.PipelineChannel.newBuilder();
public HddsProtos.PipelineChannel getProtobufMessage() {
HddsProtos.PipelineChannel.Builder builder =
HddsProtos.PipelineChannel.newBuilder();
for (DatanodeDetails datanode : datanodes.values()) {
builder.addMembers(datanode.getProtoBufMessage());
}
@ -104,7 +104,7 @@ public HdslProtos.PipelineChannel getProtobufMessage() {
}
public static PipelineChannel getFromProtoBuf(
HdslProtos.PipelineChannel transportProtos) {
HddsProtos.PipelineChannel transportProtos) {
Preconditions.checkNotNull(transportProtos);
PipelineChannel pipelineChannel =
new PipelineChannel(transportProtos.getLeaderID(),
@ -113,7 +113,7 @@ public static PipelineChannel getFromProtoBuf(
transportProtos.getFactor(),
transportProtos.getName());
for (HdslProtos.DatanodeDetailsProto dataID :
for (HddsProtos.DatanodeDetailsProto dataID :
transportProtos.getMembersList()) {
pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID));
}


@ -15,9 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import java.io.IOException;


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.container.common.helpers;
package org.apache.hadoop.hdds.scm.container.common.helpers;
/**
Contains protocol buffer helper classes and utilities used in
impl.


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm;
package org.apache.hadoop.hdds.scm;
/**
* This package contains classes for the client of the storage container


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.protocol;
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;


@ -15,19 +15,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.protocol;
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
import org.apache.hadoop.scm.ScmInfo;
/**
* ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
* to read/write a block.


@ -16,14 +16,14 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.protocol;
import java.util.List;
import java.util.stream.Collectors;
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import java.util.List;
import java.util.stream.Collectors;
/**
* Holds the nodes that currently host the block for a block key.
*/


@ -15,18 +15,19 @@
* the License.
*/
package org.apache.hadoop.scm.protocol;
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
/**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
* that currently host a container.
@ -37,8 +38,8 @@ public interface StorageContainerLocationProtocol {
* set of datanodes that should be used for creating this container.
*
*/
Pipeline allocateContainer(HdslProtos.ReplicationType replicationType,
HdslProtos.ReplicationFactor factor, String containerName, String owner)
Pipeline allocateContainer(HddsProtos.ReplicationType replicationType,
HddsProtos.ReplicationFactor factor, String containerName, String owner)
throws IOException;
/**
@ -85,8 +86,8 @@ List<ContainerInfo> listContainer(String startName, String prefixName,
* @param nodeStatuses
* @return List of Datanodes.
*/
HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses,
HdslProtos.QueryScope queryScope, String poolName) throws IOException;
HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
HddsProtos.QueryScope queryScope, String poolName) throws IOException;
/**
* Notify from client when begin or finish creating objects like pipeline
@ -109,8 +110,8 @@ void notifyObjectStageChange(
* @param nodePool - optional machine list to build a pipeline.
* @throws IOException
*/
Pipeline createReplicationPipeline(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool)
Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException;
/**


@ -16,4 +16,4 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.scm.cli;
package org.apache.hadoop.hdds.scm.protocol;


@ -14,31 +14,39 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.scm.protocolPB;
package org.apache.hadoop.hdds.scm.protocolPB;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.AllocateScmBlockRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.AllocateScmBlockResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmKeyBlocksRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmKeyBlocksResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.GetScmBlockLocationsRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.GetScmBlockLocationsResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.KeyBlocks;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmLocatedBlockProto;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.ScmLocatedBlockProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import java.io.Closeable;
import java.io.IOException;
@ -117,7 +125,7 @@ public Set<AllocatedBlock> getBlockLocations(Set<String> keys)
*/
@Override
public AllocatedBlock allocateBlock(long size,
HdslProtos.ReplicationType type, HdslProtos.ReplicationFactor factor,
HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
String owner) throws IOException {
Preconditions.checkArgument(size > 0, "block size must be greater than 0");
@ -181,9 +189,9 @@ public List<DeleteBlockGroupResult> deleteKeyBlocks(
*/
@Override
public ScmInfo getScmInfo() throws IOException {
HdslProtos.GetScmInfoRequestProto request =
HdslProtos.GetScmInfoRequestProto.getDefaultInstance();
HdslProtos.GetScmInfoRespsonseProto resp;
HddsProtos.GetScmInfoRequestProto request =
HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
HddsProtos.GetScmInfoRespsonseProto resp;
try {
resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request);
} catch (ServiceException e) {

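A hedged caller sketch for the renamed allocateBlock signature (RATIS and THREE are assumed enum constants; the size and owner are placeholder values):

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

    public final class AllocateBlockSketch {
      static AllocatedBlock allocateOneGb(ScmBlockLocationProtocol scm)
          throws IOException {
        return scm.allocateBlock(1024L * 1024 * 1024,
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            "demo-owner");
      }
    }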

@ -15,12 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.protocolPB;
package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the


@ -14,33 +14,45 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.scm.protocolPB;
package org.apache.hadoop.hdds.scm.protocolPB;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.PipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.PipelineResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import java.io.Closeable;
import java.io.IOException;
@ -85,8 +97,8 @@ public StorageContainerLocationProtocolClientSideTranslatorPB(
* @throws IOException
*/
@Override
public Pipeline allocateContainer(HdslProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, String
public Pipeline allocateContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, String
containerName, String owner) throws IOException {
Preconditions.checkNotNull(containerName, "Container Name cannot be Null");
@ -151,7 +163,7 @@ public List<ContainerInfo> listContainer(String startName, String prefixName,
SCMListContainerResponseProto response =
rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
List<ContainerInfo> containerList = new ArrayList<>();
for (HdslProtos.SCMContainerInfo containerInfoProto : response
for (HddsProtos.SCMContainerInfo containerInfoProto : response
.getContainersList()) {
containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
}
@ -191,8 +203,8 @@ public void deleteContainer(String containerName)
* @return List of Datanodes.
*/
@Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName)
public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException {
// TODO : We support only cluster wide query right now. So ignoring checking
// queryScope and poolName
@ -248,8 +260,8 @@ public void notifyObjectStageChange(
* @throws IOException
*/
@Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType
replicationType, HdslProtos.ReplicationFactor factor, HdslProtos
public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
.NodePool nodePool) throws IOException {
PipelineRequestProto request = PipelineRequestProto.newBuilder()
.setNodePool(nodePool)
@ -277,10 +289,10 @@ public Pipeline createReplicationPipeline(HdslProtos.ReplicationType
@Override
public ScmInfo getScmInfo() throws IOException {
HdslProtos.GetScmInfoRequestProto request =
HdslProtos.GetScmInfoRequestProto.getDefaultInstance();
HddsProtos.GetScmInfoRequestProto request =
HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
try {
HdslProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
NULL_RPC_CONTROLLER, request);
ScmInfo.Builder builder = new ScmInfo.Builder()
.setClusterId(resp.getClusterId())

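A sketch of the renamed queryNode call (HEALTHY and CLUSTER are assumed constants; per the TODO above, the scope and pool name are currently ignored for cluster-wide queries):

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class QueryHealthyNodesSketch {
      static HddsProtos.NodePool queryHealthy(
          StorageContainerLocationProtocol scm) throws IOException {
        return scm.queryNode(EnumSet.of(HddsProtos.NodeState.HEALTHY),
            HddsProtos.QueryScope.CLUSTER, null);
      }
    }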

@ -15,11 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm.protocolPB;
package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos
.StorageContainerLocationProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.protocolPB;
package org.apache.hadoop.hdds.scm.protocolPB;
/**
* This package contains classes for the client of the storage container


@ -16,44 +16,43 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.storage;
package org.apache.hadoop.hdds.scm.storage;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.GetKeyRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.GetKeyResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.GetSmallFileRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.GetSmallFileResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.PutKeyRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.PutSmallFileRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Type;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue;
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
import java.io.IOException;
import org.apache.hadoop.scm.XceiverClientSpi;
/**
* Implementation of all container protocol calls performed by Container


@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.scm.storage;
package org.apache.hadoop.hdds.scm.storage;
/**
* This package contains StorageContainerManager classes.


@ -20,10 +20,9 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ozone.client.ReplicationFactor;
import org.apache.hadoop.ozone.client.ReplicationType;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
/**
* This class contains constants for configuration keys used in Ozone.


@ -17,8 +17,8 @@
package org.apache.hadoop.ozone.common;
import org.apache.hadoop.hdsl.protocol.proto
.ScmBlockLocationProtocolProtos.KeyBlocks;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.KeyBlocks;
import java.util.List;


@ -17,9 +17,11 @@
*/
package org.apache.hadoop.ozone.common;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result;
import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmBlockResult;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmBlockResult.Result;
import java.util.ArrayList;
import java.util.List;


@ -17,12 +17,12 @@
*/
package org.apache.hadoop.ozone.common;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.File;
import java.io.IOException;
/**
* The exception is thrown when file system state is inconsistent
* and is not recoverable.


@ -17,6 +17,13 @@
*/
package org.apache.hadoop.ozone.common;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.file.DirectoryStream;
@ -24,14 +31,6 @@
import java.nio.file.Path;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType;
import org.apache.hadoop.util.Time;
/**
* Storage information file. This class defines the methods to check
* the consistency of the storage dir and the version file.


@ -17,18 +17,17 @@
*/
package org.apache.hadoop.ozone.common;
import java.io.IOException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.File;
import java.io.RandomAccessFile;
import java.util.Properties;
import java.util.UUID;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Properties;
import java.util.UUID;
/**
* Common class for storage information. This class defines the common

Some files were not shown because too many files have changed in this diff.