HDFS-12454. Ozone : the sample ozone-site.xml in OzoneGettingStarted does not work. Contributed by Chen Liang.

Authored by Anu Engineer on 2017-09-26 18:09:32 -07:00; committed by Owen O'Malley
parent 2804435386
commit a1a3ba6529
21 changed files with 88 additions and 54 deletions

View File

@ -77,8 +77,8 @@ public final class OzoneConfigKeys {
"ozone.trace.enabled";
public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
- public static final String OZONE_CONTAINER_METADATA_DIRS =
- "ozone.container.metadata.dirs";
+ public static final String OZONE_METADATA_DIRS =
+ "ozone.metadata.dirs";
public static final String OZONE_METADATA_STORE_IMPL =
"ozone.metastore.impl";

View File

@ -171,6 +171,8 @@ public final class ScmConfigKeys {
// if this value is not set then container startup will fail.
public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id";
+ public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id";
public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
"ozone.scm.db.cache.size.mb";
public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;

View File

@ -25,6 +25,7 @@
import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.common.states.DatanodeState;
+ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -104,7 +105,7 @@ public DatanodeStateMachine.DatanodeStates call() throws Exception {
* and persist the ID to a local file.
*/
private void persistContainerDatanodeID() throws IOException {
- String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+ String dataNodeIDPath = OzoneUtils.getDatanodeIDPath(conf);
if (Strings.isNullOrEmpty(dataNodeIDPath)) {
LOG.error("A valid file path is needed for config setting {}",
ScmConfigKeys.OZONE_SCM_DATANODE_ID);

View File

@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.container.common.states.endpoint.RegisterEndpointTask;
import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos;
+ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
@ -112,7 +113,7 @@ public RunningDatanodeState(Configuration conf,
*/
private StorageContainerDatanodeProtocolProtos.ContainerNodeIDProto
getContainerNodeID() {
- String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+ String dataNodeIDPath = OzoneUtils.getDatanodeIDPath(conf);
if (dataNodeIDPath == null || dataNodeIDPath.isEmpty()) {
LOG.error("A valid file path is needed for config setting {}",
ScmConfigKeys.OZONE_SCM_DATANODE_ID);

View File

@ -97,8 +97,8 @@ public static XceiverServerRatis newXceiverServerRatis(String datanodeID,
if (Strings.isNullOrEmpty(storageDir)) {
storageDir = ozoneConf.get(OzoneConfigKeys
- .OZONE_CONTAINER_METADATA_DIRS);
- Preconditions.checkNotNull(storageDir, "ozone.container.metadata.dirs " +
+ .OZONE_METADATA_DIRS);
+ Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
"cannot be null, Please check your configs.");
storageDir = storageDir.concat(ratisDir);
LOG.warn("Storage directory for Ratis is not configured. Mapping Ratis " +

View File

@ -85,7 +85,7 @@ public OzoneContainer(DatanodeID datanodeID, Configuration ozoneConfig) throws
this.ozoneConfig = ozoneConfig;
List<StorageLocation> locations = new LinkedList<>();
String[] paths = ozoneConfig.getStrings(
- OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ OzoneConfigKeys.OZONE_METADATA_DIRS);
if (paths != null && paths.length > 0) {
for (String p : paths) {
locations.add(StorageLocation.parse(

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.web.utils;
import com.google.common.base.Preconditions;
+ import com.google.common.base.Strings;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@ -28,6 +29,7 @@
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.client.rest.headers.Header;
+ import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.util.Time;
import javax.ws.rs.core.HttpHeaders;
@ -38,6 +40,7 @@
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
+ import java.nio.file.Paths;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
@ -316,7 +319,7 @@ public static Response getResponse(UserArgs args, int statusCode,
*/
public static File getScmMetadirPath(Configuration conf) {
String metaDirPath = conf.getTrimmed(OzoneConfigKeys
- .OZONE_CONTAINER_METADATA_DIRS);
+ .OZONE_METADATA_DIRS);
Preconditions.checkNotNull(metaDirPath);
File dirPath = new File(metaDirPath);
if (!dirPath.exists() && !dirPath.mkdirs()) {
@ -326,6 +329,28 @@ public static File getScmMetadirPath(Configuration conf) {
return dirPath;
}
+ /**
+  * Get the path for datanode id file.
+  *
+  * @param conf - Configuration
+  * @return the path of datanode id as string
+  */
+ public static String getDatanodeIDPath(Configuration conf) {
+   String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+   if (Strings.isNullOrEmpty(dataNodeIDPath)) {
+     String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+     if (Strings.isNullOrEmpty(metaPath)) {
+       // this means the metadata dir is not set; in theory this should not
+       // happen at this point because startup should've failed earlier.
+       throw new IllegalArgumentException("Unable to locate meta data " +
+           "directory when getting datanode id path");
+     }
+     dataNodeIDPath = Paths.get(metaPath,
+         ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
+   }
+   return dataNodeIDPath;
+ }
/**
* Convert time in millisecond to a human readable format required in ozone.
* @return a human readable string for the input time

View File

@ -59,7 +59,7 @@
</property>
<property>
- <name>ozone.container.metadata.dirs</name>
+ <name>ozone.metadata.dirs</name>
<value></value>
<description>
Ozone metadata dir path.

View File

@ -63,17 +63,17 @@ place and not mingled with HDFS settings.
<value>True</value>
</property>
```
- * _*ozone.container.metadata.dirs*_ Ozone is designed with modern hardware
+ * _*ozone.metadata.dirs*_ Ozone is designed with modern hardware
in mind. It tries to use SSDs effectively. So users can specify where the
- datanode metadata must reside. Usually you pick your fastest disk (SSD if
- you have them on your datanodes). Datanodes will write the container metadata
- to these disks. This is a required setting, if this is missing datanodes will
+ metadata must reside. Usually you pick your fastest disk (SSD if
+ you have them on your nodes). KSM, SCM and datanode will write the metadata
+ to these disks. This is a required setting, if this is missing Ozone will
fail to come up. Here is an example,
```
<property>
- <name>ozone.container.metadata.dirs</name>
- <value>/data/disk1/container/meta</value>
+ <name>ozone.metadata.dirs</name>
+ <value>/data/disk1/meta</value>
</property>
```
@ -135,10 +135,11 @@ Here is a quick summary of settings needed by Ozone.
| Setting | Value | Comment |
|--------------------------------|------------------------------|------------------------------------------------------------------|
| ozone.enabled | True | This enables SCM and containers in HDFS cluster. |
- | ozone.container.metadata.dirs | file path | The container metadata will be stored here in the datanode. |
+ | ozone.metadata.dirs | file path | The metadata will be stored here. |
| ozone.scm.names | SCM server name | Hostname:port or IP:port address of SCM. |
| ozone.scm.datanode.id | file path | Data node ID is the location of datanode's ID file |
- | ozone.scm.block.client.address | SCM server name | Used by services like KSM |
+ | ozone.scm.block.client.address | SCM server name and port | Used by services like KSM |
| ozone.scm.client.address | SCM server name and port | Used by client side |
| ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM |
| ozone.ksm.address | KSM server name | Used by Ozone handler and Ozone file system. |
Here is a working example of `ozone-site.xml`.
@ -153,30 +154,34 @@ Here is a quick summary of settings needed by Ozone.
</property>
<property>
- <name>ozone.container.metadata.dirs</name>
- <value>/data/disk1/scm/meta</value>
+ <name>ozone.metadata.dirs</name>
+ <value>/data/disk1/ozone/meta</value>
</property>
<property>
<name>ozone.scm.names</name>
- <value>scm.hadoop.apache.org</value>
+ <value>127.0.0.1</value>
</property>
- <property>
- <name>ozone.scm.datanode.id</name>
- <value>/data/disk1/scm/meta/node/datanode.id</value>
- </property>
<property>
- <name>ozone.scm.block.client.address</name>
- <value>scm.hadoop.apache.org</value>
+ <name>ozone.scm.client.address</name>
+ <value>127.0.0.1:9860</value>
</property>
<property>
- <name>ozone.ksm.address</name>
- <value>ksm.hadoop.apache.org</value>
- </property>
+ <name>ozone.scm.block.client.address</name>
+ <value>127.0.0.1:9863</value>
+ </property>
+ <property>
+ <name>ozone.scm.datanode.address</name>
+ <value>127.0.0.1:9861</value>
+ </property>
+ <property>
+ <name>ozone.ksm.address</name>
+ <value>127.0.0.1:9874</value>
+ </property>
</configuration>
```
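To sanity-check a file like the working example above, it can be loaded with Hadoop's plain Configuration API and the resolved keys printed back. A small sketch, assuming a hypothetical local copy at /tmp/ozone-site.xml:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class PrintOzoneConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical location of the ozone-site.xml shown above.
    conf.addResource(new Path("/tmp/ozone-site.xml"));

    // Print the keys the example sets, as the services would resolve them.
    System.out.println("ozone.enabled                  = " + conf.getBoolean("ozone.enabled", false));
    System.out.println("ozone.metadata.dirs            = " + conf.get("ozone.metadata.dirs"));
    System.out.println("ozone.scm.names                = " + conf.get("ozone.scm.names"));
    System.out.println("ozone.scm.client.address       = " + conf.get("ozone.scm.client.address"));
    System.out.println("ozone.scm.block.client.address = " + conf.get("ozone.scm.block.client.address"));
    System.out.println("ozone.scm.datanode.address     = " + conf.get("ozone.scm.datanode.address"));
    System.out.println("ozone.ksm.address              = " + conf.get("ozone.ksm.address"));
  }
}
```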

View File

@ -113,9 +113,9 @@ protected void setupDatanodeAddress(
setConf(i, dnConf, OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
getInstanceStorageDir(i, -1).getCanonicalPath());
String containerMetaDirs = dnConf.get(
- OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS) + "-dn-" + i;
+ OzoneConfigKeys.OZONE_METADATA_DIRS) + "-dn-" + i;
Path containerMetaDirPath = Paths.get(containerMetaDirs);
- setConf(i, dnConf, OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ setConf(i, dnConf, OzoneConfigKeys.OZONE_METADATA_DIRS,
containerMetaDirs);
Path containerRootPath =
containerMetaDirPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
@ -476,7 +476,7 @@ private void configScmMetadata() throws IOException {
if (scmMetadataDir.isPresent()) {
// if user specifies a path in the test, it is assumed that user takes
// care of creating and cleaning up that directory after the tests.
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
scmMetadataDir.get());
return;
}
@ -487,7 +487,7 @@ private void configScmMetadata() throws IOException {
Files.createDirectories(scmPath);
Path containerPath = scmPath.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
Files.createDirectories(containerPath);
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, scmPath
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath
.toString());
// TODO : Fix this, we need a more generic mechanism to map

View File

@ -44,7 +44,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
- import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS;
+ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.junit.Assert.*;
/**
@ -62,7 +62,7 @@ public class TestMiniOzoneCluster {
@BeforeClass
public static void setup() {
conf = new OzoneConfiguration();
- conf.set(OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OZONE_METADATA_DIRS,
TEST_ROOT.toString());
conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
WRITE_TMP.mkdirs();
@ -183,7 +183,7 @@ public void testContainerRandomPort() throws IOException {
Configuration ozoneConf = SCMTestUtils.getConf();
File testDir = PathUtils.getTestDir(TestOzoneContainer.class);
ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
- ozoneConf.set(OZONE_CONTAINER_METADATA_DIRS,
+ ozoneConf.set(OZONE_METADATA_DIRS,
TEST_ROOT.toString());
// Each instance of SM will create an ozone container

View File

@ -102,7 +102,7 @@ public void setUp() throws Exception {
}
conf.set(DFS_DATANODE_DATA_DIR_KEY,
new File(testRoot, "data").getAbsolutePath());
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
new File(testRoot, "scm").getAbsolutePath());
path = Paths.get(path.toString(),
TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();

View File

@ -64,7 +64,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys
- .OZONE_CONTAINER_METADATA_DIRS;
+ .OZONE_METADATA_DIRS;
import static org.apache.hadoop.ozone.container.common.SCMTestUtils
.getDatanodeID;
import static org.apache.hadoop.ozone.protocol.proto
@ -298,7 +298,7 @@ private void heartbeatTaskHelper(InetSocketAddress scmAddress,
int rpcTimeout) throws Exception {
Configuration conf = SCMTestUtils.getConf();
conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
- conf.set(OZONE_CONTAINER_METADATA_DIRS, testDir.getAbsolutePath());
+ conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
// Mini Ozone cluster will not come up if the port is not true, since
// Ratis will exit if the server port cannot be bound. We can remove this
// hard coding once we fix the Ratis default behaviour.

View File

@ -159,7 +159,7 @@ public void setup() throws Exception {
public void testKSMDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + KSM_DB_NAME;
String[] args = {"-p", dbPath, "-o", dbOutPath};

View File

@ -171,7 +171,7 @@ public void shutdown() throws InterruptedException {
@Test
public void testConvertBlockDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + BLOCK_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
@ -193,7 +193,7 @@ public void testConvertBlockDB() throws Exception {
@Test
public void testConvertNodepoolDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + NODEPOOL_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
@ -220,7 +220,7 @@ public void testConvertNodepoolDB() throws Exception {
@Test
public void testConvertOpenContainerDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + OPEN_CONTAINERS_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
@ -254,7 +254,7 @@ public void testConvertContainerDB() throws Exception {
String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
// TODO : the following will fail due to empty Datanode list, need to fix.
//String dnUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
String dbPath = dbRootPath + "/" + CONTAINER_DB;
String[] args = {"-p", dbPath, "-o", dbOutPath};
Connection conn;

View File

@ -63,7 +63,7 @@ public static void setUp() throws Exception {
String path = GenericTestUtils
.getTempPath(TestBlockManager.class.getSimpleName());
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS, path);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
testDir = Paths.get(path).toFile();
boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) {

View File

@ -40,7 +40,7 @@
import java.util.UUID;
import java.util.stream.Collectors;
- import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS;
+ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
/**
@ -58,7 +58,7 @@ public void setup() throws Exception {
TestDeletedBlockLog.class.getSimpleName());
conf = new OzoneConfiguration();
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
- conf.set(OZONE_CONTAINER_METADATA_DIRS, testDir.getAbsolutePath());
+ conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
deletedBlockLog = new DeletedBlockLogImpl(conf);
}

View File

@ -55,7 +55,7 @@ public static void setUp() throws Exception {
testDir = GenericTestUtils
.getTestDir(TestContainerMapping.class.getSimpleName());
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
boolean folderExisted = testDir.exists() || testDir.mkdirs();
if (!folderExisted) {

View File

@ -115,7 +115,7 @@ public void testContainerPlacementCapacity() throws IOException,
final File testDir = PathUtils.getTestDir(
TestContainerPlacement.class);
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);

View File

@ -102,7 +102,7 @@ public void cleanup() {
*/
OzoneConfiguration getConf() {
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setLong(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_MS, 100);
return conf;

View File

@ -57,7 +57,7 @@ public class TestSCMNodePoolManager {
SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf)
throws IOException {
- conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);