HDFS-12155. Ozone: add RocksDB support to DEBUG CLI. Contributed by Chen Liang.

Authored by Weiwei Yang on 2017-07-26 12:59:25 +08:00; committed by Owen O'Malley
parent 0cc166c053
commit d50a743015
3 changed files with 66 additions and 27 deletions

SQLCLI.java

@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
@@ -75,6 +76,7 @@ public class SQLCLI extends Configured implements Tool {
   private Options options;
   private BasicParser parser;
   private final Charset encoding = Charset.forName("UTF-8");
+  private final OzoneConfiguration conf;

   // for container.db
   private static final String CREATE_CONTAINER_INFO =
@@ -199,9 +201,10 @@ public class SQLCLI extends Configured implements Tool {
   private static final Logger LOG =
       LoggerFactory.getLogger(SQLCLI.class);

-  public SQLCLI() {
+  public SQLCLI(OzoneConfiguration conf) {
     this.options = getOptions();
     this.parser = new BasicParser();
+    this.conf = conf;
   }

   @SuppressWarnings("static-access")
@@ -468,7 +471,7 @@ private void convertContainerDB(Path dbPath, Path outPath)
     LOG.info("Create tables for sql container db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setDbFile(dbFile).build();
+        .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
       executeSQL(conn, CREATE_CONTAINER_MEMBERS);
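
The added .setConf(conf) call is the heart of the patch: with a configuration in hand, MetadataStoreBuilder can read OzoneConfigKeys.OZONE_METADATA_STORE_IMPL and open the .db file with the matching engine instead of unconditionally assuming LevelDB. A minimal sketch of that dispatch, assuming builder internals roughly like the following (the store constructors and the fallback default are illustrative assumptions, not taken from this diff):

// Hedged sketch of MetadataStoreBuilder.build(); names not visible in this
// diff (RocksDBStore, LevelDBStore constructors, the fallback) are assumptions.
public MetadataStore build() throws IOException {
  String impl = conf == null
      ? OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB  // assumed fallback
      : conf.get(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
            OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB);
  if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equalsIgnoreCase(impl)) {
    return new RocksDBStore(dbFile);  // illustrative constructor
  }
  return new LevelDBStore(dbFile);    // illustrative constructor
}

The same one-line change repeats in the three converters below (convertBlockDB, convertNodePoolDB, convertOpenContainerDB), so every DB the DEBUG CLI opens goes through this dispatch.
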
@@ -547,7 +550,7 @@ private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
     LOG.info("Create tables for sql block db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setDbFile(dbFile).build();
+        .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_BLOCK_CONTAINER);
@@ -594,7 +597,7 @@ private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
     LOG.info("Create table for sql node pool db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setDbFile(dbFile).build();
+        .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_NODE_POOL);
       executeSQL(conn, CREATE_DATANODE_INFO);
@@ -645,7 +648,7 @@ private void convertOpenContainerDB(Path dbPath, Path outPath)
     LOG.info("Create table for open container db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
-        .setDbFile(dbFile).build();
+        .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_OPEN_CONTAINER);
@@ -671,7 +674,7 @@ private CommandLine parseArgs(String[] argv)
   }

   public static void main(String[] args) {
-    Tool shell = new SQLCLI();
+    Tool shell = new SQLCLI(new OzoneConfiguration());
     int res = 0;
     try {
       ToolRunner.run(shell, args);
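
With the constructor change, main() builds the configuration and hands it in, so a caller that wants RocksDB only has to set one key before running the tool. A minimal driver sketch under that assumption; the SQLCLI import path and the actual CLI options (source .db path, SQLite output path) are assumed or elided, while every other name appears in this diff:

// Illustrative driver, not part of the patch.
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
// import of SQLCLI omitted; package path as in this commit.

public class SQLCLIRocksDBDriver {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Choose the store engine before SQLCLI opens any .db file.
    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB);
    Tool shell = new SQLCLI(conf);
    System.exit(ToolRunner.run(shell, args));
  }
}
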

TestKSMSQLCli.java

@@ -30,9 +30,9 @@
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;

 import java.io.IOException;
 import java.io.OutputStream;
@@ -44,6 +44,7 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -57,6 +58,7 @@
 /**
  * This class tests the CLI that transforms ksm.db into SQLite DB files.
  */
+@RunWith(Parameterized.class)
 public class TestKSMSQLCli {
   private static MiniOzoneCluster cluster = null;
   private static StorageHandler storageHandler;
@@ -76,8 +78,19 @@ public class TestKSMSQLCli {
   private static String keyName2 = "key2";
   private static String keyName3 = "key3";

-  @Rule
-  public ExpectedException exception = ExpectedException.none();
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
+    });
+  }
+
+  private static String metaStoreType;
+
+  public TestKSMSQLCli(String type) {
+    metaStoreType = type;
+  }

   /**
    * Create a MiniDFSCluster for testing.
@@ -142,7 +155,8 @@ public static void setup() throws Exception {

   @Before
   public void init() throws Exception {
-    cli = new SQLCLI();
+    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
+    cli = new SQLCLI(conf);
   }

   @Test
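
The test conversion retires the ExpectedException rule in favor of JUnit's Parameterized runner: data() supplies one store-type string per run, the runner passes it to the constructor, and init() writes it into the configuration that now feeds SQLCLI, so every test method executes once against LevelDB and once against RocksDB. One quirk worth noting: metaStoreType is a static field assigned from the per-instance constructor. That works because the stock Parameterized runner builds and runs one test instance at a time, but an instance field would state the intent more directly. A sketch of that alternative (illustrative, not part of the patch, and assuming the field is only read from instance methods such as init(), as in the hunks shown here):

// Alternative to the static field above; per-instance and immutable.
private final String metaStoreType;

public TestKSMSQLCli(String type) {
  this.metaStoreType = type;
}
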

TestContainerSQLCli.java

@@ -33,9 +33,11 @@
 import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;

 import java.nio.file.Files;
 import java.nio.file.Paths;
@@ -45,6 +47,8 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -59,27 +63,43 @@
 /**
  * This class tests the CLI that transforms container into SQLite DB files.
  */
+@RunWith(Parameterized.class)
 public class TestContainerSQLCli {

+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
+    });
+  }
+
+  private static String metaStoreType;
+
+  public TestContainerSQLCli(String type) {
+    metaStoreType = type;
+  }
+
   private static SQLCLI cli;

-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
+  private MiniOzoneCluster cluster;
+  private OzoneConfiguration conf;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;

-  private static ContainerMapping mapping;
-  private static NodeManager nodeManager;
-  private static BlockManagerImpl blockManager;
+  private ContainerMapping mapping;
+  private NodeManager nodeManager;
+  private BlockManagerImpl blockManager;

-  private static Pipeline pipeline1;
-  private static Pipeline pipeline2;
+  private Pipeline pipeline1;
+  private Pipeline pipeline2;

-  private static HashMap<String, String> blockContainerMap;
+  private HashMap<String, String> blockContainerMap;

   private final static long DEFAULT_BLOCK_SIZE = 4 * KB;

-  @BeforeClass
-  public static void init() throws Exception {
+  @Before
+  public void setup() throws Exception {
     long datanodeCapacities = 3 * OzoneConsts.TB;
     blockContainerMap = new HashMap<>();
@@ -138,11 +158,13 @@ public static void init() throws Exception {
     mapping.close();
     nodeManager.close();

-    cli = new SQLCLI();
+    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
+    cli = new SQLCLI(conf);
   }

-  @AfterClass
-  public static void shutdown() throws InterruptedException {
+  @After
+  public void shutdown() throws InterruptedException {
     IOUtils.cleanup(null, storageContainerLocationClient, cluster);
   }
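
TestContainerSQLCli needs more than a new runner: its fixtures move from class scope to instance scope (@BeforeClass/@AfterClass become @Before/@After, and most static fields lose their static modifier), because class-level setup runs only once per class and would pin the MiniOzoneCluster to whichever store type the first parameter selected. Per-instance fixtures rebuild the cluster for every parameterized run. A self-contained toy showing the per-parameter lifecycle the Parameterized runner guarantees (the class and method names here are invented for illustration):

import java.util.Arrays;
import java.util.Collection;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class StoreTypeLifecycleExample {
  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    return Arrays.asList(new Object[][] {{"LevelDB"}, {"RocksDB"}});
  }

  private final String storeType;

  public StoreTypeLifecycleExample(String storeType) {
    this.storeType = storeType;
  }

  @Before
  public void setup() {
    // Runs before every test, once per parameter: the place to build
    // a fixture (e.g. a cluster) that depends on storeType.
    System.out.println("setup for " + storeType);
  }

  @After
  public void shutdown() {
    // Runs after every test for the current parameter.
    System.out.println("teardown for " + storeType);
  }

  @Test
  public void runsOncePerStoreType() {
    // Executed twice in total: once with "LevelDB", once with "RocksDB",
    // each time on a freshly constructed instance of this class.
  }
}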