HDFS-11756. Ozone : add DEBUG CLI support of blockDB file. Contributed by Chen Liang

Chen Liang 2017-05-09 13:42:47 -07:00 committed by Owen O'Malley
parent c18229f0df
commit 9fcaeceb54
2 changed files with 133 additions and 12 deletions

SQLCLI.java

@@ -24,6 +24,7 @@
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Pipeline;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.util.Tool;
@@ -47,6 +48,7 @@
import java.util.Map;
import java.util.Set;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;

/**
@@ -61,7 +63,7 @@ public class SQLCLI extends Configured implements Tool {
  // for container.db
  private static final String CREATE_CONTAINER_INFO =
      "CREATE TABLE containerInfo (" +
          "containerName TEXT PRIMARY KEY NOT NULL, " +
          "leaderUUID TEXT NOT NULL)";
  private static final String CREATE_CONTAINER_MACHINE =
      "CREATE TABLE containerMembers (" +
@@ -88,6 +90,14 @@ public class SQLCLI extends Configured implements Tool {
  private static final String INSERT_CONTAINER_MEMBERS =
      "INSERT INTO containerMembers (containerName, datanodeUUID) " +
          "VALUES (\"%s\", \"%s\")";
  // for block.db
  private static final String CREATE_BLOCK_CONTAINER =
      "CREATE TABLE blockContainer (" +
          "blockKey TEXT PRIMARY KEY NOT NULL, " +
          "containerName TEXT NOT NULL)";
  private static final String INSERT_BLOCK_CONTAINER =
      "INSERT INTO blockContainer (blockKey, containerName) " +
          "VALUES (\"%s\", \"%s\")";
  private static final Logger LOG =
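For context, INSERT_BLOCK_CONTAINER is a plain format template; a minimal sketch of how convertBlockDB (added below) expands it, with a made-up block key and container name, would be:

    String sql = String.format(INSERT_BLOCK_CONTAINER, "block-0001", "container-abc");
    // sql is now:
    // INSERT INTO blockContainer (blockKey, containerName) VALUES ("block-0001", "container-abc")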
@@ -153,6 +163,9 @@ public int run(String[] args) throws Exception {
    if (dbName.toString().equals(CONTAINER_DB)) {
      LOG.info("Converting container DB");
      convertContainerDB(dbPath, outPath);
    } else if (dbName.toString().equals(BLOCK_DB)) {
      LOG.info("Converting block DB");
      convertBlockDB(dbPath, outPath);
    } else {
      LOG.error("Unrecognized db name {}", dbName);
    }
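As a usage sketch: the converter is selected purely by the db file name, so dumping a block DB reuses the existing -p/-o options. The paths here are hypothetical; the test below drives the CLI the same way:

    SQLCLI cli = new SQLCLI();
    // "-p" points at the LevelDB directory, "-o" at the sqlite file to create
    cli.run(new String[] {"-p", "/scm/metadata/block.db", "-o", "/tmp/block_sql.db"});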
@@ -201,6 +214,7 @@ private void executeSQL(Connection conn, String sql) throws SQLException {
   * --------------------------------
   *
   * @param dbPath path to container db.
   * @param outPath path to output sqlite.
   * @throws IOException throws exception.
   */
  private void convertContainerDB(Path dbPath, Path outPath)
@@ -269,6 +283,42 @@ private void insertContainerDB(Connection conn, String containerName,
    LOG.info("Insertion completed.");
  }
  /**
   * Converts block.db to sqlite. This is a rather simple db, the schema has
   * only one table:
   *
   * blockContainer
   * --------------------------
   * blockKey* | containerName
   * --------------------------
   *
   * @param dbPath path to block db.
   * @param outPath path to output sqlite.
   * @throws IOException throws exception.
   */
  private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
    LOG.info("Create tables for sql block db.");
    File dbFile = dbPath.toFile();
    org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
    LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
    Connection conn = connectDB(outPath.toString());
    executeSQL(conn, CREATE_BLOCK_CONTAINER);
    DBIterator iter = dbStore.getIterator();
    iter.seekToFirst();
    while (iter.hasNext()) {
      Map.Entry<byte[], byte[]> entry = iter.next();
      String blockKey = DFSUtilClient.bytes2String(entry.getKey());
      String containerName = DFSUtilClient.bytes2String(entry.getValue());
      String insertBlockContainer = String.format(
          INSERT_BLOCK_CONTAINER, blockKey, containerName);
      executeSQL(conn, insertBlockContainer);
    }
    closeDB(conn);
    dbStore.close();
  }
  private CommandLine parseArgs(String[] argv)
      throws ParseException {
    return parser.parse(options, argv);
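Once converted, the sqlite file can be inspected with any sqlite client or plain JDBC. A minimal sketch, assuming a sqlite JDBC driver on the classpath; the class name and output path are hypothetical, and the test below does the equivalent through its connectDB/executeQuery helpers:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class BlockDbDump {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:sqlite:/tmp/block_sql.db");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM blockContainer")) {
          while (rs.next()) {
            // each row maps one block key to the container holding that block
            System.out.println(rs.getString("blockKey") + " -> "
                + rs.getString("containerName"));
          }
        }
      }
    }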

TestContainerSQLCli.java

@@ -22,7 +22,12 @@
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.scm.block.BlockManagerImpl;
import org.apache.hadoop.ozone.scm.cli.SQLCLI;
import org.apache.hadoop.ozone.scm.container.ContainerMapping;
import org.apache.hadoop.ozone.scm.node.NodeManager;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -36,8 +41,11 @@
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -52,24 +60,65 @@ public class TestContainerSQLCli {
  private static StorageContainerLocationProtocolClientSideTranslatorPB
      storageContainerLocationClient;
  private static ContainerMapping mapping;
  private static NodeManager nodeManager;
  private static BlockManagerImpl blockManager;
  private static String pipelineName1;
  private static String pipelineName2;
  private static HashMap<String, String> blockContainerMap;
  private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
  @BeforeClass
  public static void init() throws Exception {
    long datanodeCapacities = 3 * OzoneConsts.TB;
    blockContainerMap = new HashMap<>();
    conf = new OzoneConfiguration();
    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 2);
    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
        .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
        .setHandlerType("distributed").build();
    storageContainerLocationClient =
        cluster.createStorageContainerLocationClient();
    cluster.waitForHeartbeatProcessed();
    // create two containers to be retrieved later.
    storageContainerLocationClient.allocateContainer("container0");
    storageContainerLocationClient.allocateContainer("container1");
    cluster.shutdown();
    nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
    mapping = new ContainerMapping(conf, nodeManager, 128);
    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, 128);
    // blockManager.allocateBlock() will create containers if there are none
    // stored in levelDB. The number of containers to create is the value of
    // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, which we set to 2 above.
    // The first allocateBlock() call will therefore create two containers,
    // and a random one of them is assigned to the block.
    AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
    pipelineName1 = ab1.getPipeline().getContainerName();
    blockContainerMap.put(ab1.getKey(), pipelineName1);

    AllocatedBlock ab2;
    // We want one block on each of the two provisioned containers, but
    // blockManager picks a container at random, so keep retrying until the
    // second block lands on the other container. This seems to be the only
    // way to cover both containers. Note that every retry still creates a
    // block and assigns it to a container, so the size of blockContainerMap
    // varies from run to run.
    while (true) {
      ab2 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
      pipelineName2 = ab2.getPipeline().getContainerName();
      blockContainerMap.put(ab2.getKey(), pipelineName2);
      if (!pipelineName2.equals(pipelineName1)) {
        break;
      }
    }
    blockManager.close();
    mapping.close();
    nodeManager.close();

    cli = new SQLCLI();
  }
@@ -78,6 +127,28 @@ public static void shutdown() throws InterruptedException {
    IOUtils.cleanup(null, storageContainerLocationClient, cluster);
  }
  @Test
  public void testConvertBlockDB() throws Exception {
    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
    String dbPath = dbRootPath + "/" + BLOCK_DB;
    String[] args = {"-p", dbPath, "-o", dbOutPath};
    cli.run(args);

    Connection conn = connectDB(dbOutPath);
    String sql = "SELECT * FROM blockContainer";
    ResultSet rs = executeQuery(conn, sql);
    // every row in sqlite must match an entry recorded during init()
    while (rs.next()) {
      String blockKey = rs.getString("blockKey");
      String containerName = rs.getString("containerName");
      assertTrue(blockContainerMap.containsKey(blockKey) &&
          blockContainerMap.remove(blockKey).equals(containerName));
    }
    // and every recorded entry must appear in sqlite exactly once
    assertEquals(0, blockContainerMap.size());

    Files.delete(Paths.get(dbOutPath));
  }
  @Test
  public void testConvertContainerDB() throws Exception {
    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
@@ -104,8 +175,8 @@ public void testConvertContainerDB() throws Exception {
      //assertEquals(dnUUID, rs.getString("leaderUUID"));
    }
    assertTrue(containerNames.size() == 2 &&
        containerNames.contains(pipelineName1) &&
        containerNames.contains(pipelineName2));

    sql = "SELECT * FROM containerMembers";
    rs = executeQuery(conn, sql);
@@ -115,8 +186,8 @@ public void testConvertContainerDB() throws Exception {
      //assertEquals(dnUUID, rs.getString("datanodeUUID"));
    }
    assertTrue(containerNames.size() == 2 &&
        containerNames.contains(pipelineName1) &&
        containerNames.contains(pipelineName2));

    sql = "SELECT * FROM datanodeInfo";
    rs = executeQuery(conn, sql);