From 9bc494b909112f15e5fb8475a2318bf951ecdceb Mon Sep 17 00:00:00 2001
From: Chen Liang
Date: Fri, 12 May 2017 13:13:55 -0700
Subject: [PATCH] HDFS-11802. Ozone : add DEBUG CLI support for open container db file. Contributed by Chen Liang

---
 .../apache/hadoop/ozone/scm/cli/SQLCLI.java   | 60 +++++++++++++++++--
 .../hadoop/ozone/scm/TestContainerSQLCli.java | 43 +++++++++++++
 2 files changed, 97 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index d18aca80eb..f558882db2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -53,6 +53,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
  * This is the CLI that can be use to convert a levelDB into a sqlite DB file.
@@ -116,6 +117,14 @@ public class SQLCLI extends Configured implements Tool {
       "INSERT INTO nodePool (datanodeUUID, poolName) " +
           "VALUES (\"%s\", \"%s\")";
   // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
+  // for openContainer.db
+  private static final String CREATE_OPEN_CONTAINER =
+      "CREATE TABLE openContainer (" +
+          "containerName TEXT PRIMARY KEY NOT NULL, " +
+          "containerUsed INTEGER NOT NULL)";
+  private static final String INSERT_OPEN_CONTAINER =
+      "INSERT INTO openContainer (containerName, containerUsed) " +
+          "VALUES (\"%s\", \"%s\")";
 
   private static final Logger LOG =
       LoggerFactory.getLogger(SQLCLI.class);
@@ -191,6 +200,9 @@ public int run(String[] args) throws Exception {
     } else if (dbName.toString().equals(NODEPOOL_DB)) {
       LOG.info("Converting node pool DB");
       convertNodePoolDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
+      LOG.info("Converting open container DB");
+      convertOpenContainerDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
@@ -244,12 +256,12 @@ private void convertContainerDB(Path dbPath, Path outPath)
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
       executeSQL(conn, CREATE_CONTAINER_MEMBERS);
       executeSQL(conn, CREATE_DATANODE_INFO);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       HashSet<String> uuidChecked = new HashSet<>();
       while (iter.hasNext()) {
@@ -320,10 +332,10 @@ private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_BLOCK_CONTAINER);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
@@ -364,11 +376,11 @@ private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_NODE_POOL);
       executeSQL(conn, CREATE_DATANODE_INFO);
 
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
@@ -394,6 +406,42 @@ private void insertNodePoolDB(Connection conn, String blockPool,
     executeSQL(conn, insertDatanodeID);
   }
 
+  /**
+   * Convert openContainer.db to sqlite db file. This is a rather simple db;
+   * the schema has only one table:
+   *
+   * openContainer
+   * -------------------------------
+   * containerName* | containerUsed
+   * -------------------------------
+   *
+   * @param dbPath path to container db.
+   * @param outPath path to output sqlite
+   * @throws Exception if the conversion fails.
+   */
+  private void convertOpenContainerDB(Path dbPath, Path outPath)
+      throws Exception {
+    LOG.info("Create table for open container db.");
+    File dbFile = dbPath.toFile();
+    org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
+    try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
+      executeSQL(conn, CREATE_OPEN_CONTAINER);
+
+      iter.seekToFirst();
+      while (iter.hasNext()) {
+        Map.Entry<byte[], byte[]> entry = iter.next();
+        String containerName = DFSUtil.bytes2String(entry.getKey());
+        long containerUsed = Long.parseLong(
+            DFSUtil.bytes2String(entry.getValue()));
+        String insertOpenContainer = String.format(
+            INSERT_OPEN_CONTAINER, containerName, containerUsed);
+        executeSQL(conn, insertOpenContainer);
+      }
+    }
+  }
+
   private CommandLine parseArgs(String[] argv)
       throws ParseException {
     return parser.parse(options, argv);

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 9e9e082938..1e6f5f7cb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -46,11 +46,13 @@
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -103,6 +105,15 @@ public static void init() throws Exception {
     // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE which we set to 2.
     // so the first allocateBlock() will create two containers. A random one
     // is assigned for the block.
+
+    // loop until both datanodes are up, trying for up to about 4 seconds.
+    for (int c = 0; c < 40; c++) {
+      if (nodeManager.getAllNodes().size() == 2) {
+        break;
+      }
+      Thread.sleep(100);
+    }
+    assertEquals(2, nodeManager.getAllNodes().size());
     AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
     pipeline1 = ab1.getPipeline();
     blockContainerMap.put(ab1.getKey(), pipeline1.getContainerName());
@@ -184,6 +195,38 @@ public void testConvertNodepoolDB() throws Exception {
     Files.delete(Paths.get(dbOutPath));
   }
 
+  @Test
+  public void testConvertOpenContainerDB() throws Exception {
+    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + OPEN_CONTAINERS_DB;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM openContainer";
+    ResultSet rs = executeQuery(conn, sql);
+    HashSet<String> expectedContainer = new HashSet<>();
+    expectedContainer.add(pipeline1.getContainerName());
+    expectedContainer.add(pipeline2.getContainerName());
+    // the number of allocated blocks can vary, and they can be located
+    // at either of the two containers. We only check if the total used
+    // is equal to block size * # of blocks.
+    long totalUsed = 0;
+    while (rs.next()) {
+      String containerName = rs.getString("containerName");
+      long containerUsed = rs.getLong("containerUsed");
+      totalUsed += containerUsed;
+      assertTrue(expectedContainer.remove(containerName));
+    }
+    assertEquals(0, expectedContainer.size());
+    assertEquals(blockContainerMap.keySet().size() * DEFAULT_BLOCK_SIZE,
+        totalUsed);
+
+    Files.delete(Paths.get(dbOutPath));
+  }
+
   @Test
   public void testConvertContainerDB() throws Exception {
     String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
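
For reference, a minimal sketch of inspecting the SQLite file that SQLCLI produces from openContainer.db, outside the test harness. It assumes the xerial sqlite-jdbc driver is on the classpath and that the CLI has already been run as "-p <metadata-dir>/openContainer.db -o out_sql.db"; the class name OpenContainerDbCheck is hypothetical, but the table and column names come from CREATE_OPEN_CONTAINER in the patch above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class OpenContainerDbCheck {
  public static void main(String[] args) throws Exception {
    // Path to the SQLite file written by SQLCLI (assumption: out_sql.db).
    String dbOutPath = args.length > 0 ? args[0] : "out_sql.db";
    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbOutPath);
         Statement stmt = conn.createStatement();
         // Schema per CREATE_OPEN_CONTAINER: one table, two columns.
         ResultSet rs = stmt.executeQuery(
             "SELECT containerName, containerUsed FROM openContainer")) {
      long totalUsed = 0;
      while (rs.next()) {
        // Each row is one open container and its used bytes.
        System.out.printf("%s used=%d%n",
            rs.getString("containerName"), rs.getLong("containerUsed"));
        totalUsed += rs.getLong("containerUsed");
      }
      System.out.println("total used = " + totalUsed);
    }
  }
}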