YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Rohith Sharma K S 2018-02-17 20:30:28 +05:30
parent a1e56a6286
commit 9af30d46c6
131 changed files with 3024 additions and 2536 deletions
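This change splits the single hadoop-yarn-server-timelineservice-hbase module into three submodules, hadoop-yarn-server-timelineservice-hbase-client, -common, and -server, with the server submodule additionally attaching a coprocessor-classified jar (see the assembly changes below). Much of the churn in the storage and test classes that follow comes from two helper classes taking over for HBaseTimelineStorageUtils: HBaseTimelineSchemaUtils for schema-level conversions and HBaseTimelineServerUtils for coprocessor-side cell handling, presumably living in the -common and -server submodules respectively. A minimal sketch of the renamed schema-utility calls, assuming the class stays in the storage.common package as the imports in the diff indicate:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;

    // Day-boundary timestamps and app-id row-key strings now come from the
    // schema utils; both calls previously lived on HBaseTimelineStorageUtils.
    long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(System.currentTimeMillis());
    String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(
        ApplicationId.newInstance(System.currentTimeMillis(), 1));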

View File

@ -213,7 +213,11 @@
</fileSet>
<!-- Copy dependencies from hadoop-yarn-server-timelineservice as well -->
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/target/lib</directory>
<directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/target/lib</directory>
<outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/target/lib</directory>
<outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
</fileSet>
</fileSets>
@ -221,7 +225,8 @@
<moduleSet>
<includes>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice</include>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase</include>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-client</include>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-common</include>
</includes>
<binaries>
<outputDirectory>share/hadoop/${hadoop.component}/timelineservice</outputDirectory>
@ -229,6 +234,19 @@
<unpack>false</unpack>
</binaries>
</moduleSet>
<moduleSet>
<includes>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-server</include>
</includes>
<binaries>
<outputDirectory>share/hadoop/${hadoop.component}/timelineservice</outputDirectory>
<includeDependencies>false</includeDependencies>
<!-- This is the id of the timelineservice-hbase-coprocessor assembly descriptor -->
<attachmentClassifier>coprocessor</attachmentClassifier>
<unpack>false</unpack>
<outputFileNameMapping>hadoop-yarn-server-timelineservice-hbase-coprocessor-${module.version}.${module.extension}</outputFileNameMapping>
</binaries>
</moduleSet>
<moduleSet>
<includes>
<include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-tests</include>

View File

@ -397,7 +397,19 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
<version>${project.version}</version>
</dependency>

View File

@ -60,7 +60,31 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>

View File

@ -50,7 +50,7 @@
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -70,7 +70,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
extends AbstractTimelineReaderHBaseTestBase {
private static long ts = System.currentTimeMillis();
private static long dayTs =
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(ts);
private static String doAsUser = "remoteuser";
@BeforeClass
@ -371,7 +371,7 @@ static void writeApplicationEntities(HBaseTimelineWriterImpl hbi,
BuilderUtils.newApplicationId(timestamp, count++);
ApplicationEntity appEntity = new ApplicationEntity();
appEntity.setId(
HBaseTimelineStorageUtils.convertApplicationIdToString(appId));
HBaseTimelineSchemaUtils.convertApplicationIdToString(appId));
appEntity.setCreatedTime(timestamp);
TimelineEvent created = new TimelineEvent();
@ -929,7 +929,7 @@ public void testGetFlows() throws Exception {
new String[] {"flow1"});
long firstFlowActivity =
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(1425016501000L);
DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +

View File

@ -68,10 +68,11 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@ -173,7 +174,7 @@ public void testWriteNullApplicationToHBase() throws Exception {
scan.setStartRow(Bytes.toBytes(cluster));
scan.setStopRow(Bytes.toBytes(cluster + "1"));
Connection conn = ConnectionFactory.createConnection(c1);
ResultScanner resultScanner = new ApplicationTable()
ResultScanner resultScanner = new ApplicationTableRW()
.getResultScanner(c1, conn, scan);
assertTrue(resultScanner != null);
@ -308,7 +309,7 @@ public void testWriteApplicationToHBase() throws Exception {
Get get = new Get(rowKey);
get.setMaxVersions(Integer.MAX_VALUE);
Connection conn = ConnectionFactory.createConnection(c1);
Result result = new ApplicationTable().getResult(c1, conn, get);
Result result = new ApplicationTableRW().getResult(c1, conn, get);
assertTrue(result != null);
assertEquals(17, result.size());
@ -319,24 +320,24 @@ public void testWriteApplicationToHBase() throws Exception {
appId));
// check info column family
String id1 = ApplicationColumn.ID.readResult(result).toString();
String id1 =
ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
assertEquals(appId, id1);
Long cTime1 =
(Long) ApplicationColumn.CREATED_TIME.readResult(result);
Long cTime1 = (Long)
ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME);
assertEquals(cTime, cTime1);
Map<String, Object> infoColumns =
ApplicationColumnPrefix.INFO.readResults(result,
new StringKeyConverter());
Map<String, Object> infoColumns = ColumnRWHelper.readResults(
result, ApplicationColumnPrefix.INFO, new StringKeyConverter());
assertEquals(infoMap, infoColumns);
// Remember isRelatedTo is of type Map<String, Set<String>>
for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
.entrySet()) {
Object isRelatedToValue =
ApplicationColumnPrefix.IS_RELATED_TO.readResult(result,
isRelatedToEntry.getKey());
Object isRelatedToValue = ColumnRWHelper.readResult(
result, ApplicationColumnPrefix.IS_RELATED_TO,
isRelatedToEntry.getKey());
String compoundValue = isRelatedToValue.toString();
// id7?id9?id6
Set<String> isRelatedToValues =
@ -351,9 +352,9 @@ public void testWriteApplicationToHBase() throws Exception {
// RelatesTo
for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
.entrySet()) {
String compoundValue =
ApplicationColumnPrefix.RELATES_TO.readResult(result,
relatesToEntry.getKey()).toString();
String compoundValue = ColumnRWHelper.readResult(result,
ApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey())
.toString();
// id3?id4?id5
Set<String> relatesToValues =
new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
@ -366,14 +367,13 @@ public void testWriteApplicationToHBase() throws Exception {
KeyConverter<String> stringKeyConverter = new StringKeyConverter();
// Configuration
Map<String, Object> configColumns =
ApplicationColumnPrefix.CONFIG
.readResults(result, stringKeyConverter);
Map<String, Object> configColumns = ColumnRWHelper.readResults(
result, ApplicationColumnPrefix.CONFIG, stringKeyConverter);
assertEquals(conf, configColumns);
NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
ApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
stringKeyConverter);
ColumnRWHelper.readResultsWithTimestamps(
result, ApplicationColumnPrefix.METRIC, stringKeyConverter);
NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
matchMetrics(metricValues, metricMap);
@ -500,7 +500,7 @@ public void testEvents() throws IOException {
event.addInfo(expKey, expVal);
final TimelineEntity entity = new ApplicationEntity();
entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString(
entity.setId(HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(0, 1)));
entity.addEvent(event);
@ -531,7 +531,7 @@ public void testEvents() throws IOException {
Get get = new Get(rowKey);
get.setMaxVersions(Integer.MAX_VALUE);
Connection conn = ConnectionFactory.createConnection(c1);
Result result = new ApplicationTable().getResult(c1, conn, get);
Result result = new ApplicationTableRW().getResult(c1, conn, get);
assertTrue(result != null);
@ -541,8 +541,8 @@ public void testEvents() throws IOException {
appName));
Map<EventColumnName, Object> eventsResult =
ApplicationColumnPrefix.EVENT.readResults(result,
new EventColumnNameConverter());
ColumnRWHelper.readResults(result,
ApplicationColumnPrefix.EVENT, new EventColumnNameConverter());
// there should be only one event
assertEquals(1, eventsResult.size());
for (Map.Entry<EventColumnName, Object> e : eventsResult.entrySet()) {
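The per-column read methods that previously hung off the column enums (ApplicationColumn.ID.readResult(result), ApplicationColumnPrefix.INFO.readResults(...)) are replaced throughout by static helpers on ColumnRWHelper, and the table classes used for reads pick up an RW suffix (ApplicationTable becomes ApplicationTableRW). A short sketch of the new read path, using only the signatures that appear in this diff; the surrounding variables (c1, conn, rowKey) are as in the test above, and imports are as listed at the top of the file:

    // Fetch one application row and read typed columns through ColumnRWHelper.
    Result result = new ApplicationTableRW().getResult(c1, conn, new Get(rowKey));
    String id = ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
    Long createdTime =
        (Long) ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME);
    Map<String, Object> info = ColumnRWHelper.readResults(
        result, ApplicationColumnPrefix.INFO, new StringKeyConverter());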

View File

@ -62,9 +62,10 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@ -73,12 +74,12 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@ -208,7 +209,7 @@ public void testWriteEntityToHBase() throws Exception {
String flow = "some_flow_name";
String flowVersion = "AB7822C10F1111";
long runid = 1002345678919L;
String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1)
);
hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
@ -224,7 +225,7 @@ public void testWriteEntityToHBase() throws Exception {
s.setStartRow(startRow);
s.setMaxVersions(Integer.MAX_VALUE);
Connection conn = ConnectionFactory.createConnection(c1);
ResultScanner scanner = new EntityTable().getResultScanner(c1, conn, s);
ResultScanner scanner = new EntityTableRW().getResultScanner(c1, conn, s);
int rowCount = 0;
int colCount = 0;
@ -238,26 +239,27 @@ public void testWriteEntityToHBase() throws Exception {
entity));
// check info column family
String id1 = EntityColumn.ID.readResult(result).toString();
String id1 =
ColumnRWHelper.readResult(result, EntityColumn.ID).toString();
assertEquals(id, id1);
String type1 = EntityColumn.TYPE.readResult(result).toString();
String type1 =
ColumnRWHelper.readResult(result, EntityColumn.TYPE).toString();
assertEquals(type, type1);
Long cTime1 = (Long) EntityColumn.CREATED_TIME.readResult(result);
Long cTime1 = (Long)
ColumnRWHelper.readResult(result, EntityColumn.CREATED_TIME);
assertEquals(cTime1, cTime);
Map<String, Object> infoColumns =
EntityColumnPrefix.INFO.readResults(result,
new StringKeyConverter());
Map<String, Object> infoColumns = ColumnRWHelper.readResults(
result, EntityColumnPrefix.INFO, new StringKeyConverter());
assertEquals(infoMap, infoColumns);
// Remember isRelatedTo is of type Map<String, Set<String>>
for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
.entrySet()) {
Object isRelatedToValue =
EntityColumnPrefix.IS_RELATED_TO.readResult(result,
isRelatedToEntry.getKey());
Object isRelatedToValue = ColumnRWHelper.readResult(result,
EntityColumnPrefix.IS_RELATED_TO, isRelatedToEntry.getKey());
String compoundValue = isRelatedToValue.toString();
// id7?id9?id6
Set<String> isRelatedToValues =
@ -273,8 +275,9 @@ public void testWriteEntityToHBase() throws Exception {
// RelatesTo
for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
.entrySet()) {
String compoundValue = EntityColumnPrefix.RELATES_TO
.readResult(result, relatesToEntry.getKey()).toString();
String compoundValue = ColumnRWHelper.readResult(result,
EntityColumnPrefix.RELATES_TO, relatesToEntry.getKey())
.toString();
// id3?id4?id5
Set<String> relatesToValues =
new HashSet<String>(
@ -287,13 +290,13 @@ public void testWriteEntityToHBase() throws Exception {
}
// Configuration
Map<String, Object> configColumns =
EntityColumnPrefix.CONFIG.readResults(result, stringKeyConverter);
Map<String, Object> configColumns = ColumnRWHelper.readResults(
result, EntityColumnPrefix.CONFIG, stringKeyConverter);
assertEquals(conf, configColumns);
NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
EntityColumnPrefix.METRIC.readResultsWithTimestamps(result,
stringKeyConverter);
ColumnRWHelper.readResultsWithTimestamps(
result, EntityColumnPrefix.METRIC, stringKeyConverter);
NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
matchMetrics(metricValues, metricMap);
@ -386,14 +389,14 @@ private void verifySubApplicationTableEntities(String cluster, String user,
Set<TimelineMetric> metrics, Long cTime, TimelineMetric m1)
throws IOException {
Scan s = new Scan();
// read from SubApplicationTable
// read from SubApplicationTableRW
byte[] startRow = new SubApplicationRowKeyPrefix(cluster, subAppUser, null,
null, null, null).getRowKeyPrefix();
s.setStartRow(startRow);
s.setMaxVersions(Integer.MAX_VALUE);
Connection conn = ConnectionFactory.createConnection(c1);
ResultScanner scanner =
new SubApplicationTable().getResultScanner(c1, conn, s);
new SubApplicationTableRW().getResultScanner(c1, conn, s);
int rowCount = 0;
int colCount = 0;
@ -407,25 +410,28 @@ private void verifySubApplicationTableEntities(String cluster, String user,
user, entity));
// check info column family
String id1 = SubApplicationColumn.ID.readResult(result).toString();
String id1 = ColumnRWHelper.readResult(result, SubApplicationColumn.ID)
.toString();
assertEquals(id, id1);
String type1 = SubApplicationColumn.TYPE.readResult(result).toString();
String type1 = ColumnRWHelper.readResult(result,
SubApplicationColumn.TYPE).toString();
assertEquals(type, type1);
Long cTime1 =
(Long) SubApplicationColumn.CREATED_TIME.readResult(result);
Long cTime1 = (Long) ColumnRWHelper.readResult(result,
SubApplicationColumn.CREATED_TIME);
assertEquals(cTime1, cTime);
Map<String, Object> infoColumns = SubApplicationColumnPrefix.INFO
.readResults(result, new StringKeyConverter());
Map<String, Object> infoColumns = ColumnRWHelper.readResults(
result, SubApplicationColumnPrefix.INFO, new StringKeyConverter());
assertEquals(infoMap, infoColumns);
// Remember isRelatedTo is of type Map<String, Set<String>>
for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
.entrySet()) {
Object isRelatedToValue = SubApplicationColumnPrefix.IS_RELATED_TO
.readResult(result, isRelatedToEntry.getKey());
Object isRelatedToValue = ColumnRWHelper.readResult(
result, SubApplicationColumnPrefix.IS_RELATED_TO,
isRelatedToEntry.getKey());
String compoundValue = isRelatedToValue.toString();
// id7?id9?id6
Set<String> isRelatedToValues =
@ -440,8 +446,9 @@ private void verifySubApplicationTableEntities(String cluster, String user,
// RelatesTo
for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
.entrySet()) {
String compoundValue = SubApplicationColumnPrefix.RELATES_TO
.readResult(result, relatesToEntry.getKey()).toString();
String compoundValue = ColumnRWHelper.readResult(result,
SubApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey())
.toString();
// id3?id4?id5
Set<String> relatesToValues =
new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
@ -453,13 +460,13 @@ private void verifySubApplicationTableEntities(String cluster, String user,
}
// Configuration
Map<String, Object> configColumns = SubApplicationColumnPrefix.CONFIG
.readResults(result, stringKeyConverter);
Map<String, Object> configColumns = ColumnRWHelper.readResults(
result, SubApplicationColumnPrefix.CONFIG, stringKeyConverter);
assertEquals(conf, configColumns);
NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
SubApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
stringKeyConverter);
ColumnRWHelper.readResultsWithTimestamps(
result, SubApplicationColumnPrefix.METRIC, stringKeyConverter);
NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
matchMetrics(metricValues, metricMap);
@ -511,7 +518,7 @@ public void testEventsWithEmptyInfo() throws IOException {
String flow = "other_flow_name";
String flowVersion = "1111F01C2287BA";
long runid = 1009876543218L;
String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1));
byte[] startRow =
new EntityRowKeyPrefix(cluster, user, flow, runid, appName)
@ -525,7 +532,7 @@ public void testEventsWithEmptyInfo() throws IOException {
s.setStartRow(startRow);
s.addFamily(EntityColumnFamily.INFO.getBytes());
Connection conn = ConnectionFactory.createConnection(c1);
ResultScanner scanner = new EntityTable().getResultScanner(c1, conn, s);
ResultScanner scanner = new EntityTableRW().getResultScanner(c1, conn, s);
int rowCount = 0;
for (Result result : scanner) {
@ -538,8 +545,8 @@ public void testEventsWithEmptyInfo() throws IOException {
entity));
Map<EventColumnName, Object> eventsResult =
EntityColumnPrefix.EVENT.readResults(result,
new EventColumnNameConverter());
ColumnRWHelper.readResults(result,
EntityColumnPrefix.EVENT, new EventColumnNameConverter());
// there should be only one event
assertEquals(1, eventsResult.size());
for (Map.Entry<EventColumnName, Object> e : eventsResult.entrySet()) {
@ -604,7 +611,7 @@ public void testEventsEscapeTs() throws IOException {
final TimelineEntity entity = new ApplicationEntity();
entity.setId(
HBaseTimelineStorageUtils.convertApplicationIdToString(
HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(0, 1)));
entity.addEvent(event);

View File

@ -21,6 +21,9 @@
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
import org.junit.BeforeClass;
import org.junit.Test;
@ -35,10 +38,6 @@
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
/**
* Unit tests for checking different schema prefixes.
*/
@ -61,22 +60,24 @@ public void createWithDefaultPrefix() throws IOException {
conn = ConnectionFactory.createConnection(hbaseConf);
Admin admin = conn.getAdmin();
TableName entityTableName = BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
TableName entityTableName = BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(entityTableName));
assertTrue(entityTableName.getNameAsString().startsWith(
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
Table entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
Table entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
assertNotNull(entityTable);
TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
TableName flowRunTableName = BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(flowRunTableName));
assertTrue(flowRunTableName.getNameAsString().startsWith(
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table flowRunTable = conn.getTable(
BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
assertNotNull(flowRunTable);
}
@ -91,20 +92,22 @@ public void createWithSetPrefix() throws IOException {
conn = ConnectionFactory.createConnection(hbaseConf);
Admin admin = conn.getAdmin();
TableName entityTableName = BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
TableName entityTableName = BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(entityTableName));
assertTrue(entityTableName.getNameAsString().startsWith(prefix));
Table entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
Table entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
assertNotNull(entityTable);
TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
TableName flowRunTableName = BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(flowRunTableName));
assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table flowRunTable = conn.getTable(
BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
assertNotNull(flowRunTable);
// create another set with a diff prefix
@ -114,20 +117,22 @@ public void createWithSetPrefix() throws IOException {
hbaseConf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
prefix);
DataGeneratorForTest.createSchema(hbaseConf);
entityTableName = BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
entityTableName = BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(entityTableName));
assertTrue(entityTableName.getNameAsString().startsWith(prefix));
entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
assertNotNull(entityTable);
flowRunTableName = BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
flowRunTableName = BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
assertTrue(admin.tableExists(flowRunTableName));
assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
flowRunTable = conn.getTable(
BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
assertNotNull(flowRunTable);
hbaseConf
.unset(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME);
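Static table-name resolution likewise moves from BaseTable to the client-side BaseTableRW, with the configuration-key and default-name constants carried by the corresponding *TableRW classes. A minimal sketch, using only the constants exercised above; hbaseConf is assumed to be the cluster Configuration used by these tests:

    // Resolve the (optionally prefix-qualified) table names from configuration.
    TableName entityTableName = BaseTableRW.getTableName(hbaseConf,
        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
    TableName flowRunTableName = BaseTableRW.getTableName(hbaseConf,
        FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);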

View File

@ -52,9 +52,9 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -161,8 +161,8 @@ public void testWriteFlowRunMinMax() throws Exception {
Connection conn = ConnectionFactory.createConnection(c1);
// check in flow activity table
Table table1 = conn.getTable(
BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
FlowActivityTable.DEFAULT_TABLE_NAME));
BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
FlowActivityTableRW.DEFAULT_TABLE_NAME));
byte[] startRow =
new FlowActivityRowKey(cluster, minStartTs, user, flow).getRowKey();
Get g = new Get(startRow);
@ -178,7 +178,7 @@ public void testWriteFlowRunMinMax() throws Exception {
assertEquals(cluster, flowActivityRowKey.getClusterId());
assertEquals(user, flowActivityRowKey.getUserId());
assertEquals(flow, flowActivityRowKey.getFlowName());
Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(minStartTs);
Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(minStartTs);
assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
assertEquals(1, values.size());
checkFlowActivityRunId(runid, flowVersion, values);
@ -292,8 +292,8 @@ private void checkFlowActivityTable(String cluster, String user, String flow,
s.setStopRow(stopRow);
Connection conn = ConnectionFactory.createConnection(c1);
Table table1 = conn.getTable(
BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
FlowActivityTable.DEFAULT_TABLE_NAME));
BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
FlowActivityTableRW.DEFAULT_TABLE_NAME));
ResultScanner scanner = table1.getScanner(s);
int rowCount = 0;
for (Result result : scanner) {
@ -309,7 +309,7 @@ private void checkFlowActivityTable(String cluster, String user, String flow,
assertEquals(cluster, flowActivityRowKey.getClusterId());
assertEquals(user, flowActivityRowKey.getUserId());
assertEquals(flow, flowActivityRowKey.getFlowName());
Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(
Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(
appCreatedTime);
assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
assertEquals(1, values.size());
@ -401,7 +401,7 @@ public void testFlowActivityTableOneFlowMultipleRunIds() throws IOException {
assertEquals(user, flowActivity.getUser());
assertEquals(flow, flowActivity.getFlowName());
long dayTs =
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(appCreatedTime);
assertEquals(dayTs, flowActivity.getDate().getTime());
Set<FlowRunEntity> flowRuns = flowActivity.getFlowRuns();
assertEquals(3, flowRuns.size());
@ -442,8 +442,8 @@ private void checkFlowActivityTableSeveralRuns(String cluster, String user,
s.setStopRow(stopRow);
Connection conn = ConnectionFactory.createConnection(c1);
Table table1 = conn.getTable(
BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
FlowActivityTable.DEFAULT_TABLE_NAME));
BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
FlowActivityTableRW.DEFAULT_TABLE_NAME));
ResultScanner scanner = table1.getScanner(s);
int rowCount = 0;
for (Result result : scanner) {
@ -456,7 +456,7 @@ private void checkFlowActivityTableSeveralRuns(String cluster, String user,
assertEquals(cluster, flowActivityRowKey.getClusterId());
assertEquals(user, flowActivityRowKey.getUserId());
assertEquals(flow, flowActivityRowKey.getFlowName());
Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(
Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(
appCreatedTime);
assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());

View File

@ -62,9 +62,9 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -91,8 +91,8 @@ public static void setupBeforeClass() throws Exception {
@Test
public void checkCoProcessorOff() throws IOException, InterruptedException {
Configuration hbaseConf = util.getConfiguration();
TableName table = BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
TableName table = BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
Connection conn = null;
conn = ConnectionFactory.createConnection(hbaseConf);
Admin admin = conn.getAdmin();
@ -106,9 +106,9 @@ public void checkCoProcessorOff() throws IOException, InterruptedException {
checkCoprocessorExists(table, true);
}
table = BaseTable.getTableName(hbaseConf,
FlowActivityTable.TABLE_NAME_CONF_NAME,
FlowActivityTable.DEFAULT_TABLE_NAME);
table = BaseTableRW.getTableName(hbaseConf,
FlowActivityTableRW.TABLE_NAME_CONF_NAME,
FlowActivityTableRW.DEFAULT_TABLE_NAME);
if (admin.tableExists(table)) {
// check the regions.
// check in flow activity table
@ -116,8 +116,8 @@ public void checkCoProcessorOff() throws IOException, InterruptedException {
checkCoprocessorExists(table, false);
}
table = BaseTable.getTableName(hbaseConf, EntityTable.TABLE_NAME_CONF_NAME,
EntityTable.DEFAULT_TABLE_NAME);
table = BaseTableRW.getTableName(hbaseConf,
EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
if (admin.tableExists(table)) {
// check the regions.
// check in entity run table
@ -224,8 +224,10 @@ public void testWriteFlowRunMinMax() throws Exception {
Connection conn = ConnectionFactory.createConnection(c1);
// check in flow run table
Table table1 = conn.getTable(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table table1 = conn.getTable(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
// scan the table and see that we get back the right min and max
// timestamps
byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
@ -380,8 +382,10 @@ void checkFlowRunTableBatchLimit(String cluster, String user, String flow,
.getRowKey();
s.setStopRow(stopRow);
Connection conn = ConnectionFactory.createConnection(c1);
Table table1 = conn.getTable(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table table1 = conn.getTable(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
ResultScanner scanner = table1.getScanner(s);
int loopCount = 0;
@ -525,8 +529,10 @@ private void checkFlowRunTable(String cluster, String user, String flow,
new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
s.setStopRow(stopRow);
Connection conn = ConnectionFactory.createConnection(c1);
Table table1 = conn.getTable(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table table1 = conn.getTable(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
ResultScanner scanner = table1.getScanner(s);
int rowCount = 0;
@ -810,8 +816,10 @@ private void checkMinMaxFlush(Configuration c1, long minTS, long startTs,
boolean checkMax) throws IOException {
Connection conn = ConnectionFactory.createConnection(c1);
// check in flow run table
Table table1 = conn.getTable(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table table1 = conn.getTable(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
// scan the table and see that we get back the right min and max
// timestamps
byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();

View File

@ -54,9 +54,9 @@
import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
import org.junit.AfterClass;
@ -107,8 +107,10 @@ public void testWriteNonNumericData() throws Exception {
Configuration hbaseConf = util.getConfiguration();
Connection conn = null;
conn = ConnectionFactory.createConnection(hbaseConf);
Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table flowRunTable = conn.getTable(
BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
flowRunTable.put(p);
Get g = new Get(rowKeyBytes);
@ -156,8 +158,10 @@ public void testWriteScanBatchLimit() throws Exception {
Configuration hbaseConf = util.getConfiguration();
Connection conn = null;
conn = ConnectionFactory.createConnection(hbaseConf);
Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table flowRunTable = conn.getTable(
BaseTableRW.getTableName(hbaseConf,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
flowRunTable.put(p);
String rowKey2 = "nonNumericRowKey2";
@ -324,10 +328,12 @@ public void testWriteFlowRunCompaction() throws Exception {
// check in flow run table
HRegionServer server = util.getRSForFirstRegionInTable(
BaseTable.getTableName(c1, FlowRunTable.TABLE_NAME_CONF_NAME,
FlowRunTable.DEFAULT_TABLE_NAME));
List<Region> regions = server.getOnlineRegions(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
BaseTableRW.getTableName(c1, FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
List<Region> regions = server.getOnlineRegions(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
assertTrue("Didn't find any regions for primary table!",
regions.size() > 0);
// flush and compact all the regions of the primary table
@ -352,8 +358,10 @@ private void checkFlowRunTable(String cluster, String user, String flow,
new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
s.setStopRow(stopRow);
Connection conn = ConnectionFactory.createConnection(c1);
Table table1 = conn.getTable(BaseTable.getTableName(c1,
FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
Table table1 = conn.getTable(
BaseTableRW.getTableName(c1,
FlowRunTableRW.TABLE_NAME_CONF_NAME,
FlowRunTableRW.DEFAULT_TABLE_NAME));
ResultScanner scanner = table1.getScanner(s);
int rowCount = 0;
@ -420,7 +428,7 @@ public void checkProcessSummationMoreCellsSumFinal2()
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp and attribute SUM_FINAL
Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, cell1Ts, Bytes.toBytes(cellValue1), tagByteArray);
currentColumnCells.add(c1);
@ -430,7 +438,7 @@ public void checkProcessSummationMoreCellsSumFinal2()
tags.add(t);
tagByteArray = Tag.fromList(tags);
// create a cell with a recent timestamp and attribute SUM_FINAL
Cell c2 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c2 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, cell2Ts, Bytes.toBytes(cellValue2), tagByteArray);
currentColumnCells.add(c2);
@ -440,7 +448,7 @@ public void checkProcessSummationMoreCellsSumFinal2()
tags.add(t);
tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp but has attribute SUM
Cell c3 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c3 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, cell3Ts, Bytes.toBytes(cellValue3), tagByteArray);
currentColumnCells.add(c3);
@ -450,7 +458,7 @@ public void checkProcessSummationMoreCellsSumFinal2()
tags.add(t);
tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp but has attribute SUM
Cell c4 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c4 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, cell4Ts, Bytes.toBytes(cellValue4), tagByteArray);
currentColumnCells.add(c4);
@ -520,7 +528,7 @@ public void checkProcessSummationMoreCellsSumFinalMany() throws IOException {
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp and attribute SUM_FINAL
c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
cellTsFinal, Bytes.toBytes(cellValueFinal), tagByteArray);
currentColumnCells.add(c1);
cellTsFinal++;
@ -534,7 +542,7 @@ public void checkProcessSummationMoreCellsSumFinalMany() throws IOException {
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with attribute SUM
c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
cellTsNotFinal, Bytes.toBytes(cellValueNotFinal), tagByteArray);
currentColumnCells.add(c1);
cellTsNotFinal++;
@ -611,7 +619,7 @@ public void checkProcessSummationMoreCellsSumFinalVariedTags()
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp and attribute SUM_FINAL
c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
cellTsFinal, Bytes.toBytes(cellValueFinal), tagByteArray);
currentColumnCells.add(c1);
cellTsFinal++;
@ -625,7 +633,7 @@ public void checkProcessSummationMoreCellsSumFinalVariedTags()
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp and attribute SUM_FINAL
c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
cellTsFinalNotExpire, Bytes.toBytes(cellValueFinal), tagByteArray);
currentColumnCells.add(c1);
cellTsFinalNotExpire++;
@ -639,7 +647,7 @@ public void checkProcessSummationMoreCellsSumFinalVariedTags()
tags.add(t);
byte[] tagByteArray = Tag.fromList(tags);
// create a cell with attribute SUM
c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
cellTsNotFinal, Bytes.toBytes(cellValueNotFinal), tagByteArray);
currentColumnCells.add(c1);
cellTsNotFinal++;
@ -696,7 +704,7 @@ public void testProcessSummationMoreCellsSumFinal() throws IOException {
SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
// create a cell with a VERY old timestamp and attribute SUM_FINAL
Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, 120L, Bytes.toBytes(cellValue1), tagByteArray);
currentColumnCells.add(c1);
@ -707,7 +715,7 @@ public void testProcessSummationMoreCellsSumFinal() throws IOException {
tagByteArray = Tag.fromList(tags);
// create a cell with a VERY old timestamp but has attribute SUM
Cell c2 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c2 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, 130L, Bytes.toBytes(cellValue2), tagByteArray);
currentColumnCells.add(c2);
List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells,
@ -754,7 +762,7 @@ public void testProcessSummationOneCellSumFinal() throws IOException {
SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
// create a cell with a VERY old timestamp
Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, 120L, Bytes.toBytes(1110L), tagByteArray);
currentColumnCells.add(c1);
@ -792,7 +800,7 @@ public void testProcessSummationOneCell() throws IOException {
SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
aQualifier, currentTimestamp, Bytes.toBytes(1110L), tagByteArray);
currentColumnCells.add(c1);
List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells,
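Cell construction for the flow-run coprocessor path moves to HBaseTimelineServerUtils, which sits with the server-side (coprocessor) code rather than in the shared storage utilities. A sketch of the call as the compaction tests above use it; the variable names mirror the tests and are assumptions here:

    // rowKey/family/qualifier are byte[], ts a long timestamp, value a long metric
    // value, and tagByteArray the serialized SUM / SUM_FINAL attribute tags.
    Cell cell = HBaseTimelineServerUtils.createNewCell(rowKey, family, qualifier,
        ts, Bytes.toBytes(value), tagByteArray);
    currentColumnCells.add(cell);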

View File

@ -0,0 +1,219 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
<name>Apache Hadoop YARN TimelineService HBase Client</name>
<properties>
<!-- Needed for generating FindBugs warnings using parent pom -->
<yarn.basedir>${project.parent.parent.parent.basedir}</yarn.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-sslengine</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
<phase>test-compile</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<additionalDependencies>
<additionnalDependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
</additionnalDependency>
</additionalDependencies>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<includeScope>runtime</includeScope>
<excludeGroupIds>org.slf4j,org.apache.hadoop,com.github.stephenc.findbugs</excludeGroupIds>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
@ -98,7 +99,7 @@ private static CompareOp getHBaseCompareOp(
* @param filter
* @return a {@link QualifierFilter} object
*/
private static <T> Filter createHBaseColQualPrefixFilter(
private static <T extends BaseTable<T>> Filter createHBaseColQualPrefixFilter(
ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
new BinaryPrefixComparator(
@ -114,8 +115,8 @@ private static <T> Filter createHBaseColQualPrefixFilter(
* @param columnPrefix column prefix.
* @return a column qualifier filter.
*/
public static <T> Filter createHBaseQualifierFilter(CompareOp compareOp,
ColumnPrefix<T> columnPrefix) {
public static <T extends BaseTable<T>> Filter createHBaseQualifierFilter(
CompareOp compareOp, ColumnPrefix<T> columnPrefix) {
return new QualifierFilter(compareOp,
new BinaryPrefixComparator(
columnPrefix.getColumnPrefixBytes("")));
@ -133,7 +134,8 @@ public static <T> Filter createHBaseQualifierFilter(CompareOp compareOp,
* @return a filter list.
* @throws IOException if any problem occurs while creating the filters.
*/
public static <T> Filter createFilterForConfsOrMetricsToRetrieve(
public static <T extends BaseTable<T>> Filter
createFilterForConfsOrMetricsToRetrieve(
TimelineFilterList confsOrMetricToRetrieve, ColumnFamily<T> columnFamily,
ColumnPrefix<T> columnPrefix) throws IOException {
Filter familyFilter = new FamilyFilter(CompareOp.EQUAL,
@ -164,8 +166,9 @@ public static <T> Filter createFilterForConfsOrMetricsToRetrieve(
* @return 2 single column value filters wrapped in a filter list.
* @throws IOException if any problem is encountered while encoding value.
*/
public static <T> FilterList createSingleColValueFiltersByRange(
Column<T> column, Object startValue, Object endValue) throws IOException {
public static <T extends BaseTable<T>> FilterList
createSingleColValueFiltersByRange(Column<T> column,
Object startValue, Object endValue) throws IOException {
FilterList list = new FilterList();
Filter singleColValFilterStart = createHBaseSingleColValueFilter(
column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
@ -190,8 +193,9 @@ public static <T> FilterList createSingleColValueFiltersByRange(
* @return a SingleColumnValue Filter
* @throws IOException if any exception.
*/
public static <T> Filter createHBaseSingleColValueFilter(Column<T> column,
Object value, CompareOp op) throws IOException {
public static <T extends BaseTable<T>> Filter
createHBaseSingleColValueFilter(Column<T> column,
Object value, CompareOp op) throws IOException {
Filter singleColValFilter = createHBaseSingleColValueFilter(
column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
column.getValueConverter().encodeValue(value), op, true);
@ -263,7 +267,8 @@ public static Set<String> fetchColumnsFromFilterList(
* @return A {@link FilterList} object.
* @throws IOException if any problem occurs while creating the filter list.
*/
public static <T> FilterList createHBaseFilterList(ColumnPrefix<T> colPrefix,
public static <T extends BaseTable<T>> FilterList createHBaseFilterList(
ColumnPrefix<T> colPrefix,
TimelineFilterList filterList) throws IOException {
FilterList list =
new FilterList(getHBaseOperator(filterList.getOperator()));
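The filter helpers tighten their type parameter from an unbounded <T> to <T extends BaseTable<T>>, matching Column<T> and ColumnPrefix<T>, which are now parameterized over the table they belong to. Callers should be unaffected, since the bound is inferred from the concrete column type they pass. A hedged sketch of such a call (class qualifier elided, since the diff does not show it), assuming ApplicationColumnPrefix remains a ColumnPrefix over the application table type:

    // T is inferred from the column prefix, so existing call sites keep compiling.
    Filter prefixFilter = createHBaseQualifierFilter(
        CompareOp.EQUAL, ApplicationColumnPrefix.INFO);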

View File

@ -40,10 +40,14 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
@ -55,20 +59,24 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -121,14 +129,15 @@ protected void serviceInit(Configuration conf) throws Exception {
Configuration hbaseConf =
HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
conn = ConnectionFactory.createConnection(hbaseConf);
entityTable = new EntityTable().getTableMutator(hbaseConf, conn);
appToFlowTable = new AppToFlowTable().getTableMutator(hbaseConf, conn);
applicationTable = new ApplicationTable().getTableMutator(hbaseConf, conn);
flowRunTable = new FlowRunTable().getTableMutator(hbaseConf, conn);
entityTable = new EntityTableRW().getTableMutator(hbaseConf, conn);
appToFlowTable = new AppToFlowTableRW().getTableMutator(hbaseConf, conn);
applicationTable =
new ApplicationTableRW().getTableMutator(hbaseConf, conn);
flowRunTable = new FlowRunTableRW().getTableMutator(hbaseConf, conn);
flowActivityTable =
new FlowActivityTable().getTableMutator(hbaseConf, conn);
new FlowActivityTableRW().getTableMutator(hbaseConf, conn);
subApplicationTable =
new SubApplicationTable().getTableMutator(hbaseConf, conn);
new SubApplicationTableRW().getTableMutator(hbaseConf, conn);
UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled() ?
UserGroupInformation.getLoginUser() :
@ -232,12 +241,12 @@ private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
// store in App to flow table
AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(appId);
byte[] rowKey = appToFlowRowKey.getRowKey();
AppToFlowColumnPrefix.FLOW_NAME.store(rowKey, appToFlowTable, clusterId,
null, flowName);
AppToFlowColumnPrefix.FLOW_RUN_ID.store(rowKey, appToFlowTable, clusterId,
null, flowRunId);
AppToFlowColumnPrefix.USER_ID.store(rowKey, appToFlowTable, clusterId, null,
userId);
ColumnRWHelper.store(rowKey, appToFlowTable,
AppToFlowColumnPrefix.FLOW_NAME, clusterId, null, flowName);
ColumnRWHelper.store(rowKey, appToFlowTable,
AppToFlowColumnPrefix.FLOW_RUN_ID, clusterId, null, flowRunId);
ColumnRWHelper.store(rowKey, appToFlowTable, AppToFlowColumnPrefix.USER_ID,
clusterId, null, userId);
// store in flow run table
storeAppCreatedInFlowRunTable(flowRunRowKey, appId, te);
@ -248,8 +257,8 @@ private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
appCreatedTimeStamp, flowRunRowKey.getUserId(), flowName)
.getRowKey();
byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
FlowActivityColumnPrefix.RUN_ID.store(flowActivityRowKeyBytes,
flowActivityTable, qualifier, null, flowVersion,
ColumnRWHelper.store(flowActivityRowKeyBytes, flowActivityTable,
FlowActivityColumnPrefix.RUN_ID, qualifier, null, flowVersion,
AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
}
@ -259,8 +268,8 @@ private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
private void storeAppCreatedInFlowRunTable(FlowRunRowKey flowRunRowKey,
String appId, TimelineEntity te) throws IOException {
byte[] rowKey = flowRunRowKey.getRowKey();
FlowRunColumn.MIN_START_TIME.store(rowKey, flowRunTable, null,
te.getCreatedTime(),
ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumn.MIN_START_TIME,
null, te.getCreatedTime(),
AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
}
@ -282,8 +291,8 @@ private void onApplicationFinished(FlowRunRowKey flowRunRowKey,
appFinishedTimeStamp, flowRunRowKey.getUserId(),
flowRunRowKey.getFlowName()).getRowKey();
byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
FlowActivityColumnPrefix.RUN_ID.store(rowKey, flowActivityTable, qualifier,
null, flowVersion,
ColumnRWHelper.store(rowKey, flowActivityTable,
FlowActivityColumnPrefix.RUN_ID, qualifier, null, flowVersion,
AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
}
@ -296,8 +305,8 @@ private void storeAppFinishedInFlowRunTable(FlowRunRowKey flowRunRowKey,
byte[] rowKey = flowRunRowKey.getRowKey();
Attribute attributeAppId =
AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId);
FlowRunColumn.MAX_END_TIME.store(rowKey, flowRunTable, null,
appFinishedTimeStamp, attributeAppId);
ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumn.MAX_END_TIME,
null, appFinishedTimeStamp, attributeAppId);
// store the final value of metrics since application has finished
Set<TimelineMetric> metrics = te.getMetrics();
@ -328,7 +337,7 @@ private void storeFlowMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
Map<Long, Number> timeseries = metric.getValues();
for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
Long timestamp = timeseriesEntry.getKey();
FlowRunColumnPrefix.METRIC.store(rowKey, flowRunTable,
ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumnPrefix.METRIC,
metricColumnQualifier, timestamp, timeseriesEntry.getValue(),
attributes);
}
@ -338,7 +347,7 @@ private void storeFlowMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
/**
* Stores the Relations from the {@linkplain TimelineEntity} object.
*/
private <T> void storeRelations(byte[] rowKey,
private <T extends BaseTable<T>> void storeRelations(byte[] rowKey,
Map<String, Set<String>> connectedEntities, ColumnPrefix<T> columnPrefix,
TypedBufferedMutator<T> table) throws IOException {
if (connectedEntities != null) {
@ -347,9 +356,9 @@ private <T> void storeRelations(byte[] rowKey,
// id3?id4?id5
String compoundValue =
Separator.VALUES.joinEncoded(connectedEntity.getValue());
columnPrefix.store(rowKey, table,
stringKeyConverter.encode(connectedEntity.getKey()), null,
compoundValue);
ColumnRWHelper.store(rowKey, table, columnPrefix,
stringKeyConverter.encode(connectedEntity.getKey()),
null, compoundValue);
}
}
}
@ -362,11 +371,12 @@ private void store(byte[] rowKey, TimelineEntity te,
Tables table) throws IOException {
switch (table) {
case APPLICATION_TABLE:
ApplicationColumn.ID.store(rowKey, applicationTable, null, te.getId());
ApplicationColumn.CREATED_TIME.store(rowKey, applicationTable, null,
te.getCreatedTime());
ApplicationColumn.FLOW_VERSION.store(rowKey, applicationTable, null,
flowVersion);
ColumnRWHelper.store(rowKey, applicationTable,
ApplicationColumn.ID, null, te.getId());
ColumnRWHelper.store(rowKey, applicationTable,
ApplicationColumn.CREATED_TIME, null, te.getCreatedTime());
ColumnRWHelper.store(rowKey, applicationTable,
ApplicationColumn.FLOW_VERSION, null, flowVersion);
storeInfo(rowKey, te.getInfo(), flowVersion, ApplicationColumnPrefix.INFO,
applicationTable);
storeMetrics(rowKey, te.getMetrics(), ApplicationColumnPrefix.METRIC,
@ -381,11 +391,14 @@ private void store(byte[] rowKey, TimelineEntity te,
ApplicationColumnPrefix.RELATES_TO, applicationTable);
break;
case ENTITY_TABLE:
EntityColumn.ID.store(rowKey, entityTable, null, te.getId());
EntityColumn.TYPE.store(rowKey, entityTable, null, te.getType());
EntityColumn.CREATED_TIME.store(rowKey, entityTable, null,
te.getCreatedTime());
EntityColumn.FLOW_VERSION.store(rowKey, entityTable, null, flowVersion);
ColumnRWHelper.store(rowKey, entityTable,
EntityColumn.ID, null, te.getId());
ColumnRWHelper.store(rowKey, entityTable,
EntityColumn.TYPE, null, te.getType());
ColumnRWHelper.store(rowKey, entityTable,
EntityColumn.CREATED_TIME, null, te.getCreatedTime());
ColumnRWHelper.store(rowKey, entityTable,
EntityColumn.FLOW_VERSION, null, flowVersion);
storeInfo(rowKey, te.getInfo(), flowVersion, EntityColumnPrefix.INFO,
entityTable);
storeMetrics(rowKey, te.getMetrics(), EntityColumnPrefix.METRIC,
@ -400,14 +413,14 @@ private void store(byte[] rowKey, TimelineEntity te,
EntityColumnPrefix.RELATES_TO, entityTable);
break;
case SUBAPPLICATION_TABLE:
SubApplicationColumn.ID.store(rowKey, subApplicationTable, null,
te.getId());
SubApplicationColumn.TYPE.store(rowKey, subApplicationTable, null,
te.getType());
SubApplicationColumn.CREATED_TIME.store(rowKey, subApplicationTable, null,
te.getCreatedTime());
SubApplicationColumn.FLOW_VERSION.store(rowKey, subApplicationTable, null,
flowVersion);
ColumnRWHelper.store(rowKey, subApplicationTable, SubApplicationColumn.ID,
null, te.getId());
ColumnRWHelper.store(rowKey, subApplicationTable,
SubApplicationColumn.TYPE, null, te.getType());
ColumnRWHelper.store(rowKey, subApplicationTable,
SubApplicationColumn.CREATED_TIME, null, te.getCreatedTime());
ColumnRWHelper.store(rowKey, subApplicationTable,
SubApplicationColumn.FLOW_VERSION, null, flowVersion);
storeInfo(rowKey, te.getInfo(), flowVersion,
SubApplicationColumnPrefix.INFO, subApplicationTable);
storeMetrics(rowKey, te.getMetrics(), SubApplicationColumnPrefix.METRIC,
@ -430,12 +443,13 @@ private void store(byte[] rowKey, TimelineEntity te,
/**
* stores the info information from {@linkplain TimelineEntity}.
*/
private <T> void storeInfo(byte[] rowKey, Map<String, Object> info,
String flowVersion, ColumnPrefix<T> columnPrefix,
TypedBufferedMutator<T> table) throws IOException {
private <T extends BaseTable<T>> void storeInfo(byte[] rowKey,
Map<String, Object> info, String flowVersion,
ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
throws IOException {
if (info != null) {
for (Map.Entry<String, Object> entry : info.entrySet()) {
columnPrefix.store(rowKey, table,
ColumnRWHelper.store(rowKey, table, columnPrefix,
stringKeyConverter.encode(entry.getKey()), null, entry.getValue());
}
}
@ -444,13 +458,15 @@ private <T> void storeInfo(byte[] rowKey, Map<String, Object> info,
/**
* stores the config information from {@linkplain TimelineEntity}.
*/
private <T> void storeConfig(byte[] rowKey, Map<String, String> config,
private <T extends BaseTable<T>> void storeConfig(
byte[] rowKey, Map<String, String> config,
ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
throws IOException {
if (config != null) {
for (Map.Entry<String, String> entry : config.entrySet()) {
byte[] configKey = stringKeyConverter.encode(entry.getKey());
columnPrefix.store(rowKey, table, configKey, null, entry.getValue());
ColumnRWHelper.store(rowKey, table, columnPrefix, configKey,
null, entry.getValue());
}
}
}
@ -459,7 +475,8 @@ private <T> void storeConfig(byte[] rowKey, Map<String, String> config,
* stores the {@linkplain TimelineMetric} information from the
* {@linkplain TimelineEvent} object.
*/
private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
private <T extends BaseTable<T>> void storeMetrics(
byte[] rowKey, Set<TimelineMetric> metrics,
ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
throws IOException {
if (metrics != null) {
@ -469,8 +486,8 @@ private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
Map<Long, Number> timeseries = metric.getValues();
for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
Long timestamp = timeseriesEntry.getKey();
columnPrefix.store(rowKey, table, metricColumnQualifier, timestamp,
timeseriesEntry.getValue());
ColumnRWHelper.store(rowKey, table, columnPrefix,
metricColumnQualifier, timestamp, timeseriesEntry.getValue());
}
}
}
@ -479,7 +496,8 @@ private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
/**
* Stores the events from the {@linkplain TimelineEvent} object.
*/
private <T> void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
private <T extends BaseTable<T>> void storeEvents(
byte[] rowKey, Set<TimelineEvent> events,
ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
throws IOException {
if (events != null) {
@ -499,16 +517,16 @@ private <T> void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
byte[] columnQualifierBytes =
new EventColumnName(eventId, eventTimestamp, null)
.getColumnQualifier();
columnPrefix.store(rowKey, table, columnQualifierBytes, null,
Separator.EMPTY_BYTES);
ColumnRWHelper.store(rowKey, table, columnPrefix,
columnQualifierBytes, null, Separator.EMPTY_BYTES);
} else {
for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
// eventId=infoKey
byte[] columnQualifierBytes =
new EventColumnName(eventId, eventTimestamp, info.getKey())
.getColumnQualifier();
columnPrefix.store(rowKey, table, columnQualifierBytes, null,
info.getValue());
ColumnRWHelper.store(rowKey, table, columnPrefix,
columnQualifierBytes, null, info.getValue());
} // for info: eventInfo
}
}
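
Every write in this class follows the same mechanical rewrite: a store() call on a Column or ColumnPrefix constant becomes a static ColumnRWHelper.store() call that receives the constant as an argument, so the constants carry only column metadata and no HBase write logic. A condensed before/after sketch, with the row key and mutator assumed to be set up as in serviceInit():

// Before (removed): the enum constant issued the Put itself.
//   ApplicationColumn.ID.store(rowKey, applicationTable, null, te.getId());
// After (added): the helper issues the Put, using the constant as metadata.
ColumnRWHelper.store(rowKey, applicationTable, ApplicationColumn.ID,
    null, te.getId());
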

View File

@ -37,13 +37,13 @@
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
@ -94,26 +94,27 @@ public static void main(String[] args) throws Exception {
String entityTableName = commandLine.getOptionValue(
ENTITY_TABLE_NAME_SHORT);
if (StringUtils.isNotBlank(entityTableName)) {
hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
hbaseConf.set(EntityTableRW.TABLE_NAME_CONF_NAME, entityTableName);
}
// Grab the entity metrics TTL
String entityTableMetricsTTL = commandLine.getOptionValue(
ENTITY_METRICS_TTL_OPTION_SHORT);
if (StringUtils.isNotBlank(entityTableMetricsTTL)) {
int entityMetricsTTL = Integer.parseInt(entityTableMetricsTTL);
new EntityTable().setMetricsTTL(entityMetricsTTL, hbaseConf);
new EntityTableRW().setMetricsTTL(entityMetricsTTL, hbaseConf);
}
// Grab the appToflowTableName argument
String appToflowTableName = commandLine.getOptionValue(
APP_TO_FLOW_TABLE_NAME_SHORT);
if (StringUtils.isNotBlank(appToflowTableName)) {
hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
hbaseConf.set(
AppToFlowTableRW.TABLE_NAME_CONF_NAME, appToflowTableName);
}
// Grab the applicationTableName argument
String applicationTableName = commandLine.getOptionValue(
APP_TABLE_NAME_SHORT);
if (StringUtils.isNotBlank(applicationTableName)) {
hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
hbaseConf.set(ApplicationTableRW.TABLE_NAME_CONF_NAME,
applicationTableName);
}
// Grab the application metrics TTL
@ -121,14 +122,14 @@ public static void main(String[] args) throws Exception {
APP_METRICS_TTL_OPTION_SHORT);
if (StringUtils.isNotBlank(applicationTableMetricsTTL)) {
int appMetricsTTL = Integer.parseInt(applicationTableMetricsTTL);
new ApplicationTable().setMetricsTTL(appMetricsTTL, hbaseConf);
new ApplicationTableRW().setMetricsTTL(appMetricsTTL, hbaseConf);
}
// Grab the subApplicationTableName argument
String subApplicationTableName = commandLine.getOptionValue(
SUB_APP_TABLE_NAME_SHORT);
if (StringUtils.isNotBlank(subApplicationTableName)) {
hbaseConf.set(SubApplicationTable.TABLE_NAME_CONF_NAME,
hbaseConf.set(SubApplicationTableRW.TABLE_NAME_CONF_NAME,
subApplicationTableName);
}
// Grab the subApplication metrics TTL
@ -136,7 +137,7 @@ public static void main(String[] args) throws Exception {
.getOptionValue(SUB_APP_METRICS_TTL_OPTION_SHORT);
if (StringUtils.isNotBlank(subApplicationTableMetricsTTL)) {
int subAppMetricsTTL = Integer.parseInt(subApplicationTableMetricsTTL);
new SubApplicationTable().setMetricsTTL(subAppMetricsTTL, hbaseConf);
new SubApplicationTableRW().setMetricsTTL(subAppMetricsTTL, hbaseConf);
}
// create all table schemas in hbase
@ -303,7 +304,7 @@ public static void createAllTables(Configuration hbaseConf,
throw new IOException("Cannot create table since admin is null");
}
try {
new EntityTable().createTable(admin, hbaseConf);
new EntityTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
@ -312,7 +313,7 @@ public static void createAllTables(Configuration hbaseConf,
}
}
try {
new AppToFlowTable().createTable(admin, hbaseConf);
new AppToFlowTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
@ -321,7 +322,7 @@ public static void createAllTables(Configuration hbaseConf,
}
}
try {
new ApplicationTable().createTable(admin, hbaseConf);
new ApplicationTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
@ -330,7 +331,7 @@ public static void createAllTables(Configuration hbaseConf,
}
}
try {
new FlowRunTable().createTable(admin, hbaseConf);
new FlowRunTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
@ -339,7 +340,7 @@ public static void createAllTables(Configuration hbaseConf,
}
}
try {
new FlowActivityTable().createTable(admin, hbaseConf);
new FlowActivityTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
@ -348,7 +349,7 @@ public static void createAllTables(Configuration hbaseConf,
}
}
try {
new SubApplicationTable().createTable(admin, hbaseConf);
new SubApplicationTableRW().createTable(admin, hbaseConf);
} catch (IOException e) {
if (skipExisting) {
LOG.warn("Skip and continue on: " + e.getMessage());
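
The renamed *TableRW classes keep the creator's two-step flow: resolve the effective HBase configuration, then issue DDL through an Admin handle. A hedged sketch of creating one table outside the CLI (yarnConf, and a surrounding method declaring the checked exceptions, are assumed):

// Illustrative only; mirrors the try/catch blocks in createAllTables() below.
Configuration hbaseConf =
    HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(yarnConf);
try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
     Admin admin = conn.getAdmin()) {
  new EntityTableRW().createTable(admin, hbaseConf);
}
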

View File

@ -26,48 +26,15 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The application table as column families info, config and metrics. Info
* stores information about a YARN application entity, config stores
* configuration data of a YARN application, metrics stores the metrics of a
* YARN application. This table is entirely analogous to the entity table but
* created for better performance.
*
* Example application table record:
*
* <pre>
* |-------------------------------------------------------------------------|
* | Row | Column Family | Column Family| Column Family|
* | key | info | metrics | config |
* |-------------------------------------------------------------------------|
* | clusterId! | id:appId | metricId1: | configKey1: |
* | userName! | | metricValue1 | configValue1 |
* | flowName! | created_time: | @timestamp1 | |
* | flowRunId! | 1392993084018 | | configKey2: |
* | AppId | | metriciD1: | configValue2 |
* | | i!infoKey: | metricValue2 | |
* | | infoValue | @timestamp2 | |
* | | | | |
* | | r!relatesToKey: | metricId2: | |
* | | id3=id4=id5 | metricValue1 | |
* | | | @timestamp2 | |
* | | s!isRelatedToKey: | | |
* | | id7=id9=id6 | | |
* | | | | |
* | | e!eventId=timestamp=infoKey: | | |
* | | eventInfoValue | | |
* | | | | |
* | | flowVersion: | | |
* | | versionValue | | |
* |-------------------------------------------------------------------------|
* </pre>
* Create, read and write to the Application Table.
*/
public class ApplicationTable extends BaseTable<ApplicationTable> {
public class ApplicationTableRW extends BaseTableRW<ApplicationTable> {
/** application prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + "application";
@ -100,9 +67,9 @@ public class ApplicationTable extends BaseTable<ApplicationTable> {
private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
private static final Logger LOG =
LoggerFactory.getLogger(ApplicationTable.class);
LoggerFactory.getLogger(ApplicationTableRW.class);
public ApplicationTable() {
public ApplicationTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -110,8 +77,8 @@ public ApplicationTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)

View File

@ -25,7 +25,7 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -33,42 +33,9 @@
import java.io.IOException;
/**
* The app_flow table as column families mapping. Mapping stores
* appId to flowName and flowRunId mapping information
*
* Example app_flow table record:
*
* <pre>
* |--------------------------------------|
* | Row | Column Family |
* | key | mapping |
* |--------------------------------------|
* | appId | flow_name!cluster1: |
* | | foo@daily_hive_report |
* | | |
* | | flow_run_id!cluster1: |
* | | 1452828720457 |
* | | |
* | | user_id!cluster1: |
* | | admin |
* | | |
* | | flow_name!cluster2: |
* | | bar@ad_hoc_query |
* | | |
* | | flow_run_id!cluster2: |
* | | 1452828498752 |
* | | |
* | | user_id!cluster2: |
* | | joe |
* | | |
* |--------------------------------------|
* </pre>
*
* It is possible (although unlikely) in a multi-cluster environment that there
* may be more than one applications for a given app id. Different clusters are
* recorded as different sets of columns.
* Create, read and write to the AppToFlow Table.
*/
public class AppToFlowTable extends BaseTable<AppToFlowTable> {
public class AppToFlowTableRW extends BaseTableRW<AppToFlowTable> {
/** app_flow prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + "app-flow";
@ -80,9 +47,9 @@ public class AppToFlowTable extends BaseTable<AppToFlowTable> {
private static final String DEFAULT_TABLE_NAME = "timelineservice.app_flow";
private static final Logger LOG =
LoggerFactory.getLogger(AppToFlowTable.class);
LoggerFactory.getLogger(AppToFlowTableRW.class);
public AppToFlowTable() {
public AppToFlowTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -90,8 +57,8 @@ public AppToFlowTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)

View File

@ -37,7 +37,7 @@
*
* @param <T> reference to the table instance class itself for type safety.
*/
public abstract class BaseTable<T> {
public abstract class BaseTableRW<T extends BaseTable<T>> {
/**
* Name of config variable that is used to point to this table.
@ -56,7 +56,7 @@ public abstract class BaseTable<T> {
* @param defaultTableName Default table name if table from config is not
* found.
*/
protected BaseTable(String tableNameConfName, String defaultTableName) {
protected BaseTableRW(String tableNameConfName, String defaultTableName) {
this.tableNameConfName = tableNameConfName;
this.defaultTableName = defaultTableName;
}
@ -82,7 +82,7 @@ public TypedBufferedMutator<T> getTableMutator(Configuration hbaseConf,
// This is how service initialization should hang on to this variable, with
// the proper type
TypedBufferedMutator<T> table =
new BufferedMutatorDelegator<T>(bufferedMutator);
new TypedBufferedMutator<T>(bufferedMutator);
return table;
}

View File

@ -15,14 +15,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -32,109 +26,37 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
/**
* This class is meant to be used only by explicit Columns, and not directly to
* write by clients.
*
* @param <T> refers to the table.
* A set of utility functions that read from or write to a column.
* This class is meant to be used only by explicit Columns,
* and not directly by clients to perform writes.
*/
public class ColumnHelper<T> {
public final class ColumnRWHelper {
private static final Logger LOG =
LoggerFactory.getLogger(ColumnHelper.class);
private final ColumnFamily<T> columnFamily;
/**
* Local copy of bytes representation of columnFamily so that we can avoid
* cloning a new copy over and over.
*/
private final byte[] columnFamilyBytes;
private final ValueConverter converter;
private final boolean supplementTs;
public ColumnHelper(ColumnFamily<T> columnFamily) {
this(columnFamily, GenericConverter.getInstance());
}
public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
this(columnFamily, converter, false);
private ColumnRWHelper() {
}
/**
* @param columnFamily column family implementation.
* @param converter converter use to encode/decode values stored in the column
* or column prefix.
* @param needSupplementTs flag to indicate if cell timestamp needs to be
* modified for this column by calling
* {@link TimestampGenerator#getSupplementedTimestamp(long, String)}. This
* would be required for columns(such as metrics in flow run table) where
* potential collisions can occur due to same timestamp.
*/
public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter,
boolean needSupplementTs) {
this.columnFamily = columnFamily;
columnFamilyBytes = columnFamily.getBytes();
if (converter == null) {
this.converter = GenericConverter.getInstance();
} else {
this.converter = converter;
}
this.supplementTs = needSupplementTs;
}
/**
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey
* identifying the row to write. Nothing gets written when null.
* @param tableMutator
* used to modify the underlying HBase table
* @param columnQualifier
* column qualifier. Nothing gets written when null.
* @param timestamp
* version timestamp. When null the current timestamp multiplied with
* TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
* app id will be used
* @param inputValue
* the value to write to the rowKey and column qualifier. Nothing
* gets written when null.
* @param attributes Attributes to be set for HBase Put.
* @throws IOException if any problem occurs during store operation(sending
* mutation to table).
*/
public void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
byte[] columnQualifier, Long timestamp, Object inputValue,
Attribute... attributes) throws IOException {
if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
return;
}
Put p = new Put(rowKey);
timestamp = getPutTimestamp(timestamp, attributes);
p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
converter.encodeValue(inputValue));
if ((attributes != null) && (attributes.length > 0)) {
for (Attribute attribute : attributes) {
p.setAttribute(attribute.getName(), attribute.getValue());
}
}
tableMutator.mutate(p);
}
/*
* Figures out the cell timestamp used in the Put For storing.
* Will supplement the timestamp if required. Typically done for flow run
* table.If we supplement the timestamp, we left shift the timestamp and
* supplement it with the AppId id so that there are no collisions in the flow
* run table's cells.
*/
private long getPutTimestamp(Long timestamp, Attribute[] attributes) {
private static long getPutTimestamp(
Long timestamp, boolean supplementTs, Attribute[] attributes) {
if (timestamp == null) {
timestamp = System.currentTimeMillis();
}
if (!this.supplementTs) {
if (!supplementTs) {
return timestamp;
} else {
String appId = getAppIdFromAttributes(attributes);
@ -144,7 +66,7 @@ private long getPutTimestamp(Long timestamp, Attribute[] attributes) {
}
}
private String getAppIdFromAttributes(Attribute[] attributes) {
private static String getAppIdFromAttributes(Attribute[] attributes) {
if (attributes == null) {
return null;
}
@ -159,10 +81,76 @@ private String getAppIdFromAttributes(Attribute[] attributes) {
}
/**
* @return the column family for this column implementation.
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey
* identifying the row to write. Nothing gets written when null.
* @param tableMutator
* used to modify the underlying HBase table
* @param column the column that is to be modified
* @param timestamp
* version timestamp. When null the current timestamp multiplied with
* TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
* app id will be used
* @param inputValue
* the value to write to the rowKey and column qualifier. Nothing
* gets written when null.
* @param attributes Attributes to be set for HBase Put.
* @throws IOException if any problem occurs during store operation(sending
* mutation to table).
*/
public ColumnFamily<T> getColumnFamily() {
return columnFamily;
public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
Column<?> column, Long timestamp,
Object inputValue, Attribute... attributes)
throws IOException {
store(rowKey, tableMutator, column.getColumnFamilyBytes(),
column.getColumnQualifierBytes(), timestamp,
column.supplementCellTimestamp(), inputValue,
column.getValueConverter(),
column.getCombinedAttrsWithAggr(attributes));
}
/**
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey
* identifying the row to write. Nothing gets written when null.
* @param tableMutator
* used to modify the underlying HBase table
* @param columnFamilyBytes column family to which the value is written.
* @param columnQualifier
* column qualifier. Nothing gets written when null.
* @param timestamp
* version timestamp. When null the current timestamp multiplied with
* TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
* app id will be used
* @param inputValue
* the value to write to the rowKey and column qualifier. Nothing
* gets written when null.
* @param converter used to encode the value before it is stored.
* @param attributes Attributes to be set for HBase Put.
* @throws IOException if any problem occurs during store operation(sending
* mutation to table).
*/
public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
byte[] columnFamilyBytes, byte[] columnQualifier, Long timestamp,
boolean supplementTs, Object inputValue, ValueConverter converter,
Attribute... attributes) throws IOException {
if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
return;
}
Put p = new Put(rowKey);
timestamp = getPutTimestamp(timestamp, supplementTs, attributes);
p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
converter.encodeValue(inputValue));
if ((attributes != null) && (attributes.length > 0)) {
for (Attribute attribute : attributes) {
p.setAttribute(attribute.getName(), attribute.getValue());
}
}
tableMutator.mutate(p);
}
/**
@ -170,12 +158,15 @@ public ColumnFamily<T> getColumnFamily() {
* value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
*
* @param result from which to read the value. Cannot be null
* @param columnFamilyBytes column family from which to read the value.
* @param columnQualifierBytes referring to the column to be read.
* @param converter used to decode the stored value.
* @return latest version of the specified column of whichever object was
* written.
* @throws IOException if any problem occurs while reading result.
*/
public Object readResult(Result result, byte[] columnQualifierBytes)
public static Object readResult(Result result, byte[] columnFamilyBytes,
byte[] columnQualifierBytes, ValueConverter converter)
throws IOException {
if (result == null || columnQualifierBytes == null) {
return null;
@ -188,6 +179,87 @@ public Object readResult(Result result, byte[] columnQualifierBytes)
return converter.decodeValue(value);
}
/**
* Get the latest version of this specified column. Note: this call clones the
* value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
*
* @param result from which to read the value. Cannot be null
* @param column the column that the result can be parsed to
* @return latest version of the specified column of whichever object was
* written.
* @throws IOException if any problem occurs while reading result.
*/
public static Object readResult(Result result, Column<?> column)
throws IOException {
return readResult(result, column.getColumnFamilyBytes(),
column.getColumnQualifierBytes(), column.getValueConverter());
}
/**
* Get the latest version of this specified column. Note: this call clones the
* value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
*
* @param result Cannot be null
* @param columnPrefix column prefix to read from
* @param qualifier column qualifier. Nothing gets read when null.
* @return result object (can be cast to whatever object was written to) or
* null when specified column qualifier for this prefix doesn't exist
* in the result.
* @throws IOException if there is any exception encountered while reading
* result.
*/
public static Object readResult(Result result, ColumnPrefix<?> columnPrefix,
String qualifier) throws IOException {
byte[] columnQualifier = ColumnHelper.getColumnQualifier(
columnPrefix.getColumnPrefixInBytes(), qualifier);
return readResult(
result, columnPrefix.getColumnFamilyBytes(),
columnQualifier, columnPrefix.getValueConverter());
}
/**
*
* @param <K> identifies the type of key converter.
* @param result from which to read columns.
* @param keyConverter used to convert column bytes to the appropriate key
* type
* @return the latest values of columns in the column family with this prefix
* (or all of them if the prefix value is null).
* @throws IOException if there is any exception encountered while reading
* results.
*/
public static <K> Map<K, Object> readResults(Result result,
ColumnPrefix<?> columnPrefix, KeyConverter<K> keyConverter)
throws IOException {
return readResults(result,
columnPrefix.getColumnFamilyBytes(),
columnPrefix.getColumnPrefixInBytes(),
keyConverter, columnPrefix.getValueConverter());
}
/**
* @param result from which to reads data with timestamps.
* @param <K> identifies the type of key converter.
* @param <V> the type of the values. The values will be cast into that type.
* @param keyConverter used to convert column bytes to the appropriate key
* type.
* @return the cell values at each respective time in for form
* {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
* idB={timestamp3->value3}, idC={timestamp1->value4}}}
* @throws IOException if there is any exception encountered while reading
* result.
*/
public static <K, V> NavigableMap<K, NavigableMap<Long, V>>
readResultsWithTimestamps(Result result, ColumnPrefix<?> columnPrefix,
KeyConverter<K> keyConverter) throws IOException {
return readResultsWithTimestamps(result,
columnPrefix.getColumnFamilyBytes(),
columnPrefix.getColumnPrefixInBytes(),
keyConverter, columnPrefix.getValueConverter(),
columnPrefix.supplementCellTimeStamp());
}
/**
* @param result from which to reads data with timestamps
* @param columnPrefixBytes optional prefix to limit columns. If null all
@ -203,22 +275,24 @@ public Object readResult(Result result, byte[] columnQualifierBytes)
* @throws IOException if any problem occurs while reading results.
*/
@SuppressWarnings("unchecked")
public <K, V> NavigableMap<K, NavigableMap<Long, V>>
readResultsWithTimestamps(Result result, byte[] columnPrefixBytes,
KeyConverter<K> keyConverter) throws IOException {
public static <K, V> NavigableMap<K, NavigableMap<Long, V>>
readResultsWithTimestamps(Result result, byte[] columnFamilyBytes,
byte[] columnPrefixBytes, KeyConverter<K> keyConverter,
ValueConverter valueConverter, boolean supplementTs)
throws IOException {
NavigableMap<K, NavigableMap<Long, V>> results = new TreeMap<>();
if (result != null) {
NavigableMap<
byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> resultMap =
result.getMap();
result.getMap();
NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
resultMap.get(columnFamilyBytes);
// could be that there is no such column family.
if (columnCellMap != null) {
for (Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap
for (Map.Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap
.entrySet()) {
K converterColumnKey = null;
if (columnPrefixBytes == null) {
@ -255,9 +329,9 @@ public Object readResult(Result result, byte[] columnQualifierBytes)
new TreeMap<Long, V>();
NavigableMap<Long, byte[]> cells = entry.getValue();
if (cells != null) {
for (Entry<Long, byte[]> cell : cells.entrySet()) {
for (Map.Entry<Long, byte[]> cell : cells.entrySet()) {
V value =
(V) converter.decodeValue(cell.getValue());
(V) valueConverter.decodeValue(cell.getValue());
Long ts = supplementTs ? TimestampGenerator.
getTruncatedTimestamp(cell.getKey()) : cell.getKey();
cellResults.put(ts, value);
@ -286,14 +360,15 @@ public Object readResult(Result result, byte[] columnQualifierBytes)
* returning byte arrays of values that were not Strings.
* @throws IOException if any problem occurs while reading results.
*/
public <K> Map<K, Object> readResults(Result result,
byte[] columnPrefixBytes, KeyConverter<K> keyConverter)
public static <K> Map<K, Object> readResults(Result result,
byte[] columnFamilyBytes, byte[] columnPrefixBytes,
KeyConverter<K> keyConverter, ValueConverter valueConverter)
throws IOException {
Map<K, Object> results = new HashMap<K, Object>();
if (result != null) {
Map<byte[], byte[]> columns = result.getFamilyMap(columnFamilyBytes);
for (Entry<byte[], byte[]> entry : columns.entrySet()) {
for (Map.Entry<byte[], byte[]> entry : columns.entrySet()) {
byte[] columnKey = entry.getKey();
if (columnKey != null && columnKey.length > 0) {
@ -327,7 +402,7 @@ public <K> Map<K, Object> readResults(Result result,
// If the columnPrefix is null (we want all columns), or the actual
// prefix matches the given prefix we want this column
if (converterColumnKey != null) {
Object value = converter.decodeValue(entry.getValue());
Object value = valueConverter.decodeValue(entry.getValue());
// we return the columnQualifier in parts since we don't know
// which part is of which data type.
results.put(converterColumnKey, value);
@ -339,76 +414,74 @@ public <K> Map<K, Object> readResults(Result result,
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* {@link Separator#QUALIFIERS} is permissible in the qualifier
* as it is joined only with the column prefix bytes.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey identifying the row to write. Nothing gets written when null.
* @param tableMutator used to modify the underlying HBase table. Caller is
* responsible to pass a mutator for the table that actually has this
* column.
* @param qualifier column qualifier. Nothing gets written when null.
* @param timestamp version timestamp. When null the server timestamp will be
* used.
* @param attributes attributes for the mutation that are used by the
* coprocessor to set/read the cell tags.
* @param inputValue the value to write to the rowKey and column qualifier.
* Nothing gets written when null.
* @throws IOException if there is any exception encountered while doing
* store operation(sending mutation to the table).
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
String qualifier) {
// We don't want column names to have spaces / tabs.
byte[] encodedQualifier =
Separator.encode(qualifier, Separator.SPACE, Separator.TAB);
if (columnPrefixBytes == null) {
return encodedQualifier;
public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
ColumnPrefix<?> columnPrefix, byte[] qualifier, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
// Convert qualifier to lower case, strip of separators and tag on column
// prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, encodedQualifier);
return columnQualifier;
byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier);
Attribute[] combinedAttributes =
columnPrefix.getCombinedAttrsWithAggr(attributes);
store(rowKey, tableMutator, columnPrefix.getColumnFamilyBytes(),
columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(),
inputValue, columnPrefix.getValueConverter(), combinedAttributes);
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey identifying the row to write. Nothing gets written when null.
* @param tableMutator used to modify the underlying HBase table. Caller is
* responsible to pass a mutator for the table that actually has this
* column.
* @param qualifier column qualifier. Nothing gets written when null.
* @param timestamp version timestamp. When null the server timestamp will be
* used.
* @param attributes attributes for the mutation that are used by the
* coprocessor to set/read the cell tags.
* @param inputValue the value to write to the rowKey and column qualifier.
* Nothing gets written when null.
* @throws IOException if there is any exception encountered while doing
* store operation(sending mutation to the table).
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
long qualifier) {
if (columnPrefixBytes == null) {
return Bytes.toBytes(qualifier);
public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
ColumnPrefix<?> columnPrefix, String qualifier, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
// Convert qualifier to lower case, strip of separators and tag on column
// prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, Bytes.toBytes(qualifier));
return columnQualifier;
byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier);
Attribute[] combinedAttributes =
columnPrefix.getCombinedAttrsWithAggr(attributes);
store(rowKey, tableMutator, columnPrefix.getColumnFamilyBytes(),
columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(),
inputValue, columnPrefix.getValueConverter(), combinedAttributes);
}
public ValueConverter getValueConverter() {
return converter;
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier the byte representation for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
byte[] qualifier) {
if (columnPrefixBytes == null) {
return qualifier;
}
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, qualifier);
return columnQualifier;
}
}
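
Reads follow the same inversion as writes: instead of calling readResult()/readResults() on a Column or ColumnPrefix instance, callers hand the constant to the static helpers above. A hedged sketch, assuming a Result already fetched from the application table and the StringKeyConverter from this common package:

// Single column: latest value of the application id column.
Object appId = ColumnRWHelper.readResult(result, ApplicationColumn.ID);
// Prefixed columns: all info keys, decoded through a string key converter.
Map<String, Object> infoValues = ColumnRWHelper.readResults(
    result, ApplicationColumnPrefix.INFO, new StringKeyConverter());
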

View File

@ -0,0 +1,121 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Query;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A bunch of utility functions used in HBase TimelineService backend.
*/
public final class HBaseTimelineStorageUtils {
private static final Logger LOG =
LoggerFactory.getLogger(HBaseTimelineStorageUtils.class);
private HBaseTimelineStorageUtils() {
}
/**
* @param conf YARN configuration. Used to see if there is an explicit config
* pointing to the HBase config file to read. It should not be null
* or a NullPointerException will be thrown.
* @return a configuration with the HBase configuration from the classpath,
* optionally overwritten by the timeline service configuration URL if
* specified.
* @throws MalformedURLException if a timeline service HBase configuration URL
* is specified but is a malformed URL.
*/
public static Configuration getTimelineServiceHBaseConf(Configuration conf)
throws MalformedURLException {
if (conf == null) {
throw new NullPointerException();
}
Configuration hbaseConf;
String timelineServiceHBaseConfFileURL =
conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
if (timelineServiceHBaseConfFileURL != null
&& timelineServiceHBaseConfFileURL.length() > 0) {
LOG.info("Using hbase configuration at " +
timelineServiceHBaseConfFileURL);
// create a clone so that we don't mess with out input one
hbaseConf = new Configuration(conf);
Configuration plainHBaseConf = new Configuration(false);
URL hbaseSiteXML = new URL(timelineServiceHBaseConfFileURL);
plainHBaseConf.addResource(hbaseSiteXML);
HBaseConfiguration.merge(hbaseConf, plainHBaseConf);
} else {
// default to what is on the classpath
hbaseConf = HBaseConfiguration.create(conf);
}
return hbaseConf;
}
/**
* Given a row key prefix stored in a byte array, return a byte array for its
* immediate next row key.
*
* @param rowKeyPrefix The provided row key prefix, represented in an array.
* @return the closest next row key of the provided row key.
*/
public static byte[] calculateTheClosestNextRowKeyForPrefix(
byte[] rowKeyPrefix) {
// Essentially we are treating it like an 'unsigned very very long' and
// doing +1 manually.
// Search for the place where the trailing 0xFFs start
int offset = rowKeyPrefix.length;
while (offset > 0) {
if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
break;
}
offset--;
}
if (offset == 0) {
// We got an 0xFFFF... (only FFs) stopRow value which is
// the last possible prefix before the end of the table.
// So set it to stop at the 'end of the table'
return HConstants.EMPTY_END_ROW;
}
// Copy the right length of the original
byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
// And increment the last one
newStopRow[newStopRow.length - 1]++;
return newStopRow;
}
public static void setMetricsTimeRange(Query query, byte[] metricsCf,
long tsBegin, long tsEnd) {
if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
query.setColumnFamilyTimeRange(metricsCf,
tsBegin, ((tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE : (tsEnd + 1)));
}
}
}
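
A short worked example of calculateTheClosestNextRowKeyForPrefix, following the logic above (values chosen purely for illustration):

// Trailing 0xFF bytes are dropped and the last remaining byte is incremented:
//   {0x01, 0x02, 0xFF, 0xFF}  ->  {0x01, 0x03}
// A prefix made up entirely of 0xFF bytes yields HConstants.EMPTY_END_ROW,
// so the scan runs to the end of the table.
byte[] stopRow = HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
    new byte[] {0x01, 0x02, (byte) 0xFF, (byte) 0xFF});
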

View File

@ -30,7 +30,7 @@
*
* @param <T> The class referring to the table to be written to.
*/
class BufferedMutatorDelegator<T> implements TypedBufferedMutator<T> {
public class TypedBufferedMutator<T extends BaseTable<T>> {
private final BufferedMutator bufferedMutator;
@ -38,7 +38,7 @@ class BufferedMutatorDelegator<T> implements TypedBufferedMutator<T> {
* @param bufferedMutator the mutator to be wrapped for delegation. Shall not
* be null.
*/
public BufferedMutatorDelegator(BufferedMutator bufferedMutator) {
public TypedBufferedMutator(BufferedMutator bufferedMutator) {
this.bufferedMutator = bufferedMutator;
}

View File

@ -26,49 +26,15 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The entity table as column families info, config and metrics. Info stores
* information about a timeline entity object config stores configuration data
* of a timeline entity object metrics stores the metrics of a timeline entity
* object
*
* Example entity table record:
*
* <pre>
* |-------------------------------------------------------------------------|
* | Row | Column Family | Column Family| Column Family|
* | key | info | metrics | config |
* |-------------------------------------------------------------------------|
* | userName! | id:entityId | metricId1: | configKey1: |
* | clusterId! | | metricValue1 | configValue1 |
* | flowName! | type:entityType | @timestamp1 | |
* | flowRunId! | | | configKey2: |
* | AppId! | created_time: | metricId1: | configValue2 |
* | entityType!| 1392993084018 | metricValue2 | |
* | idPrefix! | | @timestamp2 | |
* | entityId | i!infoKey: | | |
* | | infoValue | metricId1: | |
* | | | metricValue1 | |
* | | r!relatesToKey: | @timestamp2 | |
* | | id3=id4=id5 | | |
* | | | | |
* | | s!isRelatedToKey | | |
* | | id7=id9=id6 | | |
* | | | | |
* | | e!eventId=timestamp=infoKey: | | |
* | | eventInfoValue | | |
* | | | | |
* | | flowVersion: | | |
* | | versionValue | | |
* |-------------------------------------------------------------------------|
* </pre>
* Create, read and write to the Entity Table.
*/
public class EntityTable extends BaseTable<EntityTable> {
public class EntityTableRW extends BaseTableRW<EntityTable> {
/** entity prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + "entity";
@ -100,9 +66,9 @@ public class EntityTable extends BaseTable<EntityTable> {
private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
private static final Logger LOG =
LoggerFactory.getLogger(EntityTable.class);
LoggerFactory.getLogger(EntityTableRW.class);
public EntityTable() {
public EntityTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -110,8 +76,8 @@ public EntityTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)

View File

@ -26,32 +26,14 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The flow activity table has column family info
* Stores the daily activity record for flows
* Useful as a quick lookup of what flows were
* running on a given day
*
* Example flow activity table record:
*
* <pre>
* |-------------------------------------------|
* | Row key | Column Family |
* | | info |
* |-------------------------------------------|
* | clusterId! | r!runid1:version1 |
* | inv Top of | |
* | Day! | r!runid2:version7 |
* | userName! | |
* | flowName | |
* |-------------------------------------------|
* </pre>
* Create, read and write to the FlowActivity Table.
*/
public class FlowActivityTable extends BaseTable<FlowActivityTable> {
public class FlowActivityTableRW extends BaseTableRW<FlowActivityTable> {
/** flow activity table prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowactivity";
@ -64,12 +46,12 @@ public class FlowActivityTable extends BaseTable<FlowActivityTable> {
"timelineservice.flowactivity";
private static final Logger LOG =
LoggerFactory.getLogger(FlowActivityTable.class);
LoggerFactory.getLogger(FlowActivityTableRW.class);
/** default max number of versions. */
public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
public FlowActivityTable() {
public FlowActivityTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -77,8 +59,8 @@ public FlowActivityTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)

View File

@ -26,66 +26,16 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
/**
* The flow run table has column family info
* Stores per flow run information
* aggregated across applications.
*
* Metrics are also stored in the info column family.
*
* Example flow run table record:
*
* <pre>
* flow_run table
* |-------------------------------------------|
* | Row key | Column Family |
* | | info |
* |-------------------------------------------|
* | clusterId! | flow_version:version7 |
* | userName! | |
* | flowName! | running_apps:1 |
* | flowRunId | |
* | | min_start_time:1392995080000 |
* | | #0:"" |
* | | |
* | | min_start_time:1392995081012 |
* | | #0:appId2 |
* | | |
* | | min_start_time:1392993083210 |
* | | #0:appId3 |
* | | |
* | | |
* | | max_end_time:1392993084018 |
* | | #0:"" |
* | | |
* | | |
* | | m!mapInputRecords:127 |
* | | #0:"" |
* | | |
* | | m!mapInputRecords:31 |
* | | #2:appId2 |
* | | |
* | | m!mapInputRecords:37 |
* | | #1:appId3 |
* | | |
* | | |
* | | m!mapOutputRecords:181 |
* | | #0:"" |
* | | |
* | | m!mapOutputRecords:37 |
* | | #1:appId3 |
* | | |
* | | |
* |-------------------------------------------|
* </pre>
* Create, read and write to the FlowRun table.
*/
public class FlowRunTable extends BaseTable<FlowRunTable> {
public class FlowRunTableRW extends BaseTableRW<FlowRunTable> {
/** entity prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowrun";
@ -97,12 +47,12 @@ public class FlowRunTable extends BaseTable<FlowRunTable> {
public static final String DEFAULT_TABLE_NAME = "timelineservice.flowrun";
private static final Logger LOG =
LoggerFactory.getLogger(FlowRunTable.class);
LoggerFactory.getLogger(FlowRunTableRW.class);
/** default max number of versions. */
public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
public FlowRunTable() {
public FlowRunTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -110,8 +60,8 @@ public FlowRunTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)
@ -142,7 +92,8 @@ public void createTable(Admin admin, Configuration hbaseConf)
Path coprocessorJarPath = new Path(coprocessorJarPathStr);
LOG.info("CoprocessorJarPath=" + coprocessorJarPath.toString());
flowRunTableDescp.addCoprocessor(
FlowRunCoprocessor.class.getCanonicalName(), coprocessorJarPath,
"org.apache.hadoop.yarn.server.timelineservice.storage." +
"flow.FlowRunCoprocessor", coprocessorJarPath,
Coprocessor.PRIORITY_USER, null);
admin.createTable(flowRunTableDescp);
LOG.info("Status of table creation for " + table.getNameAsString() + "="

View File

@ -26,7 +26,8 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.webapp.NotFoundException;
/**
@ -39,7 +40,7 @@ public abstract class AbstractTimelineStorageReader {
/**
* Used to look up the flow context.
*/
private final AppToFlowTable appToFlowTable = new AppToFlowTable();
private final AppToFlowTableRW appToFlowTable = new AppToFlowTableRW();
public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
context = ctxt;
@ -66,12 +67,12 @@ protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
Get get = new Get(rowKey);
Result result = appToFlowTable.getResult(hbaseConf, conn, get);
if (result != null && !result.isEmpty()) {
Object flowName =
AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
Object flowRunId =
AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
Object userId =
AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
Object flowName = ColumnRWHelper.readResult(
result, AppToFlowColumnPrefix.FLOW_NAME, clusterId);
Object flowRunId = ColumnRWHelper.readResult(
result, AppToFlowColumnPrefix.FLOW_RUN_ID, clusterId);
Object userId = ColumnRWHelper.readResult(
result, AppToFlowColumnPrefix.USER_ID, clusterId);
if (flowName == null || userId == null || flowRunId == null) {
throw new NotFoundException(
"Unable to find the context flow name, and flow run id, "

View File

@ -49,8 +49,9 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
@ -63,23 +64,23 @@
* application table.
*/
class ApplicationEntityReader extends GenericEntityReader {
private static final ApplicationTable APPLICATION_TABLE =
new ApplicationTable();
private static final ApplicationTableRW APPLICATION_TABLE =
new ApplicationTableRW();
public ApplicationEntityReader(TimelineReaderContext ctxt,
ApplicationEntityReader(TimelineReaderContext ctxt,
TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
super(ctxt, entityFilters, toRetrieve);
}
public ApplicationEntityReader(TimelineReaderContext ctxt,
ApplicationEntityReader(TimelineReaderContext ctxt,
TimelineDataToRetrieve toRetrieve) {
super(ctxt, toRetrieve);
}
/**
* Uses the {@link ApplicationTable}.
* Uses the {@link ApplicationTableRW}.
*/
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return APPLICATION_TABLE;
}
@ -430,12 +431,14 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
}
TimelineEntity entity = new TimelineEntity();
entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
String entityId = ApplicationColumn.ID.readResult(result).toString();
String entityId =
ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
entity.setId(entityId);
TimelineEntityFilters filters = getFilters();
// fetch created time
Long createdTime = (Long) ApplicationColumn.CREATED_TIME.readResult(result);
Long createdTime = (Long) ColumnRWHelper.readResult(result,
ApplicationColumn.CREATED_TIME);
entity.setCreatedTime(createdTime);
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();

View File

@ -33,7 +33,7 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -51,7 +51,7 @@ public final class EntityTypeReader extends AbstractTimelineStorageReader {
private static final Logger LOG =
LoggerFactory.getLogger(EntityTypeReader.class);
private static final EntityTable ENTITY_TABLE = new EntityTable();
private static final EntityTableRW ENTITY_TABLE = new EntityTableRW();
public EntityTypeReader(TimelineReaderContext context) {
super(context);

View File

@ -35,13 +35,14 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.base.Preconditions;
@ -51,8 +52,8 @@
* flow activity table.
*/
class FlowActivityEntityReader extends TimelineEntityReader {
private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
new FlowActivityTable();
private static final FlowActivityTableRW FLOW_ACTIVITY_TABLE =
new FlowActivityTableRW();
/**
* Used to convert Long key components to and from storage format.
@ -60,21 +61,21 @@ class FlowActivityEntityReader extends TimelineEntityReader {
private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
public FlowActivityEntityReader(TimelineReaderContext ctxt,
FlowActivityEntityReader(TimelineReaderContext ctxt,
TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
super(ctxt, entityFilters, toRetrieve);
}
public FlowActivityEntityReader(TimelineReaderContext ctxt,
FlowActivityEntityReader(TimelineReaderContext ctxt,
TimelineDataToRetrieve toRetrieve) {
super(ctxt, toRetrieve);
}
/**
* Uses the {@link FlowActivityTable}.
* Uses the {@link FlowActivityTableRW}.
*/
@Override
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return FLOW_ACTIVITY_TABLE;
}
@ -164,8 +165,8 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
flowActivity.setId(flowActivity.getId());
// get the list of run ids along with the version that are associated with
// this flow on this day
Map<Long, Object> runIdsMap =
FlowActivityColumnPrefix.RUN_ID.readResults(result, longKeyConverter);
Map<Long, Object> runIdsMap = ColumnRWHelper.readResults(result,
FlowActivityColumnPrefix.RUN_ID, longKeyConverter);
for (Map.Entry<Long, Object> e : runIdsMap.entrySet()) {
Long runId = e.getKey();
String version = (String)e.getValue();

View File

@ -43,7 +43,8 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
@ -51,7 +52,7 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.base.Preconditions;
@ -61,23 +62,23 @@
* table.
*/
class FlowRunEntityReader extends TimelineEntityReader {
private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
private static final FlowRunTableRW FLOW_RUN_TABLE = new FlowRunTableRW();
public FlowRunEntityReader(TimelineReaderContext ctxt,
FlowRunEntityReader(TimelineReaderContext ctxt,
TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
super(ctxt, entityFilters, toRetrieve);
}
public FlowRunEntityReader(TimelineReaderContext ctxt,
FlowRunEntityReader(TimelineReaderContext ctxt,
TimelineDataToRetrieve toRetrieve) {
super(ctxt, toRetrieve);
}
/**
* Uses the {@link FlowRunTable}.
* Uses the {@link FlowRunTableRW}.
*/
@Override
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return FLOW_RUN_TABLE;
}
@ -261,19 +262,22 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
flowRun.setName(rowKey.getFlowName());
// read the start time
Long startTime = (Long) FlowRunColumn.MIN_START_TIME.readResult(result);
Long startTime = (Long) ColumnRWHelper.readResult(result,
FlowRunColumn.MIN_START_TIME);
if (startTime != null) {
flowRun.setStartTime(startTime.longValue());
}
// read the end time if available
Long endTime = (Long) FlowRunColumn.MAX_END_TIME.readResult(result);
Long endTime = (Long) ColumnRWHelper.readResult(result,
FlowRunColumn.MAX_END_TIME);
if (endTime != null) {
flowRun.setMaxEndTime(endTime.longValue());
}
// read the flow version
String version = (String) FlowRunColumn.FLOW_VERSION.readResult(result);
String version = (String) ColumnRWHelper.readResult(result,
FlowRunColumn.FLOW_VERSION);
if (version != null) {
flowRun.setVersion(version);
}

View File

@ -46,7 +46,9 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
@ -57,7 +59,7 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.base.Preconditions;
@ -67,7 +69,7 @@
* table.
*/
class GenericEntityReader extends TimelineEntityReader {
private static final EntityTable ENTITY_TABLE = new EntityTable();
private static final EntityTableRW ENTITY_TABLE = new EntityTableRW();
/**
* Used to convert strings key components to and from storage format.
@ -75,20 +77,20 @@ class GenericEntityReader extends TimelineEntityReader {
private final KeyConverter<String> stringKeyConverter =
new StringKeyConverter();
public GenericEntityReader(TimelineReaderContext ctxt,
GenericEntityReader(TimelineReaderContext ctxt,
TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
super(ctxt, entityFilters, toRetrieve);
}
public GenericEntityReader(TimelineReaderContext ctxt,
GenericEntityReader(TimelineReaderContext ctxt,
TimelineDataToRetrieve toRetrieve) {
super(ctxt, toRetrieve);
}
/**
* Uses the {@link EntityTable}.
* Uses the {@link EntityTableRW}.
*/
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return ENTITY_TABLE;
}
@ -543,7 +545,8 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
TimelineEntityFilters filters = getFilters();
// fetch created time
Long createdTime = (Long) EntityColumn.CREATED_TIME.readResult(result);
Long createdTime = (Long) ColumnRWHelper.readResult(result,
EntityColumn.CREATED_TIME);
entity.setCreatedTime(createdTime);
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
@ -635,11 +638,12 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
* @param isConfig if true, means we are reading configs, otherwise info.
* @throws IOException if any problem is encountered while reading result.
*/
protected <T> void readKeyValuePairs(TimelineEntity entity, Result result,
protected <T extends BaseTable<T>> void readKeyValuePairs(
TimelineEntity entity, Result result,
ColumnPrefix<T> prefix, boolean isConfig) throws IOException {
// info and configuration are of type Map<String, Object or String>
Map<String, Object> columns =
prefix.readResults(result, stringKeyConverter);
ColumnRWHelper.readResults(result, prefix, stringKeyConverter);
if (isConfig) {
for (Map.Entry<String, Object> column : columns.entrySet()) {
entity.addConfig(column.getKey(), column.getValue().toString());

View File

@ -42,7 +42,8 @@
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
@ -51,14 +52,14 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.base.Preconditions;
class SubApplicationEntityReader extends GenericEntityReader {
private static final SubApplicationTable SUB_APPLICATION_TABLE =
new SubApplicationTable();
private static final SubApplicationTableRW SUB_APPLICATION_TABLE =
new SubApplicationTableRW();
SubApplicationEntityReader(TimelineReaderContext ctxt,
TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
@ -71,9 +72,9 @@ class SubApplicationEntityReader extends GenericEntityReader {
}
/**
* Uses the {@link SubApplicationTable}.
* Uses the {@link SubApplicationTableRW}.
*/
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return SUB_APPLICATION_TABLE;
}
@ -403,8 +404,8 @@ protected TimelineEntity parseEntity(Result result) throws IOException {
TimelineEntityFilters filters = getFilters();
// fetch created time
Long createdTime =
(Long) SubApplicationColumn.CREATED_TIME.readResult(result);
Long createdTime = (Long) ColumnRWHelper.readResult(result,
SubApplicationColumn.CREATED_TIME);
entity.setCreatedTime(createdTime);
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();

View File

@ -44,7 +44,9 @@
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
@ -72,7 +74,7 @@ public abstract class TimelineEntityReader extends
/**
* Main table the entity reader uses.
*/
private BaseTable<?> table;
private BaseTableRW<?> table;
/**
* Used to convert strings key components to and from storage format.
@ -261,7 +263,7 @@ public Set<TimelineEntity> readEntities(Configuration hbaseConf,
*
* @return A reference to the table.
*/
protected BaseTable<?> getTable() {
protected BaseTableRW<?> getTable() {
return table;
}
@ -314,8 +316,8 @@ protected abstract TimelineEntity parseEntity(Result result)
protected void readMetrics(TimelineEntity entity, Result result,
ColumnPrefix<?> columnPrefix) throws IOException {
NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
columnPrefix.readResultsWithTimestamps(
result, stringKeyConverter);
ColumnRWHelper.readResultsWithTimestamps(
result, columnPrefix, stringKeyConverter);
for (Map.Entry<String, NavigableMap<Long, Number>> metricResult:
metricsResult.entrySet()) {
TimelineMetric metric = new TimelineMetric();
@ -340,7 +342,7 @@ public boolean isSingleEntityRead() {
return singleEntityRead;
}
protected void setTable(BaseTable<?> baseTable) {
protected void setTable(BaseTableRW<?> baseTable) {
this.table = baseTable;
}
@ -367,8 +369,9 @@ protected boolean hasField(EnumSet<Field> fieldsToRetrieve,
* @param columns set of column qualifiers.
* @return filter list.
*/
protected <T> FilterList createFiltersFromColumnQualifiers(
ColumnPrefix<T> colPrefix, Set<String> columns) {
protected <T extends BaseTable<T>> FilterList
createFiltersFromColumnQualifiers(
ColumnPrefix<T> colPrefix, Set<String> columns) {
FilterList list = new FilterList(Operator.MUST_PASS_ONE);
for (String column : columns) {
// For columns which have compound column qualifiers (eg. events), we need
@ -381,8 +384,8 @@ protected <T> FilterList createFiltersFromColumnQualifiers(
return list;
}
protected <T> byte[] createColQualifierPrefix(ColumnPrefix<T> colPrefix,
String column) {
protected <T extends BaseTable<T>> byte[] createColQualifierPrefix(
ColumnPrefix<T> colPrefix, String column) {
if (colPrefix == ApplicationColumnPrefix.EVENT
|| colPrefix == EntityColumnPrefix.EVENT) {
return new EventColumnName(column, null, null).getColumnQualifier();
@ -402,11 +405,12 @@ protected <T> byte[] createColQualifierPrefix(ColumnPrefix<T> colPrefix,
* isRelatedTo, otherwise its added to relatesTo.
* @throws IOException if any problem is encountered while reading result.
*/
protected <T> void readRelationship(TimelineEntity entity, Result result,
protected <T extends BaseTable<T>> void readRelationship(
TimelineEntity entity, Result result,
ColumnPrefix<T> prefix, boolean isRelatedTo) throws IOException {
// isRelatedTo and relatesTo are of type Map<String, Set<String>>
Map<String, Object> columns =
prefix.readResults(result, stringKeyConverter);
Map<String, Object> columns = ColumnRWHelper.readResults(
result, prefix, stringKeyConverter);
for (Map.Entry<String, Object> column : columns.entrySet()) {
for (String id : Separator.VALUES.splitEncoded(column.getValue()
.toString())) {
@ -430,11 +434,12 @@ protected <T> void readRelationship(TimelineEntity entity, Result result,
* @param prefix column prefix.
* @throws IOException if any problem is encountered while reading result.
*/
protected static <T> void readEvents(TimelineEntity entity, Result result,
protected static <T extends BaseTable<T>> void readEvents(
TimelineEntity entity, Result result,
ColumnPrefix<T> prefix) throws IOException {
Map<String, TimelineEvent> eventsMap = new HashMap<>();
Map<EventColumnName, Object> eventsResult =
prefix.readResults(result, new EventColumnNameConverter());
Map<EventColumnName, Object> eventsResult = ColumnRWHelper.readResults(
result, prefix, new EventColumnNameConverter());
for (Map.Entry<EventColumnName, Object>
eventResult : eventsResult.entrySet()) {
EventColumnName eventColumnName = eventResult.getKey();
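Earlier in this file, readMetrics likewise routes the timestamped read through ColumnRWHelper, keeping the metric id to (timestamp, value) map shape. A small sketch of consuming that structure, reusing the StringKeyConverter the readers already use (the wrapper method is illustrative, not part of this patch):

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;

/** Illustrative sketch, not part of the patch. */
final class MetricsReadSketch {
  private MetricsReadSketch() {
  }

  static void printMetrics(Result result, ColumnPrefix<?> metricPrefix)
      throws IOException {
    NavigableMap<String, NavigableMap<Long, Number>> metrics =
        ColumnRWHelper.readResultsWithTimestamps(
            result, metricPrefix, new StringKeyConverter());
    for (Map.Entry<String, NavigableMap<Long, Number>> metric : metrics.entrySet()) {
      // Key is the metric id; the inner map is timestamp -> value.
      System.out.println(metric.getKey() + " -> " + metric.getValue());
    }
  }
}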

View File

@ -26,52 +26,15 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The sub application table has column families:
* info, config and metrics.
* Info stores information about a timeline entity object
* config stores configuration data of a timeline entity object
* metrics stores the metrics of a timeline entity object
*
* Example sub application table record:
*
* <pre>
* |-------------------------------------------------------------------------|
* | Row | Column Family | Column Family| Column Family|
* | key | info | metrics | config |
* |-------------------------------------------------------------------------|
* | subAppUserId! | id:entityId | metricId1: | configKey1: |
* | clusterId! | type:entityType | metricValue1 | configValue1 |
* | entityType! | | @timestamp1 | |
* | idPrefix!| | | | configKey2: |
* | entityId! | created_time: | metricId1: | configValue2 |
* | userId | 1392993084018 | metricValue2 | |
* | | | @timestamp2 | |
* | | i!infoKey: | | |
* | | infoValue | metricId1: | |
* | | | metricValue1 | |
* | | | @timestamp2 | |
* | | e!eventId=timestamp= | | |
* | | infoKey: | | |
* | | eventInfoValue | | |
* | | | | |
* | | r!relatesToKey: | | |
* | | id3=id4=id5 | | |
* | | | | |
* | | s!isRelatedToKey | | |
* | | id7=id9=id6 | | |
* | | | | |
* | | flowVersion: | | |
* | | versionValue | | |
* |-------------------------------------------------------------------------|
* </pre>
* Create, read and write to the SubApplication table.
*/
public class SubApplicationTable extends BaseTable<SubApplicationTable> {
public class SubApplicationTableRW extends BaseTableRW<SubApplicationTable> {
/** sub app prefix. */
private static final String PREFIX =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + "subapplication";
@ -104,9 +67,9 @@ public class SubApplicationTable extends BaseTable<SubApplicationTable> {
private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
private static final Logger LOG = LoggerFactory.getLogger(
SubApplicationTable.class);
SubApplicationTableRW.class);
public SubApplicationTable() {
public SubApplicationTableRW() {
super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
}
@ -114,8 +77,8 @@ public SubApplicationTable() {
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
* (org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)

View File

@ -0,0 +1,132 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
<name>Apache Hadoop YARN TimelineService HBase Common</name>
<version>3.2.0-SNAPSHOT</version>
<properties>
<!-- Needed for generating FindBugs warnings using parent pom -->
<yarn.basedir>${project.parent.parent.parent.basedir}</yarn.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice</artifactId>
</dependency>
<!-- This is needed for GenericObjectMapper in GenericConverter -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
<scope>provided</scope>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this direct
dependency -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
<phase>test-compile</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<includeScope>runtime</includeScope>
<excludeGroupIds>org.slf4j,org.apache.hadoop,com.github.stephenc.findbugs</excludeGroupIds>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -17,17 +17,12 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.application;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
@ -52,10 +47,10 @@ public enum ApplicationColumn implements Column<ApplicationTable> {
*/
FLOW_VERSION(ApplicationColumnFamily.INFO, "flow_version");
private final ColumnHelper<ApplicationTable> column;
private final ColumnFamily<ApplicationTable> columnFamily;
private final String columnQualifier;
private final byte[] columnQualifierBytes;
private final ValueConverter valueConverter;
private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
String columnQualifier) {
@ -69,7 +64,7 @@ private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
// Future-proof by ensuring the right column prefix hygiene.
this.columnQualifierBytes =
Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
this.column = new ColumnHelper<ApplicationTable>(columnFamily, converter);
this.valueConverter = converter;
}
/**
@ -79,17 +74,6 @@ private String getColumnQualifier() {
return columnQualifier;
}
public void store(byte[] rowKey,
TypedBufferedMutator<ApplicationTable> tableMutator, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
inputValue, attributes);
}
public Object readResult(Result result) throws IOException {
return column.readResult(result, columnQualifierBytes);
}
@Override
public byte[] getColumnQualifierBytes() {
return columnQualifierBytes.clone();
@ -102,7 +86,16 @@ public byte[] getColumnFamilyBytes() {
@Override
public ValueConverter getValueConverter() {
return column.getValueConverter();
return valueConverter;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
@Override
public boolean supplementCellTimestamp() {
return false;
}
}

View File

@ -17,20 +17,13 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.application;
import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
@ -69,7 +62,6 @@ public enum ApplicationColumnPrefix implements ColumnPrefix<ApplicationTable> {
*/
METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
private final ColumnHelper<ApplicationTable> column;
private final ColumnFamily<ApplicationTable> columnFamily;
/**
@ -78,6 +70,7 @@ public enum ApplicationColumnPrefix implements ColumnPrefix<ApplicationTable> {
*/
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final ValueConverter valueConverter;
/**
* Private constructor, meant to be used by the enum definition.
@ -100,7 +93,7 @@ private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
*/
private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
String columnPrefix, ValueConverter converter) {
column = new ColumnHelper<ApplicationTable>(columnFamily, converter);
this.valueConverter = converter;
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
@ -136,101 +129,22 @@ public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #store(byte[],
* org.apache.hadoop.yarn.server.timelineservice.storage.common.
* TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
*/
public void store(byte[] rowKey,
TypedBufferedMutator<ApplicationTable> tableMutator, byte[] qualifier,
Long timestamp, Object inputValue, Attribute... attributes)
throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
byte[] columnQualifier = getColumnPrefixBytes(qualifier);
column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
attributes);
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #store(byte[],
* org.apache.hadoop.yarn.server.timelineservice.storage.common.
* TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
*/
public void store(byte[] rowKey,
TypedBufferedMutator<ApplicationTable> tableMutator, String qualifier,
Long timestamp, Object inputValue, Attribute...attributes)
throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
byte[] columnQualifier = getColumnPrefixBytes(qualifier);
column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
attributes);
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
*/
public Object readResult(Result result, String qualifier) throws IOException {
byte[] columnQualifier =
ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
return column.readResult(result, columnQualifier);
@Override
public boolean supplementCellTimeStamp() {
return false;
}
public ValueConverter getValueConverter() {
return column.getValueConverter();
return valueConverter;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResults(org.apache.hadoop.hbase.client.Result,
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
*/
public <K> Map<K, Object> readResults(Result result,
KeyConverter<K> keyConverter) throws IOException {
return column.readResults(result, columnPrefixBytes, keyConverter);
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
*/
public <K, V> NavigableMap<K, NavigableMap<Long, V>>
readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
throws IOException {
return column.readResultsWithTimestamps(result, columnPrefixBytes,
keyConverter);
}
}

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.application;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
/**
* The application table has column families info, config and metrics. Info
* stores information about a YARN application entity, config stores
* configuration data of a YARN application, metrics stores the metrics of a
* YARN application. This table is entirely analogous to the entity table but
* created for better performance.
*
* Example application table record:
*
* <pre>
* |-------------------------------------------------------------------------|
* | Row | Column Family | Column Family| Column Family|
* | key | info | metrics | config |
* |-------------------------------------------------------------------------|
* | clusterId! | id:appId | metricId1: | configKey1: |
* | userName! | | metricValue1 | configValue1 |
* | flowName! | created_time: | @timestamp1 | |
* | flowRunId! | 1392993084018 | | configKey2: |
* | AppId | | metricId1: | configValue2 |
* | | i!infoKey: | metricValue2 | |
* | | infoValue | @timestamp2 | |
* | | | | |
* | | r!relatesToKey: | metricId2: | |
* | | id3=id4=id5 | metricValue1 | |
* | | | @timestamp2 | |
* | | s!isRelatedToKey: | | |
* | | id7=id9=id6 | | |
* | | | | |
* | | e!eventId=timestamp=infoKey: | | |
* | | eventInfoValue | | |
* | | | | |
* | | flowVersion: | | |
* | | versionValue | | |
* |-------------------------------------------------------------------------|
* </pre>
*/
public final class ApplicationTable extends BaseTable<ApplicationTable> {
}

View File

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package org.apache.hadoop.yarn.server.timelineservice.storage.application
* contains classes related to implementation for application table.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.yarn.server.timelineservice.storage.application;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -18,18 +18,14 @@
package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
import java.io.IOException;
/**
* Identifies fully qualified columns for the {@link AppToFlowTable}.
*/
@ -50,10 +46,10 @@ public enum AppToFlowColumn implements Column<AppToFlowTable> {
*/
USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
private final ColumnHelper<AppToFlowTable> column;
private final ColumnFamily<AppToFlowTable> columnFamily;
private final String columnQualifier;
private final byte[] columnQualifierBytes;
private final ValueConverter valueConverter;
AppToFlowColumn(ColumnFamily<AppToFlowTable> columnFamily,
String columnQualifier) {
@ -62,7 +58,7 @@ public enum AppToFlowColumn implements Column<AppToFlowTable> {
// Future-proof by ensuring the right column prefix hygiene.
this.columnQualifierBytes =
Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
this.column = new ColumnHelper<AppToFlowTable>(columnFamily);
this.valueConverter = GenericConverter.getInstance();
}
/**
@ -77,13 +73,6 @@ public byte[] getColumnQualifierBytes() {
return columnQualifierBytes.clone();
}
public void store(byte[] rowKey,
TypedBufferedMutator<AppToFlowTable> tableMutator, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
inputValue, attributes);
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
@ -91,11 +80,16 @@ public byte[] getColumnFamilyBytes() {
@Override
public ValueConverter getValueConverter() {
return column.getValueConverter();
return valueConverter;
}
public Object readResult(Result result) throws IOException {
return column.readResult(result, columnQualifierBytes);
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
@Override
public boolean supplementCellTimestamp() {
return false;
}
}

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
/**
* Identifies partially qualified columns for the app-to-flow table.
*/
public enum AppToFlowColumnPrefix implements ColumnPrefix<AppToFlowTable> {
/**
* The flow name.
*/
FLOW_NAME(AppToFlowColumnFamily.MAPPING, "flow_name"),
/**
* The flow run ID.
*/
FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
/**
* The user.
*/
USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
private final ColumnFamily<AppToFlowTable> columnFamily;
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final ValueConverter valueConverter;
AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
String columnPrefix) {
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
this.columnPrefixBytes = null;
} else {
// Future-proof by ensuring the right column prefix hygiene.
this.columnPrefixBytes =
Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
}
this.valueConverter = GenericConverter.getInstance();
}
@Override
public byte[] getColumnPrefixBytes(String qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
}
@Override
public ValueConverter getValueConverter() {
return valueConverter;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
@Override
public boolean supplementCellTimeStamp() {
return false;
}
}

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
/**
* The app_flow table has column family mapping. Mapping stores the
* appId to flowName and flowRunId mapping information.
*
* Example app_flow table record:
*
* <pre>
* |--------------------------------------|
* | Row | Column Family |
* | key | mapping |
* |--------------------------------------|
* | appId | flow_name!cluster1: |
* | | foo@daily_hive_report |
* | | |
* | | flow_run_id!cluster1: |
* | | 1452828720457 |
* | | |
* | | user_id!cluster1: |
* | | admin |
* | | |
* | | flow_name!cluster2: |
* | | bar@ad_hoc_query |
* | | |
* | | flow_run_id!cluster2: |
* | | 1452828498752 |
* | | |
* | | user_id!cluster2: |
* | | joe |
* | | |
* |--------------------------------------|
* </pre>
*
* It is possible (although unlikely) in a multi-cluster environment that there
* may be more than one application for a given app id. Different clusters are
* recorded as different sets of columns.
*/
public final class AppToFlowTable extends BaseTable<AppToFlowTable> {
}

View File

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow
* contains classes related to implementation for app to flow table.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -55,7 +55,7 @@ public byte[] encode(String appIdStr) {
LongConverter.invertLong(appId.getClusterTimestamp()));
System.arraycopy(clusterTs, 0, appIdBytes, 0, Bytes.SIZEOF_LONG);
byte[] seqId = Bytes.toBytes(
HBaseTimelineStorageUtils.invertInt(appId.getId()));
HBaseTimelineSchemaUtils.invertInt(appId.getId()));
System.arraycopy(seqId, 0, appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT);
return appIdBytes;
}
@ -80,9 +80,9 @@ public String decode(byte[] appIdBytes) {
}
long clusterTs = LongConverter.invertLong(
Bytes.toLong(appIdBytes, 0, Bytes.SIZEOF_LONG));
int seqId = HBaseTimelineStorageUtils.invertInt(
int seqId = HBaseTimelineSchemaUtils.invertInt(
Bytes.toInt(appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT));
return HBaseTimelineStorageUtils.convertApplicationIdToString(
return HBaseTimelineSchemaUtils.convertApplicationIdToString(
ApplicationId.newInstance(clusterTs, seqId));
}

View File

@ -15,14 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.hbase.client.BufferedMutator;
/**
* Just a typed wrapper around {@link BufferedMutator} used to ensure that
* columns can write only to the table mutator for the right table.
* The base type of tables.
* @param T table type
*/
public interface TypedBufferedMutator<T> extends BufferedMutator {
// This class is intentionally left (almost) blank
public abstract class BaseTable<T> {
}
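BaseTable in the common submodule is now a bare schema marker; the HBase Admin/Connection plumbing stays behind in BaseTableRW on the client side. A rough sketch of the shape of this split, using simplified stand-in names rather than the actual declarations:

// Common submodule: schema-only marker types with no HBase client imports.
abstract class SchemaTable<T> {
}

final class ExampleApplicationTable extends SchemaTable<ExampleApplicationTable> {
}

// Client submodule: a read/write companion keyed on the schema type carries
// the table-name configuration and the create/get/scan logic.
abstract class SchemaTableRW<T extends SchemaTable<T>> {
  private final String defaultTableName;

  protected SchemaTableRW(String defaultTableName) {
    this.defaultTableName = defaultTableName;
  }

  String getDefaultTableName() {
    return defaultTableName;
  }
}

final class ExampleApplicationTableRW extends SchemaTableRW<ExampleApplicationTable> {
  ExampleApplicationTableRW() {
    super("timelineservice.application");
  }
}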

View File

@ -17,49 +17,13 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
/**
* A Column represents the way to store a fully qualified column in a specific
* table.
*/
public interface Column<T> {
/**
* Sends a Mutation to the table. The mutations will be buffered and sent over
* the wire as part of a batch.
*
* @param rowKey identifying the row to write. Nothing gets written when null.
* @param tableMutator used to modify the underlying HBase table. Caller is
* responsible to pass a mutator for the table that actually has this
* column.
* @param timestamp version timestamp. When null the server timestamp will be
* used.
* @param attributes Map of attributes for this mutation. used in the
* coprocessor to set/read the cell tags. Can be null.
* @param inputValue the value to write to the rowKey and column qualifier.
* Nothing gets written when null.
* @throws IOException if there is any exception encountered during store.
*/
void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
Long timestamp, Object inputValue, Attribute... attributes)
throws IOException;
/**
* Get the latest version of this specified column. Note: this call clones the
* value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
*
* @param result Cannot be null
* @return result object (can be cast to whatever object was written to), or
* null when result doesn't contain this column.
* @throws IOException if there is any exception encountered while reading
* result.
*/
Object readResult(Result result) throws IOException;
public interface Column<T extends BaseTable<T>> {
/**
* Returns column family name(as bytes) associated with this column.
* @return a byte array encoding column family for this column qualifier.
@ -77,4 +41,16 @@ void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
* @return a {@link ValueConverter} implementation.
*/
ValueConverter getValueConverter();
/**
* Return attributes combined with aggregations, if any.
* @return an array of Attributes
*/
Attribute[] getCombinedAttrsWithAggr(Attribute... attributes);
/**
* Return true if the cell timestamp needs to be supplemented.
* @return true if the cell timestamp needs to be supplemented
*/
boolean supplementCellTimestamp();
}
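With store and readResult removed, Column only describes schema: family and qualifier bytes, the value converter, aggregation attributes, and whether cell timestamps need supplementing. Reads now go through ColumnRWHelper, as elsewhere in this patch; a minimal sketch, assuming a Result for an application row is already available (the wrapper class is illustrative):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;

/** Illustrative sketch, not part of the patch. */
final class ColumnReadSketch {
  private ColumnReadSketch() {
  }

  static Long readCreatedTime(Result result) throws IOException {
    // The schema-side Column constant is paired with the fetched Result here
    // rather than inside the enum itself.
    return (Long) ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME);
  }
}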

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.hbase.util.Bytes;
/**
* This class is meant to be used only by explicit Columns, and not directly to
* write by clients.
*/
public final class ColumnHelper {
private ColumnHelper() {
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* {@link Separator#QUALIFIERS} is permissible in the qualifier
* as it is joined only with the column prefix bytes.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
String qualifier) {
// We don't want column names to have spaces / tabs.
byte[] encodedQualifier =
Separator.encode(qualifier, Separator.SPACE, Separator.TAB);
if (columnPrefixBytes == null) {
return encodedQualifier;
}
// Tag the encoded qualifier onto the column prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, encodedQualifier);
return columnQualifier;
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
long qualifier) {
if (columnPrefixBytes == null) {
return Bytes.toBytes(qualifier);
}
// Tag the long qualifier bytes onto the column prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, Bytes.toBytes(qualifier));
return columnQualifier;
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier the byte representation for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
byte[] qualifier) {
if (columnPrefixBytes == null) {
return qualifier;
}
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, qualifier);
return columnQualifier;
}
}
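A minimal, illustrative usage sketch of the ColumnHelper qualifier helpers above; the "e" prefix and "eventId" remainder are made-up example values, and the sketch assumes only the APIs shown in this file:
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.hbase.util.Bytes;
/** Illustrative sketch: combining a column prefix with a qualifier. */
public final class ColumnHelperUsageSketch {
  public static void main(String[] args) {
    // Encode a hypothetical "e" prefix the same way the enum constants in
    // this patch encode theirs.
    byte[] prefixBytes = Bytes.toBytes(Separator.SPACE.encode("e"));
    // Prefix and remainder joined with Separator.QUALIFIERS.
    byte[] qualified = ColumnHelper.getColumnQualifier(prefixBytes, "eventId");
    // A null prefix yields just the encoded remainder, with no separator.
    byte[] bare = ColumnHelper.getColumnQualifier(null, "eventId");
    System.out.println(Bytes.toString(qualified) + " / " + Bytes.toString(bare));
  }
}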

View File

@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
/**
* Used to represent a partially qualified column, where the actual column name
* will be composed of a prefix and the remainder of the column qualifier. The
* prefix can be null, in which case the column qualifier will be completely
* determined when the values are stored.
*/
public interface ColumnPrefix<T extends BaseTable<T>> {
/**
* @param qualifierPrefix Column qualifier or prefix of qualifier.
* @return a byte array encoding column prefix and qualifier/prefix passed.
*/
byte[] getColumnPrefixBytes(String qualifierPrefix);
/**
* @param qualifierPrefix Column qualifier or prefix of qualifier.
* @return a byte array encoding column prefix and qualifier/prefix passed.
*/
byte[] getColumnPrefixBytes(byte[] qualifierPrefix);
/**
* Get the column prefix in bytes.
* @return column prefix in bytes
*/
byte[] getColumnPrefixInBytes();
/**
 * Returns the column family name (as bytes) associated with this column prefix.
* @return a byte array encoding column family for this prefix.
*/
byte[] getColumnFamilyBytes();
/**
* Returns value converter implementation associated with this column prefix.
* @return a {@link ValueConverter} implementation.
*/
ValueConverter getValueConverter();
/**
 * Returns the attributes combined with the aggregation operation, if any.
* @return an array of Attributes
*/
Attribute[] getCombinedAttrsWithAggr(Attribute... attributes);
/**
* Return true if the cell timestamp needs to be supplemented.
* @return true if the cell timestamp needs to be supplemented
*/
boolean supplementCellTimeStamp();
}

View File

@ -0,0 +1,156 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
import java.text.NumberFormat;
/**
 * A set of utility functions used in the HBase TimelineService common module.
*/
public final class HBaseTimelineSchemaUtils {
/** milliseconds in one day. */
public static final long MILLIS_ONE_DAY = 86400000L;
private static final ThreadLocal<NumberFormat> APP_ID_FORMAT =
new ThreadLocal<NumberFormat>() {
@Override
public NumberFormat initialValue() {
NumberFormat fmt = NumberFormat.getInstance();
fmt.setGroupingUsed(false);
fmt.setMinimumIntegerDigits(4);
return fmt;
}
};
private HBaseTimelineSchemaUtils() {
}
/**
* Combines the input array of attributes and the input aggregation operation
* into a new array of attributes.
*
* @param attributes Attributes to be combined.
* @param aggOp Aggregation operation.
* @return array of combined attributes.
*/
public static Attribute[] combineAttributes(Attribute[] attributes,
AggregationOperation aggOp) {
int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
Attribute[] combinedAttributes = new Attribute[newLength];
if (attributes != null) {
System.arraycopy(attributes, 0, combinedAttributes, 0, attributes.length);
}
if (aggOp != null) {
Attribute a2 = aggOp.getAttribute();
combinedAttributes[newLength - 1] = a2;
}
return combinedAttributes;
}
/**
 * Returns the size of the new array. The new array is the combination of the
 * input array of attributes and the input aggregation operation.
*
* @param attributes Attributes.
* @param aggOp Aggregation operation.
* @return the size for the new array
*/
private static int getNewLengthCombinedAttributes(Attribute[] attributes,
AggregationOperation aggOp) {
int oldLength = getAttributesLength(attributes);
int aggLength = getAppOpLength(aggOp);
return oldLength + aggLength;
}
private static int getAppOpLength(AggregationOperation aggOp) {
if (aggOp != null) {
return 1;
}
return 0;
}
private static int getAttributesLength(Attribute[] attributes) {
if (attributes != null) {
return attributes.length;
}
return 0;
}
/**
 * Converts an int into its inverse int to be used in (row) keys
 * where we want to have the largest int value at the top of the table
 * (scans start at the largest int first).
*
* @param key value to be inverted so that the latest version will be first in
* a scan.
* @return inverted int
*/
public static int invertInt(int key) {
return Integer.MAX_VALUE - key;
}
/**
 * Returns the timestamp of that day's start (midnight, 00:00:00)
* for a given input timestamp.
*
* @param ts Timestamp.
* @return timestamp of that day's beginning (midnight)
*/
public static long getTopOfTheDayTimestamp(long ts) {
long dayTimestamp = ts - (ts % MILLIS_ONE_DAY);
return dayTimestamp;
}
/**
 * Checks if the passed object is of an integral type (Short/Integer/Long).
*
* @param obj Object to be checked.
* @return true if object passed is of type Short or Integer or Long, false
* otherwise.
*/
public static boolean isIntegralValue(Object obj) {
return (obj instanceof Short) || (obj instanceof Integer) ||
(obj instanceof Long);
}
/**
* A utility method that converts ApplicationId to string without using
* FastNumberFormat in order to avoid the incompatibility issue caused
* by mixing hadoop-common 2.5.1 and hadoop-yarn-api 3.0 in this module.
* This is a work-around implementation as discussed in YARN-6905.
*
* @param appId application id
* @return the string representation of the given application id
*
*/
public static String convertApplicationIdToString(ApplicationId appId) {
StringBuilder sb = new StringBuilder(64);
sb.append(ApplicationId.appIdStrPrefix);
sb.append("_");
sb.append(appId.getClusterTimestamp());
sb.append('_');
sb.append(APP_ID_FORMAT.get().format(appId.getId()));
return sb.toString();
}
}
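A short, illustrative sketch exercising a few of the HBaseTimelineSchemaUtils helpers above; the timestamp and application id are arbitrary example values:
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.yarn.api.records.ApplicationId;
/** Illustrative sketch: row-key helpers from HBaseTimelineSchemaUtils. */
public final class SchemaUtilsUsageSketch {
  public static void main(String[] args) {
    // Row keys store inverted values so that the largest value sorts first:
    // Integer.MAX_VALUE - 42 == 2147483605.
    System.out.println(HBaseTimelineSchemaUtils.invertInt(42));
    // Rounds a timestamp down to that day's midnight (UTC):
    // 1392993084018 - (1392993084018 % 86400000) == 1392940800000.
    System.out.println(
        HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(1392993084018L));
    // Renders an application id without FastNumberFormat (see YARN-6905),
    // e.g. "application_1392993084018_0001".
    ApplicationId appId = ApplicationId.newInstance(1392993084018L, 1);
    System.out.println(
        HBaseTimelineSchemaUtils.convertApplicationIdToString(appId));
  }
}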

View File

@ -40,7 +40,7 @@ public LongConverter() {
@Override
public byte[] encodeValue(Object value) throws IOException {
if (!HBaseTimelineStorageUtils.isIntegralValue(value)) {
if (!HBaseTimelineSchemaUtils.isIntegralValue(value)) {
throw new IOException("Expected integral value");
}
return Bytes.toBytes(((Number)value).longValue());

View File

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package org.apache.hadoop.yarn.server.timelineservice.storage.common contains
* a set of utility classes used across backend storage reader and writer.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.yarn.server.timelineservice.storage.common;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -17,17 +17,12 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
@ -56,10 +51,10 @@ public enum EntityColumn implements Column<EntityTable> {
*/
FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
private final ColumnHelper<EntityTable> column;
private final ColumnFamily<EntityTable> columnFamily;
private final String columnQualifier;
private final byte[] columnQualifierBytes;
private final ValueConverter valueConverter;
EntityColumn(ColumnFamily<EntityTable> columnFamily,
String columnQualifier) {
@ -73,7 +68,7 @@ public enum EntityColumn implements Column<EntityTable> {
// Future-proof by ensuring the right column prefix hygiene.
this.columnQualifierBytes =
Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
this.column = new ColumnHelper<EntityTable>(columnFamily, converter);
this.valueConverter = converter;
}
/**
@ -83,17 +78,6 @@ private String getColumnQualifier() {
return columnQualifier;
}
public void store(byte[] rowKey,
TypedBufferedMutator<EntityTable> tableMutator, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
inputValue, attributes);
}
public Object readResult(Result result) throws IOException {
return column.readResult(result, columnQualifierBytes);
}
@Override
public byte[] getColumnQualifierBytes() {
return columnQualifierBytes.clone();
@ -106,7 +90,16 @@ public byte[] getColumnFamilyBytes() {
@Override
public ValueConverter getValueConverter() {
return column.getValueConverter();
return valueConverter;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
@Override
public boolean supplementCellTimestamp() {
return false;
}
}

View File

@ -17,20 +17,13 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
@ -69,7 +62,6 @@ public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
*/
METRIC(EntityColumnFamily.METRICS, null, new LongConverter());
private final ColumnHelper<EntityTable> column;
private final ColumnFamily<EntityTable> columnFamily;
/**
@ -78,6 +70,7 @@ public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
*/
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final ValueConverter valueConverter;
/**
* Private constructor, meant to be used by the enum definition.
@ -111,7 +104,7 @@ public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
*/
EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
String columnPrefix, boolean compondColQual, ValueConverter converter) {
column = new ColumnHelper<EntityTable>(columnFamily, converter);
this.valueConverter = converter;
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
@ -142,6 +135,11 @@ public byte[] getColumnPrefixBytes(String qualifierPrefix) {
this.columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
@ -149,101 +147,16 @@ public byte[] getColumnFamilyBytes() {
@Override
public ValueConverter getValueConverter() {
return column.getValueConverter();
return valueConverter;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #store(byte[],
* org.apache.hadoop.yarn.server.timelineservice.storage.common.
* TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
* org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
*/
public void store(byte[] rowKey,
TypedBufferedMutator<EntityTable> tableMutator, String qualifier,
Long timestamp, Object inputValue, Attribute... attributes)
throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
byte[] columnQualifier = getColumnPrefixBytes(qualifier);
column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
attributes);
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return attributes;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #store(byte[],
* org.apache.hadoop.yarn.server.timelineservice.storage.common.
* TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
*/
public void store(byte[] rowKey,
TypedBufferedMutator<EntityTable> tableMutator, byte[] qualifier,
Long timestamp, Object inputValue, Attribute... attributes)
throws IOException {
// Null check
if (qualifier == null) {
throw new IOException("Cannot store column with null qualifier in "
+ tableMutator.getName().getNameAsString());
}
byte[] columnQualifier = getColumnPrefixBytes(qualifier);
column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
attributes);
@Override
public boolean supplementCellTimeStamp() {
return false;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
*/
public Object readResult(Result result, String qualifier) throws IOException {
byte[] columnQualifier =
ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
return column.readResult(result, columnQualifier);
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResults(org.apache.hadoop.hbase.client.Result,
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
*/
public <K> Map<K, Object> readResults(Result result,
KeyConverter<K> keyConverter) throws IOException {
return column.readResults(result, columnPrefixBytes, keyConverter);
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
*/
public <K, V> NavigableMap<K, NavigableMap<Long, V>>
readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
throws IOException {
return column.readResultsWithTimestamps(result, columnPrefixBytes,
keyConverter);
}
}
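A brief, illustrative sketch of how a reader or writer might resolve the column family and fully qualified column name through the slimmed-down ColumnPrefix contract; the metric id "MEMORY_MB" is a made-up example value:
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
import org.apache.hadoop.hbase.util.Bytes;
/** Illustrative sketch: resolving an entity metric column name. */
public final class EntityColumnPrefixUsageSketch {
  public static void main(String[] args) {
    // METRIC is declared with a null prefix above, so the qualifier is just
    // the encoded metric id; prefixed constants would get
    // Separator.QUALIFIERS between prefix and remainder.
    byte[] qualifier =
        EntityColumnPrefix.METRIC.getColumnPrefixBytes("MEMORY_MB");
    byte[] family = EntityColumnPrefix.METRIC.getColumnFamilyBytes();
    System.out.println(Bytes.toString(family) + ":" + Bytes.toString(qualifier));
  }
}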

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
/**
 * The entity table has column families info, config and metrics. Info stores
 * information about a timeline entity object, config stores configuration data
 * of a timeline entity object, and metrics stores the metrics of a timeline
 * entity object.
*
* Example entity table record:
*
* <pre>
* |-------------------------------------------------------------------------|
* | Row | Column Family | Column Family| Column Family|
* | key | info | metrics | config |
* |-------------------------------------------------------------------------|
* | userName! | id:entityId | metricId1: | configKey1: |
* | clusterId! | | metricValue1 | configValue1 |
* | flowName! | type:entityType | @timestamp1 | |
* | flowRunId! | | | configKey2: |
* | AppId! | created_time: | metricId1: | configValue2 |
* | entityType!| 1392993084018 | metricValue2 | |
* | idPrefix! | | @timestamp2 | |
* | entityId | i!infoKey: | | |
* | | infoValue | metricId1: | |
* | | | metricValue1 | |
* | | r!relatesToKey: | @timestamp2 | |
* | | id3=id4=id5 | | |
* | | | | |
* | | s!isRelatedToKey | | |
* | | id7=id9=id6 | | |
* | | | | |
* | | e!eventId=timestamp=infoKey: | | |
* | | eventInfoValue | | |
* | | | | |
* | | flowVersion: | | |
* | | versionValue | | |
* |-------------------------------------------------------------------------|
* </pre>
*/
public final class EntityTable extends BaseTable<EntityTable> {
}

View File

@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Package org.apache.hadoop.yarn.server.timelineservice.storage.entity
 * contains classes related to the implementation of the entity table.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -0,0 +1,133 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
/**
* Identifies partially qualified columns for the {@link FlowActivityTable}.
*/
public enum FlowActivityColumnPrefix
implements ColumnPrefix<FlowActivityTable> {
/**
* To store run ids of the flows.
*/
RUN_ID(FlowActivityColumnFamily.INFO, "r", null);
private final ColumnFamily<FlowActivityTable> columnFamily;
private final ValueConverter valueConverter;
/**
* Can be null for those cases where the provided column qualifier is the
* entire column name.
*/
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final AggregationOperation aggOp;
/**
* Private constructor, meant to be used by the enum definition.
*
* @param columnFamily
* that this column is stored in.
* @param columnPrefix
* for this column.
*/
private FlowActivityColumnPrefix(
ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
AggregationOperation aggOp) {
this(columnFamily, columnPrefix, aggOp, false);
}
private FlowActivityColumnPrefix(
ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
AggregationOperation aggOp, boolean compoundColQual) {
this.valueConverter = GenericConverter.getInstance();
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
this.columnPrefixBytes = null;
} else {
// Future-proof by ensuring the right column prefix hygiene.
this.columnPrefixBytes = Bytes.toBytes(Separator.SPACE
.encode(columnPrefix));
}
this.aggOp = aggOp;
}
/**
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
}
@Override
public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
this.columnPrefixBytes, qualifierPrefix);
}
@Override
public byte[] getColumnPrefixBytes(String qualifierPrefix) {
return ColumnHelper.getColumnQualifier(
this.columnPrefixBytes, qualifierPrefix);
}
public byte[] getColumnPrefixBytes() {
return columnPrefixBytes.clone();
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
}
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
@Override
public ValueConverter getValueConverter() {
return valueConverter;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
}
@Override
public boolean supplementCellTimeStamp() {
return false;
}
public AggregationOperation getAttribute() {
return aggOp;
}
}

View File

@ -21,7 +21,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
@ -63,7 +63,7 @@ protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
String flowName, boolean convertDayTsToTopOfDay) {
this.clusterId = clusterId;
if (convertDayTsToTopOfDay && (timestamp != null)) {
this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
this.dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(timestamp);
} else {
this.dayTs = timestamp;
}

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
/**
 * The flow activity table has column family info.
 * It stores the daily activity record for flows and is
 * useful as a quick lookup of what flows were
 * running on a given day.
*
* Example flow activity table record:
*
* <pre>
* |-------------------------------------------|
* | Row key | Column Family |
* | | info |
* |-------------------------------------------|
* | clusterId! | r!runid1:version1 |
* | inv Top of | |
* | Day! | r!runid2:version7 |
* | userName! | |
* | flowName | |
* |-------------------------------------------|
* </pre>
*/
public final class FlowActivityTable extends BaseTable<FlowActivityTable> {
}

View File

@ -17,18 +17,13 @@
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
/**
@ -55,11 +50,11 @@ AggregationOperation.GLOBAL_MAX, new LongConverter()),
*/
FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
private final ColumnHelper<FlowRunTable> column;
private final ColumnFamily<FlowRunTable> columnFamily;
private final String columnQualifier;
private final byte[] columnQualifierBytes;
private final AggregationOperation aggOp;
private final ValueConverter valueConverter;
private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
String columnQualifier, AggregationOperation aggOp) {
@ -76,7 +71,7 @@ private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
// Future-proof by ensuring the right column prefix hygiene.
this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
.encode(columnQualifier));
this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
this.valueConverter = converter;
}
/**
@ -100,32 +95,18 @@ public AggregationOperation getAggregationOperation() {
return aggOp;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
* (byte[], org.apache.hadoop.yarn.server.timelineservice.storage.common.
* TypedBufferedMutator, java.lang.Long, java.lang.Object,
* org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
*/
public void store(byte[] rowKey,
TypedBufferedMutator<FlowRunTable> tableMutator, Long timestamp,
Object inputValue, Attribute... attributes) throws IOException {
Attribute[] combinedAttributes =
HBaseTimelineStorageUtils.combineAttributes(attributes, aggOp);
column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
inputValue, combinedAttributes);
}
public Object readResult(Result result) throws IOException {
return column.readResult(result, columnQualifierBytes);
@Override
public ValueConverter getValueConverter() {
return valueConverter;
}
@Override
public ValueConverter getValueConverter() {
return column.getValueConverter();
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
}
@Override
public boolean supplementCellTimestamp() {
return true;
}
}

View File

@ -0,0 +1,129 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
/**
* Identifies partially qualified columns for the {@link FlowRunTable}.
*/
public enum FlowRunColumnPrefix implements ColumnPrefix<FlowRunTable> {
/**
* To store flow run info values.
*/
METRIC(FlowRunColumnFamily.INFO, "m", null, new LongConverter());
private final ColumnFamily<FlowRunTable> columnFamily;
/**
* Can be null for those cases where the provided column qualifier is the
* entire column name.
*/
private final String columnPrefix;
private final byte[] columnPrefixBytes;
private final ValueConverter valueConverter;
private final AggregationOperation aggOp;
/**
* Private constructor, meant to be used by the enum definition.
*
* @param columnFamily that this column is stored in.
* @param columnPrefix for this column.
*/
private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
String columnPrefix, AggregationOperation fra, ValueConverter converter) {
this(columnFamily, columnPrefix, fra, converter, false);
}
private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
String columnPrefix, AggregationOperation fra, ValueConverter converter,
boolean compoundColQual) {
this.valueConverter = converter;
this.columnFamily = columnFamily;
this.columnPrefix = columnPrefix;
if (columnPrefix == null) {
this.columnPrefixBytes = null;
} else {
// Future-proof by ensuring the right column prefix hygiene.
this.columnPrefixBytes =
Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
}
this.aggOp = fra;
}
/**
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
}
public byte[] getColumnPrefixBytes() {
return columnPrefixBytes.clone();
}
@Override
public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
qualifierPrefix);
}
@Override
public byte[] getColumnPrefixBytes(String qualifierPrefix) {
return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
qualifierPrefix);
}
@Override
public byte[] getColumnFamilyBytes() {
return columnFamily.getBytes();
}
@Override
public byte[] getColumnPrefixInBytes() {
return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
}
@Override
public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
}
@Override
public boolean supplementCellTimeStamp() {
return true;
}
public AggregationOperation getAttribute() {
return aggOp;
}
@Override
public ValueConverter getValueConverter() {
return valueConverter;
}
}

Some files were not shown because too many files have changed in this diff.