This article collects Java code examples of the org.apache.hadoop.hive.metastore.api.Table.getWriteId() method and shows how Table.getWriteId() is used in practice. The examples come mainly from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as a useful reference. Details of the Table.getWriteId() method are as follows:

Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: getWriteId
Description: none provided
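Before the collected snippets, here is a minimal, self-contained sketch (not taken from any of the projects above) of the typical usage pattern. It assumes a Hive Metastore 3.x or later client jar on the classpath, where the Thrift-generated Table class exposes setWriteId(long) and getWriteId(); the isTxn flag is a hypothetical stand-in for whatever check the caller uses to decide that the table is transactional (ACID).

import org.apache.hadoop.hive.metastore.api.Table;

public class GetWriteIdSketch {
  public static void main(String[] args) {
    Table table = new Table();
    table.setDbName("testdb");
    table.setTableName("testtable");

    // writeId is a Thrift field on Table; it is only meaningful for
    // transactional (ACID) tables, where it ties column statistics and other
    // metadata changes to a particular write.
    table.setWriteId(42L);

    // Pattern used in the first apache/hive excerpt below: read the write ID
    // only when the table is transactional, otherwise fall back to -1.
    boolean isTxn = true; // hypothetical flag, assumed to be computed elsewhere
    long writeId = isTxn ? table.getWriteId() : -1;
    System.out.println("writeId = " + writeId);
  }
}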
Code example source: apache/hive
Map<String, String> params = table.getParameters();
List<String> colsToUpdate = null;
long writeId = isTxn ? table.getWriteId() : -1;
if (isExistingOnly) {
Code example source: apache/hive
statsDesc.setTableName(newTableName);
colStats.setStatsObj(newStatsObjs);
msdb.updateTableColumnStatistics(colStats, validWriteIds, newTable.getWriteId());
Code example source: apache/hive
    partBatch, newt.getWriteId(), writeIdList);
newPartColStats.getStatsDesc().setTableName(newTblName);
msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(),
    writeIdList, newt.getWriteId());
    EventMessage.EventType.ALTER_TABLE,
    new AlterTableEvent(oldt, newt, false, true,
        newt.getWriteId(), handler),
    environmentContext);
new AlterTableEvent(oldt, newt, false, success, newt.getWriteId(), handler),
    environmentContext, txnAlterTableEventResponses, msdb);
Code example source: apache/hive
updateTableColumnStatsInternal(tbl.getColStats(), null, tbl.getWriteId());
Code example source: apache/hive
adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds));
Code example source: apache/hive
mtbl.setWriteId(tbl.getWriteId());
Code example source: apache/hive
@Override
public ColumnStatistics getTableColumnStatistics(
    String catName, String dbName, String tblName, List<String> colNames,
    String validWriteIds)
    throws MetaException, NoSuchObjectException {
  catName = StringUtils.normalizeIdentifier(catName);
  dbName = StringUtils.normalizeIdentifier(dbName);
  tblName = StringUtils.normalizeIdentifier(tblName);
  if (!shouldCacheTable(catName, dbName, tblName)) {
    return rawStore.getTableColumnStatistics(
        catName, dbName, tblName, colNames, validWriteIds);
  }
  Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
  if (table == null) {
    // The table is not yet loaded in cache
    return rawStore.getTableColumnStatistics(
        catName, dbName, tblName, colNames, validWriteIds);
  }
  ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
  List<ColumnStatisticsObj> colStatObjs =
      sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames);
  return adjustColStatForGet(table.getParameters(), table.getParameters(),
      new ColumnStatistics(csd, colStatObjs), table.getWriteId(), validWriteIds);
}
Code example source: apache/hive
    newTable.getWriteId(), queryValidWriteIds, false);
if (errorMsg != null) {
  throw new MetaException(errorMsg);
if (!areTxnStatsSupported || isToTxn) {
  StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
} else if (queryValidWriteIds != null && newTable.getWriteId() > 0) {
      dbname + "." + name + ". will be made persistent.");
  assert newTable.getWriteId() > 0;
  oldt.setWriteId(newTable.getWriteId());
Code example source: apache/hive
return getWriteId();
Code example source: apache/hive
@Test
public void testAlterTable() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_TABLE_EVENT,
      msgFactory.buildAlterTableMessage(t, t, t.getWriteId()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAlterTableReplicationTask(rtask);
}
Code example source: apache/hive
throw new MetaException(errorMsg);
if (!ObjectStore.isCurrentStatsValidForTheQuery(conf, newParams, table.getWriteId(),
    validWriteIds, true)) {
Code example source: apache/hive
@Test
public void testAlterTable() throws Exception {
  Table originalTable = testTables[2];
  String originalTableName = originalTable.getTableName();
  String originalDatabase = originalTable.getDbName();

  Table newTable = getTableWithAllParametersSet();
  newTable.setTableName(originalTableName);
  newTable.setDbName(originalDatabase);
  // Partition keys cannot be changed by alter_table, but getTableWithAllParametersSet adds one,
  // so reset them for this test
  newTable.setPartitionKeys(originalTable.getPartitionKeys());
  client.alter_table(originalDatabase, originalTableName, newTable);

  Table alteredTable = client.getTable(originalDatabase, originalTableName);
  // The extra parameters are added on the server side, so only check that the required ones are
  // present
  for (String key : newTable.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
        alteredTable.getParameters().get(key));
  }
  // The parameters were checked manually above, so do not compare them again
  newTable.setParameters(alteredTable.getParameters());
  // Some of the data is set on the server side, so reset those fields
  newTable.setCreateTime(alteredTable.getCreateTime());
  newTable.setCreationMetadata(alteredTable.getCreationMetadata());
  newTable.setWriteId(alteredTable.getWriteId());
  Assert.assertTrue(alteredTable.isSetId());
  alteredTable.unsetId();
  Assert.assertEquals("The table data should be the same", newTable, alteredTable);
}
Code example source: apache/hive
table.setWriteId(createdTable.getWriteId());