This article collects code examples for the Java method org.apache.hadoop.hive.ql.metadata.Table.getTTable() and shows how Table.getTTable() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of Table.getTTable() are as follows:

Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: getTTable

Javadoc: This function should only be used in serialization. We should never call this function to modify the fields, because the cached fields will become outdated.
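Before the examples, here is a minimal, self-contained sketch of the typical read-only use of getTTable(). The Hive.get(...) client setup and the database/table names ("default", "my_table") are illustrative assumptions, not taken from the examples below:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class GetTTableExample {
  public static void main(String[] args) throws Exception {
    // Obtain a Hive client for the current configuration (illustrative setup).
    Hive hive = Hive.get(new HiveConf());

    // Load the ql-layer Table wrapper; "default"/"my_table" are placeholder names.
    Table table = hive.getTable("default", "my_table");

    // getTTable() exposes the underlying metastore Thrift object
    // (org.apache.hadoop.hive.metastore.api.Table). Treat it as read-only:
    // mutating it directly would leave the wrapper's cached fields stale.
    org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
    System.out.println(tTable.getTableName());
    System.out.println(tTable.getSd().getLocation());
  }
}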
Code example source: apache/hive

/**
 * Should produce the same result as
 * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
 */
public static boolean isFullAcidTable(Table table) {
  return isFullAcidTable(table == null ? null : table.getTTable());
}
Code example source: apache/hive

private org.apache.hadoop.hive.metastore.api.Table getTempTable(String dbName, String tableName) {
  Map<String, Table> tables = getTempTablesForDatabase(dbName.toLowerCase(),
      tableName.toLowerCase());
  if (tables != null) {
    Table table = tables.get(tableName.toLowerCase());
    if (table != null) {
      return table.getTTable();
    }
  }
  return null;
}
Code example source: apache/hive

@Override
public StorageDescriptor getPartSd() {
  return table.getTTable().getSd();
}
Code example source: apache/hive

@Override
public Map<String, String> getPartParameters() {
  return table.getTTable().getParameters();
}
Code example source: apache/hive

public Properties getMetadataFromPartitionSchema() {
  return MetaStoreUtils.getPartitionMetadata(tPartition, table.getTTable());
}
Code example source: apache/incubator-gobblin

@Override
public long getUpdateTime(Table table) throws UpdateNotFoundException {
  // TODO if a table/partition is registered by gobblin an update time will be made available in table properties
  // Use the update time instead of create time
  return TimeUnit.MILLISECONDS.convert(table.getTTable().getCreateTime(), TimeUnit.SECONDS);
}
Code example source: apache/hive

@Nullable
public StorageHandlerInfo getStorageHandlerInfo(Table table)
    throws HiveException {
  try {
    HiveStorageHandler storageHandler = createStorageHandler(table.getTTable());
    return storageHandler == null ? null : storageHandler.getStorageHandlerInfo(table.getTTable());
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
Code example source: apache/hive

final public Deserializer getDeserializer() {
  if (deserializer == null) {
    try {
      deserializer = HiveMetaStoreUtils.getDeserializer(SessionState.getSessionConf(),
          tPartition, table.getTTable());
    } catch (MetaException e) {
      throw new RuntimeException(e);
    }
  }
  return deserializer;
}
Code example source: apache/hive

/**
 * Are the basic stats for the table up-to-date for query planning.
 * Can run additional checks compared to the version in StatsSetupConst.
 */
public static boolean areBasicStatsUptoDateForQueryAnswering(Table table, Map<String, String> params) {
  // HIVE-19332: external tables should not be considered to have up-to-date stats.
  if (MetaStoreUtils.isExternalTable(table.getTTable())) {
    return false;
  }
  return StatsSetupConst.areBasicStatsUptoDate(params);
}
Code example source: apache/hive

/**
 * Are the column stats for the table up-to-date for query planning.
 * Can run additional checks compared to the version in StatsSetupConst.
 */
public static boolean areColumnStatsUptoDateForQueryAnswering(Table table, Map<String, String> params, String colName) {
  // HIVE-19332: external tables should not be considered to have up-to-date stats.
  if (MetaStoreUtils.isExternalTable(table.getTTable())) {
    return false;
  }
  return StatsSetupConst.areColumnStatsUptoDate(params, colName);
}
Code example source: apache/hive

public TruncateTableDesc(String tableName, Map<String, String> partSpec,
    ReplicationSpec replicationSpec, Table table) {
  this.tableName = tableName;
  this.partSpec = partSpec;
  this.replicationSpec = replicationSpec;
  this.isTransactional = AcidUtils.isTransactionalTable(table);
  this.fullTableName = table == null ? tableName : Warehouse.getQualifiedName(table.getTTable());
}
Code example source: apache/hive

public Path getDataLocation() {
  if (table.isPartitioned()) {
    if (tPartition.getSd() == null)
      return null;
    else
      return new Path(tPartition.getSd().getLocation());
  } else {
    if (table.getTTable() == null || table.getTTable().getSd() == null)
      return null;
    else
      return new Path(table.getTTable().getSd().getLocation());
  }
}
Code example source: apache/hive

public static String getTableInformation(Table table, boolean isOutputPadded) {
  StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
  // Table Metadata
  tableInfo.append(LINE_DELIM).append("# Detailed Table Information").append(LINE_DELIM);
  getTableMetaDataInformation(tableInfo, table, isOutputPadded);
  // Storage information.
  tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
  getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
  if (table.isView() || table.isMaterializedView()) {
    tableInfo.append(LINE_DELIM).append(table.isView() ? "# View Information" : "# Materialized View Information").append(LINE_DELIM);
    getViewInfo(tableInfo, table);
  }
  return tableInfo.toString();
}
Code example source: apache/hive

@Override
public Object getOutput() throws HiveException {
  return new Table(getTable().getTTable());
}
Code example source: apache/hive

protected Table getDummyTable() throws SemanticException {
  Path dummyPath = createDummyFile();
  Table desc = new Table(DUMMY_DATABASE, DUMMY_TABLE);
  desc.getTTable().getSd().setLocation(dummyPath.toString());
  desc.getTTable().getSd().getSerdeInfo().setSerializationLib(NullStructSerDe.class.getName());
  desc.setInputFormatClass(NullRowsInputFormat.class);
  desc.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  return desc;
}
Code example source: apache/incubator-gobblin

/**
 * Currently updates the {@link #HIVE_TABLE_AVRO_SCHEMA_URL} location for a new hive table
 * @param targetTable new Table to be registered in hive
 * @throws IOException
 */
public static void updateTableAttributesIfAvro(Table targetTable, HiveCopyEntityHelper hiveHelper) throws IOException {
  if (isHiveTableAvroType(targetTable)) {
    updateAvroSchemaURL(targetTable.getCompleteName(), targetTable.getTTable().getSd(), hiveHelper);
  }
}
Code example source: apache/hive

private static boolean hasExternalTableAncestor(Operator op, StringBuilder sb) {
  boolean result = false;
  Operator ancestor = OperatorUtils.findSingleOperatorUpstream(op, TableScanOperator.class);
  if (ancestor != null) {
    TableScanOperator ts = (TableScanOperator) ancestor;
    if (MetaStoreUtils.isExternalTable(ts.getConf().getTableMetadata().getTTable())) {
      sb.append(ts.getConf().getTableMetadata().getFullyQualifiedName());
      return true;
    }
  }
  return result;
}
Code example source: apache/hive

/**
 * Create an empty partition.
 * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
 */
public Partition(Table tbl) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Partition tPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  if (!tbl.isView()) {
    tPart.setSd(tbl.getTTable().getSd().deepCopy());
  }
  initialize(tbl, tPart);
}
Code example source: apache/hive

private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException {
  try {
    HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook();
    if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
      return 0;
    }
    DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
    hiveMetaHook.preInsertTable(preInsertTableDesc.getTable().getTTable(), preInsertTableDesc.isOverwrite());
  } catch (MetaException e) {
    throw new HiveException(e);
  }
  return 0;
}
Code example source: apache/incubator-gobblin

private static Partition localTestPartition(Table table, List<String> values) throws Exception {
  return new Partition(table,
      LocalHiveMetastoreTestUtils.getInstance().addTestPartition(table.getTTable(), values, 0));
}