This article collects code examples of the Java method org.apache.hadoop.hive.ql.metadata.Table.getTableName(), showing how Table.getTableName() is used in practice. The examples come from curated open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.getTableName() are as follows:

Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: getTableName
Method description: none provided
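Before turning to the excerpts, here is a minimal usage sketch of the method in isolation. It assumes the two-argument Table(dbName, tableName) constructor from org.apache.hadoop.hive.ql.metadata, which builds an in-memory table handle without contacting the metastore; the database and table names are made up for illustration.

import org.apache.hadoop.hive.ql.metadata.Table;

public class GetTableNameDemo {
  public static void main(String[] args) {
    // Hypothetical names; no metastore connection is needed for this call.
    Table table = new Table("default", "employees");

    // getTableName() returns the unqualified table name stored on the
    // underlying thrift Table object.
    System.out.println(table.getTableName()); // employees

    // The excerpts below often pair it with getDbName() to build a fully
    // qualified "db.table" string.
    System.out.println(table.getDbName() + "." + table.getTableName()); // default.employees
  }
}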
Code example from: apache/hive

public TableSpec(Table table) {
  tableHandle = table;
  tableName = table.getDbName() + "." + table.getTableName();
  specType = SpecType.TABLE_ONLY;
}
Code example from: apache/hive

/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (some subpartitions can be empty).
 * @return list of partition objects
 * @throws HiveException
 */
public List<Partition> getPartitionsByNames(Table tbl,
    Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<String> names = getPartitionNames(tbl.getDbName(), tbl.getTableName(),
      partialPartSpec, (short) -1);
  List<Partition> partitions = getPartitionsByNames(tbl, names);
  return partitions;
}
Code example from: apache/hive

private List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
  List<String> tableNames = new ArrayList<>();
  for (Entity entity : entities) {
    if (entity.getType() == Entity.Type.TABLE) {
      tableNames.add(entity.getTable().getDbName() + "." + entity.getTable().getTableName());
    }
  }
  return tableNames;
}
Code example from: apache/hive

/**
 * Convert the partition value map to a value list in the partition key order.
 * @param table the table being written to
 * @param valueMap the partition value map
 * @return the partition value list
 * @throws java.io.IOException
 */
static List<String> getPartitionValueList(Table table, Map<String, String> valueMap) throws IOException {
  if (valueMap.size() != table.getPartitionKeys().size()) {
    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
        "Table " + table.getTableName() + " has " +
        table.getPartitionKeys().size() + " partition keys, got " +
        valueMap.size());
  }
  List<String> values = new ArrayList<String>();
  for (FieldSchema schema : table.getPartitionKeys()) {
    String value = valueMap.get(schema.getName().toLowerCase());
    if (value == null) {
      throw new HCatException(ErrorType.ERROR_MISSING_PARTITION_KEY,
          "Key " + schema.getName() + " of table " + table.getTableName());
    }
    values.add(value);
  }
  return values;
}
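To see the ordering contract in action without an HCatalog dependency, here is a small sketch that replays the same re-ordering logic with plain collections; the table, its partition keys (ds, region), and the values are made up for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionValueOrderDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical table with partition keys declared in order: ds, region.
    Table table = new Table("default", "logs");
    table.setPartCols(Arrays.asList(
        new FieldSchema("ds", "string", null),
        new FieldSchema("region", "string", null)));

    // The caller's map can be in any order; keys are matched lower-cased.
    Map<String, String> valueMap = new HashMap<>();
    valueMap.put("region", "us-west");
    valueMap.put("ds", "2024-01-01");

    // Same re-ordering logic as getPartitionValueList above.
    List<String> values = new ArrayList<>();
    for (FieldSchema key : table.getPartitionKeys()) {
      values.add(valueMap.get(key.getName().toLowerCase()));
    }
    System.out.println(values); // [2024-01-01, us-west]
  }
}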
Code example from: apache/hive

// Truncated excerpt: collects the names of the columns actually read,
// then checks whether partition-level authorization applies to the table
// (the enclosing method is elided in the source).
List<FieldSchema> columns = tbl.getCols();
List<String> cols = new ArrayList<String>();
for (int i = 0; i < neededColumnIds.size(); i++) {
  cols.add(columns.get(neededColumnIds.get(i)).getName());
}
// ...
if (tbl.isPartitioned()
    && Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
  String alias_id = topOpMap.getKey();
  // ...
}
Code example from: apache/hive

/**
 * Update the table schema, adding new columns as added for the partition.
 * @param client the metastore client
 * @param table the table
 * @param partitionSchema the schema of the partition
 * @throws java.io.IOException Signals that an I/O exception has occurred.
 * @throws org.apache.hadoop.hive.metastore.api.InvalidOperationException the invalid operation exception
 * @throws org.apache.hadoop.hive.metastore.api.MetaException the meta exception
 * @throws org.apache.thrift.TException the thrift exception
 */
private void updateTableSchema(IMetaStoreClient client, Table table,
    HCatSchema partitionSchema) throws IOException, InvalidOperationException, MetaException, TException {
  List<FieldSchema> newColumns = HCatUtil.validatePartitionSchema(table, partitionSchema);
  if (newColumns.size() != 0) {
    List<FieldSchema> tableColumns = new ArrayList<FieldSchema>(table.getTTable().getSd().getCols());
    tableColumns.addAll(newColumns);
    // Update the table schema to add the newly added columns
    table.getTTable().getSd().setCols(tableColumns);
    client.alter_table(table.getDbName(), table.getTableName(), table.getTTable());
  }
}
Code example from: apache/drill

private void msckAddPartitionsOneByOne(Hive db, Table table,
    Set<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
  for (CheckResult.PartitionResult part : partsNotInMs) {
    try {
      db.createPartition(table, Warehouse.makeSpecFromName(part.getPartitionName()));
      repairOutput.add("Repair: Added partition to metastore "
          + table.getTableName() + ':' + part.getPartitionName());
    } catch (Exception e) {
      LOG.warn("Repair error, could not add partition to metastore: ", e);
    }
  }
}
Code example from: apache/hive

// Truncated excerpt: registers dynamically created partitions for a
// transaction, then advances to the next transaction's ID (the else
// branch is elided in the source).
conn.getMSC().addDynamicPartitions(txnToWriteId.getTxnId(),
    txnToWriteId.getWriteId(), conn.getDatabase(),
    conn.getTable().getTableName(), partNames,
    DataOperationType.INSERT);
if (currentTxnIndex + 1 < txnToWriteIds.size()) {
  minTxnId.set(txnToWriteIds.get(currentTxnIndex + 1).getTxnId());
} else {
  // ...
}
Code example from: apache/drill

// Truncated excerpt from a column-stats query rewriter: the per-column
// appends and the FROM-clause prefix are elided in the source; what
// remains appends the back-tick-quoted, fully qualified table name.
String rewrittenQuery;
for (int i = 0; i < colNames.size(); i++) {
  if (i > 0) {
    rewrittenQueryBuilder.append(" , ");
  }
  // ...
}
// ...
rewrittenQueryBuilder.append(tbl.getDbName());
rewrittenQueryBuilder.append("`.");
rewrittenQueryBuilder.append("`" + tbl.getTableName() + "`");
isRewritten = true;
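The back-tick quoting in this excerpt is easy to isolate. A minimal sketch with a made-up helper name: wrapping each identifier in back-ticks keeps names that collide with reserved words parseable when the rewritten query text is analyzed again.

public class QuotedNameDemo {
  // Hypothetical helper mirroring the excerpt's quoting of db and table names.
  static String quoteQualifiedName(String dbName, String tableName) {
    return "`" + dbName + "`.`" + tableName + "`";
  }

  public static void main(String[] args) {
    // "order" is a reserved word, but the quoted form stays parseable.
    System.out.println(quoteQualifiedName("sales", "order")); // `sales`.`order`
  }
}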
Code example from: apache/hive

static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn)
    throws IOException {
  List<String> values = ptn.getValues();
  if (values.size() != table.getPartitionKeys().size()) {
    throw new IOException(
        "Partition values in partition inconsistent with table definition, table "
            + table.getTableName() + " has "
            + table.getPartitionKeys().size()
            + " partition keys, partition has " + values.size()
            + " partition values");
  }
  Map<String, String> ptnKeyValues = new HashMap<String, String>();
  int i = 0;
  for (FieldSchema schema : table.getPartitionKeys()) {
    // CONCERN : the way this mapping goes, the order *needs* to be
    // preserved for table.getPartitionKeys() and ptn.getValues()
    ptnKeyValues.put(schema.getName().toLowerCase(), values.get(i));
    i++;
  }
  return ptnKeyValues;
}
Code example from: apache/drill

// Heavily truncated excerpt from an index-freshness check: the method
// signature, the data-file listing, and the enclosing try/catch are
// elided in the source ('e' below comes from the elided catch clause).
    throws HiveException {
  if (indexes == null || indexes.size() == 0) {
    return false;
  }
  // ...
  for (FileStatus status : srcs) {
    if (status.getModificationTime() > Long.parseLong(indexTs)) {
      LOG.info("Index is stale on table '" + src.getTableName()
          + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
          + "' is higher than index creation time (" + indexTs + ").");
      // ...
    }
  }
  // ...
  throw new HiveException("Failed to grab timestamp information from table '"
      + src.getTableName() + "': " + e.getMessage(), e);
Code example from: apache/hive

// Truncated excerpt from a truncate-table path: an ACID write-ID snapshot
// is resolved first, then the metastore client performs the truncation.
// The leading if-branch, the condition of the partNames ternary, and the
// enclosing try/catch are elided in the source.
  snapshot = AcidUtils.getTableSnapshot(conf, table, true);
} else {
  String fullTableName = getFullTableName(table.getDbName(), table.getTableName());
  ValidWriteIdList writeIdList = getMSC().getValidWriteIds(fullTableName, writeId);
  snapshot = new TableSnapshot(writeId, writeIdList.writeToString());
}
// ...
    ? null : getPartitionNames(table.getDbName(), table.getTableName(), partSpec, (short) -1));
if (snapshot == null) {
  getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
} else {
  getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames,
      snapshot.getValidWriteIdList(), snapshot.getWriteId());
}
// ...
throw new HiveException(e);
Code example from: apache/hive

private int updateColumns(Table tbl, Partition part)
    throws HiveException {
  String serializationLib = tbl.getSd().getSerdeInfo().getSerializationLib();
  if (MetastoreConf.getStringCollection(conf,
      MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains(serializationLib)) {
    throw new HiveException(tbl.getTableName() + " has serde " + serializationLib + " for which schema " +
        "is already handled by HMS.");
  }
  Deserializer deserializer = tbl.getDeserializer(true);
  try {
    LOG.info("Updating metastore columns for table: {}", tbl.getTableName());
    final List<FieldSchema> fields = HiveMetaStoreUtils.getFieldsFromDeserializer(
        tbl.getTableName(), deserializer);
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    sd.setCols(fields);
  } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) {
    LOG.error("alter table update columns: {}", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }
  return 0;
}
Code example from: apache/hive

// Truncated excerpt from a loop over partition inputs: temporary VALUES
// tables are skipped, then a read entity is built for partitioned versus
// unpartitioned inputs (loop context and else branch elided in the source).
if (isValuesTempTable(part.getTable().getTableName())) {
  continue;
}
// ...
if (part.getTable().isPartitioned()) {
  newInput = new ReadEntity(part, parentViewInfo, isDirectRead);
} else {
  // ...
}
Code example from: apache/hive

// Truncated excerpt: validates a partition's storage location; the snippet
// collapses the try body and its catch clause (where 'e' is defined).
if (table.isPartitioned()) {
  try {
    if (tPartition.getSd().getLocation() == null) {
      // ...
    }
    // ...
  } catch (Exception e) {
    throw new HiveException("Invalid partition for table " + table.getTableName(),
        e);
  }
}
Code example from: apache/hive

/**
 * Get a list of Partitions by filter.
 * @param tbl The table containing the partitions.
 * @param filter A string representing the partition predicates.
 * @return a list of partitions satisfying the partition predicates.
 * @throws HiveException
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().listPartitionsByFilter(
      tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, tParts);
}
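A usage sketch for the method above, not taken from the source: it assumes a live metastore reachable through Hive.get(), a partitioned table, and a string partition key named ds; the filter string follows the metastore's partition-filter grammar.

import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionFilterDemo {
  // Hypothetical helper: print partitions at or after a cutoff date.
  static void printRecentPartitions(String dbName, String tableName) throws Exception {
    Hive db = Hive.get(); // requires a configured, reachable metastore
    Table tbl = db.getTable(dbName, tableName);
    for (Partition p : db.getPartitionsByFilter(tbl, "ds >= '2024-01-01'")) {
      System.out.println(p.getName());
    }
  }
}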
Code example from: apache/drill

protected List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
  List<String> tableNames = new ArrayList<String>();
  for (Entity entity : entities) {
    if (entity.getType() == Entity.Type.TABLE) {
      tableNames.add(entity.getTable().getDbName() + "." + entity.getTable().getTableName());
    }
  }
  return tableNames;
}
Code example from: apache/hive

// Truncated test excerpt: tables are created, then table listings are
// asserted at different points in the test; the listing calls themselves
// are elided in the source.
ts.add(table1Name);
ts.add("table2");
Table tbl1 = createTestTable(dbName, ts.get(0));
hm.createTable(tbl1);
// ...
assertEquals(2, fts.size());
// ...
assertEquals(1, fts.size());
assertEquals(ts.get(0), fts.get(0));
// ...
assertEquals(table1Name, table1.getTableName());
Code example from: apache/hive

private void genAutoColumnStatsGatheringPipeline(QB qb, Table table, Map<String, String> partSpec,
    Operator curr, boolean isInsertInto, boolean useTableValueConstructor)
    throws SemanticException {
  LOG.info("Generate an operator pipeline to autogather column stats for table " + table.getTableName()
      + " in query " + ctx.getCmd());
  ColumnStatsAutoGatherContext columnStatsAutoGatherContext =
      new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto, ctx);
  if (useTableValueConstructor) {
    // Table does not exist; use a table value constructor to simulate it
    columnStatsAutoGatherContext.insertTableValuesAnalyzePipeline();
  } else {
    // Table already exists
    columnStatsAutoGatherContext.insertAnalyzePipeline();
  }
  columnStatsAutoGatherContexts.add(columnStatsAutoGatherContext);
}
Code example from: apache/hive

private ImmutableBitSet getEnabledNotNullConstraints(Table tbl) throws HiveException {
  final NotNullConstraint nnc = Hive.get().getEnabledNotNullConstraints(
      tbl.getDbName(), tbl.getTableName());
  if (nnc == null || nnc.getNotNullConstraints().isEmpty()) {
    return null;
  }
  // Build the bitset of not-null column positions
  ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
  for (String nnCol : nnc.getNotNullConstraints().values()) {
    for (int i = 0; i < tbl.getCols().size(); i++) {
      if (tbl.getCols().get(i).getName().equals(nnCol)) {
        builder.set(i);
        break;
      }
    }
  }
  return builder.build();
}
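The bitset being built here is Calcite's ImmutableBitSet. A standalone sketch of the same builder pattern, with made-up column positions:

import org.apache.calcite.util.ImmutableBitSet;

public class NotNullBitSetDemo {
  public static void main(String[] args) {
    // Suppose columns 0 and 2 carry enabled NOT NULL constraints.
    ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
    builder.set(0);
    builder.set(2);
    ImmutableBitSet notNull = builder.build();
    System.out.println(notNull); // {0, 2}
  }
}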