Usage of the org.apache.hadoop.hive.ql.metadata.Table.getTableName() method, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.hive.ql.metadata.Table.getTableName() method and shows how Table.getTableName() is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should be useful as references. The details of Table.getTableName() are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getTableName

About Table.getTableName

getTableName() returns the table's name as a String, without the database qualifier. The ql.metadata.Table class wraps the metastore Thrift Table object, and the name is read from that underlying object; callers that need a fully qualified name typically prepend getDbName(), as several of the examples below do.
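
For orientation, here is a minimal sketch of calling the method directly. It assumes a reachable Hive metastore; the database name "default" and table name "my_table" are placeholders for this sketch, not names taken from the examples below.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class GetTableNameExample {
  public static void main(String[] args) throws HiveException {
    // Obtain the thread-local Hive client; this requires a reachable metastore.
    Hive db = Hive.get(new HiveConf());
    // "default" and "my_table" are placeholder names for this sketch.
    Table table = db.getTable("default", "my_table");
    // getTableName() returns only the table name; prepend the database
    // name to build the fully qualified form used throughout the examples.
    System.out.println(table.getDbName() + "." + table.getTableName());
  }
}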

Code examples

Code example source: apache/hive

public TableSpec(Table table) {
  tableHandle = table;
  tableName = table.getDbName() + "." + table.getTableName();
  specType = SpecType.TABLE_ONLY;
}

Code example source: apache/hive

/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (some subpartitions can be empty).
 * @return list of partition objects
 * @throws HiveException
 */
public List<Partition> getPartitionsByNames(Table tbl,
    Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<String> names = getPartitionNames(tbl.getDbName(), tbl.getTableName(),
      partialPartSpec, (short) -1);
  List<Partition> partitions = getPartitionsByNames(tbl, names);
  return partitions;
}

Code example source: apache/hive

private List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
  List<String> tableNames = new ArrayList<>();
  for (Entity entity : entities) {
    if (entity.getType() == Entity.Type.TABLE) {
      tableNames.add(entity.getTable().getDbName() + "." + entity.getTable().getTableName());
    }
  }
  return tableNames;
}

Code example source: apache/hive

/**
 * Convert the partition value map to a value list in the partition key order.
 * @param table the table being written to
 * @param valueMap the partition value map
 * @return the partition value list
 * @throws java.io.IOException
 */
static List<String> getPartitionValueList(Table table, Map<String, String> valueMap) throws IOException {
  if (valueMap.size() != table.getPartitionKeys().size()) {
    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
        "Table "
            + table.getTableName() + " has " +
            table.getPartitionKeys().size() + " partition keys, got " +
            valueMap.size());
  }
  List<String> values = new ArrayList<String>();
  for (FieldSchema schema : table.getPartitionKeys()) {
    String value = valueMap.get(schema.getName().toLowerCase());
    if (value == null) {
      throw new HCatException(ErrorType.ERROR_MISSING_PARTITION_KEY,
          "Key " + schema.getName() + " of table " + table.getTableName());
    }
    values.add(value);
  }
  return values;
}

Code example source: apache/hive

// Excerpt; the enclosing method and closing braces are truncated in the source.
List<FieldSchema> columns = tbl.getCols();
List<String> cols = new ArrayList<String>();
for (int i = 0; i < neededColumnIds.size(); i++) {
  cols.add(columns.get(neededColumnIds.get(i)).getName());
}
if (tbl.isPartitioned()
    && Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
  String alias_id = topOpMap.getKey();
  // ...
}

Code example source: apache/hive

/**
 * Update table schema, adding new columns as added for the partition.
 * @param client the client
 * @param table the table
 * @param partitionSchema the schema of the partition
 * @throws java.io.IOException Signals that an I/O exception has occurred.
 * @throws org.apache.hadoop.hive.metastore.api.InvalidOperationException the invalid operation exception
 * @throws org.apache.hadoop.hive.metastore.api.MetaException the meta exception
 * @throws org.apache.thrift.TException the t exception
 */
private void updateTableSchema(IMetaStoreClient client, Table table,
    HCatSchema partitionSchema) throws IOException, InvalidOperationException, MetaException, TException {
  List<FieldSchema> newColumns = HCatUtil.validatePartitionSchema(table, partitionSchema);
  if (newColumns.size() != 0) {
    List<FieldSchema> tableColumns = new ArrayList<FieldSchema>(table.getTTable().getSd().getCols());
    tableColumns.addAll(newColumns);
    // Update table schema to add the newly added columns
    table.getTTable().getSd().setCols(tableColumns);
    client.alter_table(table.getDbName(), table.getTableName(), table.getTTable());
  }
}

Code example source: apache/drill

private void msckAddPartitionsOneByOne(Hive db, Table table,
    Set<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
  for (CheckResult.PartitionResult part : partsNotInMs) {
    try {
      db.createPartition(table, Warehouse.makeSpecFromName(part.getPartitionName()));
      repairOutput.add("Repair: Added partition to metastore "
          + table.getTableName() + ':' + part.getPartitionName());
    } catch (Exception e) {
      LOG.warn("Repair error, could not add partition to metastore: ", e);
    }
  }
}

Code example source: apache/hive

// Excerpt; the enclosing method and the else branch are truncated in the source.
conn.getMSC().addDynamicPartitions(txnToWriteId.getTxnId(),
    txnToWriteId.getWriteId(), conn.getDatabase(),
    conn.getTable().getTableName(), partNames,
    DataOperationType.INSERT);
if (currentTxnIndex + 1 < txnToWriteIds.size()) {
  minTxnId.set(txnToWriteIds.get(currentTxnIndex + 1).getTxnId());
} else {
  // ...
}

Code example source: apache/drill

// Excerpt from a query-rewrite routine; the loop body and the lines between
// the loop and the table-name append (including the append that supplies the
// opening backtick for the db name) are truncated in the source.
String rewrittenQuery;
for (int i = 0; i < colNames.size(); i++) {
  if (i > 0) {
    rewrittenQueryBuilder.append(" , ");
  }
  // ...
}
// Append the backtick-quoted, fully qualified table name.
rewrittenQueryBuilder.append(tbl.getDbName());
rewrittenQueryBuilder.append("`.");
rewrittenQueryBuilder.append("`" + tbl.getTableName() + "`");
isRewritten = true;

Code example source: apache/hive

static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn)
    throws IOException {
  List<String> values = ptn.getValues();
  if (values.size() != table.getPartitionKeys().size()) {
    throw new IOException(
        "Partition values in partition inconsistent with table definition, table "
            + table.getTableName() + " has "
            + table.getPartitionKeys().size()
            + " partition keys, partition has " + values.size()
            + " partition values");
  }
  Map<String, String> ptnKeyValues = new HashMap<String, String>();
  int i = 0;
  for (FieldSchema schema : table.getPartitionKeys()) {
    // CONCERN : the way this mapping goes, the order *needs* to be
    // preserved for table.getPartitionKeys() and ptn.getValues()
    ptnKeyValues.put(schema.getName().toLowerCase(), values.get(i));
    i++;
  }
  return ptnKeyValues;
}

Code example source: apache/drill

// Excerpt from an index-staleness check; the method signature, closing
// braces, and the try/catch that binds `e` are truncated in the source.
    throws HiveException {
  if (indexes == null || indexes.size() == 0) {
    return false;
  }
  // ...
  for (FileStatus status : srcs) {
    if (status.getModificationTime() > Long.parseLong(indexTs)) {
      LOG.info("Index is stale on table '" + src.getTableName()
          + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
          + "' is higher than index creation time (" + indexTs + ").");
    }
  }
  // From the catch block of the truncated try/catch:
  throw new HiveException("Failed to grab timestamp information from table '"
      + src.getTableName() + "': " + e.getMessage(), e);

Code example source: apache/hive

// Excerpt from a truncate-table path; the enclosing method, the branch
// around the snapshot, and the try/catch are truncated in the source.
  snapshot = AcidUtils.getTableSnapshot(conf, table, true);
} else {
  String fullTableName = getFullTableName(table.getDbName(), table.getTableName());
  ValidWriteIdList writeIdList = getMSC().getValidWriteIds(fullTableName, writeId);
  snapshot = new TableSnapshot(writeId, writeIdList.writeToString());
}
// ...
// partNames is null when no partition spec was given (the condition is
// reconstructed around the truncated line):
List<String> partNames = (partSpec == null)
    ? null : getPartitionNames(table.getDbName(), table.getTableName(), partSpec, (short) -1);
if (snapshot == null) {
  getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
} else {
  getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames,
      snapshot.getValidWriteIdList(), snapshot.getWriteId());
}
// From the catch block of the truncated try/catch:
throw new HiveException(e);

Code example source: apache/hive

private int updateColumns(Table tbl, Partition part)
    throws HiveException {
  String serializationLib = tbl.getSd().getSerdeInfo().getSerializationLib();
  if (MetastoreConf.getStringCollection(conf,
      MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains(serializationLib)) {
    throw new HiveException(tbl.getTableName() + " has serde " + serializationLib + " for which schema " +
        "is already handled by HMS.");
  }
  Deserializer deserializer = tbl.getDeserializer(true);
  try {
    LOG.info("Updating metastore columns for table: {}", tbl.getTableName());
    final List<FieldSchema> fields = HiveMetaStoreUtils.getFieldsFromDeserializer(
        tbl.getTableName(), deserializer);
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    sd.setCols(fields);
  } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) {
    LOG.error("alter table update columns: {}", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }
  return 0;
}

Code example source: apache/hive

// Excerpt from a loop over read entities; closing braces and the else
// branch are truncated in the source.
if (isValuesTempTable(part.getTable().getTableName())) {
  continue;
}
if (part.getTable().isPartitioned()) {
  newInput = new ReadEntity(part, parentViewInfo, isDirectRead);
} else {
  // ...

Code example source: apache/hive

// Excerpt; intermediate lines are truncated in the source, and the
// HiveException appears to be thrown from the catch of the try shown here.
if (table.isPartitioned()) {
  try {
    if (tPartition.getSd().getLocation() == null) {
      // ...
      throw new HiveException("Invalid partition for table " + table.getTableName(),
          e);

Code example source: apache/hive

/**
 * Get a list of Partitions by filter.
 * @param tbl The table containing the partitions.
 * @param filter A string representing the partition predicates.
 * @return a list of partitions satisfying the partition predicates.
 * @throws HiveException
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().listPartitionsByFilter(
      tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, tParts);
}

Code example source: apache/drill

protected List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
  List<String> tableNames = new ArrayList<String>();
  for (Entity entity : entities) {
    if (entity.getType() == Entity.Type.TABLE) {
      tableNames.add(entity.getTable().getDbName() + "." + entity.getTable().getTableName());
    }
  }
  return tableNames;
}

Code example source: apache/hive

// Excerpt from a unit test; the statements between these lines are
// truncated in the source, which is why the assertions appear to conflict.
ts.add(table1Name);
ts.add("table2");
Table tbl1 = createTestTable(dbName, ts.get(0));
hm.createTable(tbl1);
// ...
assertEquals(2, fts.size());
// ...
assertEquals(1, fts.size());
assertEquals(ts.get(0), fts.get(0));
// ...
assertEquals(table1Name, table1.getTableName());

Code example source: apache/hive

private void genAutoColumnStatsGatheringPipeline(QB qb, Table table, Map<String, String> partSpec,
    Operator curr, boolean isInsertInto, boolean useTableValueConstructor)
    throws SemanticException {
  LOG.info("Generate an operator pipeline to autogather column stats for table " + table.getTableName()
      + " in query " + ctx.getCmd());
  ColumnStatsAutoGatherContext columnStatsAutoGatherContext = null;
  columnStatsAutoGatherContext = new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto, ctx);
  if (useTableValueConstructor) {
    // Table does not exist, use table value constructor to simulate
    columnStatsAutoGatherContext.insertTableValuesAnalyzePipeline();
  } else {
    // Table already exists
    columnStatsAutoGatherContext.insertAnalyzePipeline();
  }
  columnStatsAutoGatherContexts.add(columnStatsAutoGatherContext);
}

Code example source: apache/hive

private ImmutableBitSet getEnabledNotNullConstraints(Table tbl) throws HiveException {
  List<Boolean> nullConstraints = new ArrayList<>();
  final NotNullConstraint nnc = Hive.get().getEnabledNotNullConstraints(
      tbl.getDbName(), tbl.getTableName());
  ImmutableBitSet bitSet = null;
  if (nnc == null || nnc.getNotNullConstraints().isEmpty()) {
    return bitSet;
  }
  // Build the bitset with not null columns
  ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
  for (String nnCol : nnc.getNotNullConstraints().values()) {
    int nnPos = -1;
    for (int i = 0; i < tbl.getCols().size(); i++) {
      if (tbl.getCols().get(i).getName().equals(nnCol)) {
        nnPos = i;
        builder.set(nnPos);
        break;
      }
    }
  }
  bitSet = builder.build();
  return bitSet;
}
