Usage of the org.apache.hadoop.hive.ql.metadata.Table.getFullyQualifiedName() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.getFullyQualifiedName() method and shows how it is used in practice. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the method are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getFullyQualifiedName

About Table.getFullyQualifiedName

Returns the fully qualified name of the table, i.e. the database name and the table name joined with a dot (for example, default.orders).
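
As a quick orientation before the collected examples, here is a minimal, hypothetical sketch of calling the method directly. It assumes the two-argument Table(databaseName, tableName) constructor; the database and table names are illustrative only.

    import org.apache.hadoop.hive.ql.metadata.Table;

    public class FullyQualifiedNameDemo {
      public static void main(String[] args) {
        // Hypothetical database and table names, purely for illustration.
        Table orders = new Table("default", "orders");
        // Expected to print the database-qualified name, e.g. "default.orders".
        System.out.println(orders.getFullyQualifiedName());
      }
    }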

Code Examples

Code example source: apache/hive

    public String getCompleteName() {
      return tbl.getFullyQualifiedName();
    }

Code example source: apache/hive

    protected List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
      List<String> tableNames = new ArrayList<String>();
      for (Entity entity : entities) {
        if (entity.getType() == Entity.Type.TABLE) {
          tableNames.add(entity.getTable().getFullyQualifiedName());
        }
      }
      return tableNames;
    }

Code example source: apache/hive

    public Stream<String> getTableNames() {
      return queryInfo.getInputs().stream()
          .filter(readEntity -> readEntity.getType() == Type.TABLE)
          .map(readEntity -> readEntity.getTable().getFullyQualifiedName());
    }

Code example source: apache/hive

    // Anonymous RelVisitor (the opening line is restored here for context)
    // that walks the plan and collects the fully qualified names of all
    // scanned tables.
    new RelVisitor() {
      @Override
      public void visit(RelNode node, int ordinal, RelNode parent) {
        if (node instanceof TableScan) {
          TableScan ts = (TableScan) node;
          tablesUsed.add(((RelOptHiveTable) ts.getTable()).getHiveTableMD().getFullyQualifiedName());
        }
        super.visit(node, ordinal, parent);
      }
    }.go(plan);

Code example source: apache/hive

    private Task<?> dropPartitionTask(Table table, Map<String, String> partSpec) throws SemanticException {
      Task<DDLWork> dropPtnTask = null;
      Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr =
          ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec));
      if (partSpecsExpr.size() > 0) {
        DropTableDesc dropPtnDesc = new DropTableDesc(table.getFullyQualifiedName(),
            partSpecsExpr, null, true, event.replicationSpec());
        dropPtnTask = TaskFactory.get(
            new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf
        );
      }
      return dropPtnTask;
    }

Code example source: apache/hive

    private Task<?> dropTableTask(Table table) {
      assert(table != null);
      DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), table.getTableType(),
          true, false, event.replicationSpec());
      return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf);
    }

Code example source: apache/hive

    private Set<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
      Set<String> tablesUsed = new HashSet<>();
      for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
        Table table = topOp.getConf().getTableMetadata();
        if (!table.isMaterializedTable() && !table.isView()) {
          // Add to signature
          tablesUsed.add(table.getFullyQualifiedName());
        }
      }
      return tablesUsed;
    }

Code example source: apache/hive

    /**
     * Assert it supports Acid write.
     */
    protected void validateTargetTable(Table mTable) throws SemanticException {
      if (mTable.getTableType() == TableType.VIRTUAL_VIEW || mTable.getTableType() == TableType.MATERIALIZED_VIEW) {
        LOG.error("Table " + mTable.getFullyQualifiedName() + " is a view or materialized view");
        throw new SemanticException(ErrorMsg.UPDATE_DELETE_VIEW.getMsg());
      }
    }

Code example source: apache/hive

    private ValidTxnWriteIdList getQueryValidTxnWriteIdList() throws SemanticException {
      // TODO: Once HIVE-18948 is in, should be able to retrieve writeIdList from the conf.
      //cachedWriteIdList = AcidUtils.getValidTxnWriteIdList(conf);
      //
      List<String> transactionalTables = tablesFromReadEntities(inputs)
          .stream()
          .filter(table -> AcidUtils.isTransactionalTable(table))
          .map(table -> table.getFullyQualifiedName())
          .collect(Collectors.toList());
      if (transactionalTables.size() > 0) {
        try {
          String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
          return getTxnMgr().getValidWriteIds(transactionalTables, txnString);
        } catch (Exception err) {
          String msg = "Error while getting the txnWriteIdList for tables " + transactionalTables
              + " and validTxnList " + conf.get(ValidTxnList.VALID_TXNS_KEY);
          throw new SemanticException(msg, err);
        }
      }
      // No transactional tables.
      return null;
    }

Code example source: apache/hive

    AnalyzeRewriteContext getAnalyzeRewriteContext() {
      AnalyzeRewriteContext analyzeRewrite = new AnalyzeRewriteContext();
      analyzeRewrite.setTableName(tbl.getFullyQualifiedName());
      analyzeRewrite.setTblLvl(isTableLevel);
      analyzeRewrite.setColName(colNames);
      analyzeRewrite.setColType(colType);
      return analyzeRewrite;
    }

Code example source: apache/hive

    private static boolean hasExternalTableAncestor(Operator op, StringBuilder sb) {
      boolean result = false;
      Operator ancestor = OperatorUtils.findSingleOperatorUpstream(op, TableScanOperator.class);
      if (ancestor != null) {
        TableScanOperator ts = (TableScanOperator) ancestor;
        if (MetaStoreUtils.isExternalTable(ts.getConf().getTableMetadata().getTTable())) {
          sb.append(ts.getConf().getTableMetadata().getFullyQualifiedName());
          return true;
        }
      }
      return result;
    }

Code example source: apache/hive

    // Excerpt; the enclosing method is truncated at the source.
    String tableName = tableUsed.getFullyQualifiedName();
    ValidTxnWriteIdList currentTxnWriteIdList = lookupInfo.txnWriteIdListProvider.get();
    if (currentTxnWriteIdList == null) {
      LOG.debug("Cached query no longer valid due to table {}", tableUsed.getFullyQualifiedName());
      // ... (remainder truncated at the source)

Code example source: apache/hive

    private void alterPartitionSpec(Table tbl,
                                    Map<String, String> partSpec,
                                    org.apache.hadoop.hive.metastore.api.Partition tpart,
                                    boolean inheritTableSpecs,
                                    String partPath) throws HiveException, InvalidOperationException {
      alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
      String fullName = tbl.getTableName();
      if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
        fullName = tbl.getFullyQualifiedName();
      }
      alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
          new Partition(tbl, tpart), null, true);
    }

Code example source: apache/hive

    /**
     * Assert that we are not asked to update a bucketing column or partition column.
     * @param colName it's the A in "SET A = B"
     */
    protected void checkValidSetClauseTarget(ASTNode colName, Table targetTable) throws SemanticException {
      String columnName = normalizeColName(colName.getText());
      // Make sure this isn't one of the partitioning columns, that's not supported.
      for (FieldSchema fschema : targetTable.getPartCols()) {
        if (fschema.getName().equalsIgnoreCase(columnName)) {
          throw new SemanticException(ErrorMsg.UPDATE_CANNOT_UPDATE_PART_VALUE.getMsg());
        }
      }
      // Updating a bucket column would move the row from one file to another - not supported.
      if (targetTable.getBucketCols() != null && targetTable.getBucketCols().contains(columnName)) {
        throw new SemanticException(ErrorMsg.UPDATE_CANNOT_UPDATE_BUCKET_VALUE, columnName);
      }
      boolean foundColumnInTargetTable = false;
      for (FieldSchema col : targetTable.getCols()) {
        if (columnName.equalsIgnoreCase(col.getName())) {
          foundColumnInTargetTable = true;
          break;
        }
      }
      if (!foundColumnInTargetTable) {
        throw new SemanticException(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE, colName.getText(),
            targetTable.getFullyQualifiedName());
      }
    }

Code example source: apache/hive

    // Excerpt; the beginning of the enclosing if/else is truncated at the source.
          ts.getConf().getTableMetadata().getFullyQualifiedName());
      disableSemiJoin = true;
    } else {
      LOG.debug("Join key {} is from {} which is an external table. Disabling semijoin optimization.",
          columnOrigin.col,
          joinKeyTs.getConf().getTableMetadata().getFullyQualifiedName());
      disableSemiJoin = true;

Code example source: apache/hive

    @Override
    public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues) throws StreamingException {
      String partLocation = null;
      String partName = null;
      boolean exists = false;
      try {
        Map<String, String> partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
        AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
        partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
        partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
        addPartitionDesc.addPartition(partSpec, partLocation);
        Partition partition = Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
        if (getMSC() == null) {
          // We assume it doesn't exist if we can't check it,
          // so the driver will decide.
          return new PartitionInfo(partName, partLocation, false);
        }
        getMSC().add_partition(partition);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created partition {} for table {}", partName,
              tableObject.getFullyQualifiedName());
        }
      } catch (AlreadyExistsException e) {
        exists = true;
      } catch (HiveException | TException e) {
        throw new StreamingException("Unable to create partition for values: " + partitionValues + " connection: " +
            toConnectionInfoString(), e);
      }
      return new PartitionInfo(partName, partLocation, exists);
    }

Code example source: apache/hive

    static AnalyzeRewriteContext genAnalyzeRewriteContext(HiveConf conf, Table tbl) {
      AnalyzeRewriteContext analyzeRewrite = new AnalyzeRewriteContext();
      analyzeRewrite.setTableName(tbl.getFullyQualifiedName());
      analyzeRewrite.setTblLvl(!(conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()));
      List<String> colNames = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
      List<String> colTypes = getColumnTypes(tbl, colNames);
      analyzeRewrite.setColName(colNames);
      analyzeRewrite.setColType(colTypes);
      return analyzeRewrite;
    }

Code example source: apache/hive

    @Override
    public RelNode visit(TableScan scan) {
      if (scan instanceof HiveTableScan) {
        HiveTableScan hiveScan = (HiveTableScan) scan;
        RelOptHiveTable relOptHiveTable = (RelOptHiveTable) hiveScan.getTable();
        Table tab = relOptHiveTable.getHiveTableMD();
        if (tab.isTemporary()) {
          fail(tab.getTableName() + " is a temporary table");
        }
        if (tab.getTableType() == TableType.EXTERNAL_TABLE) {
          fail(tab.getFullyQualifiedName() + " is an external table");
        }
        return scan;
      }
      // TableScan of a non-Hive table - don't support for materializations.
      fail(scan.getTable().getQualifiedName() + " is a table scan of a non-Hive table.");
      return scan;
    }

Code example source: apache/hive

    // Excerpt; the assignment to tableQName (implied by its use below) is restored.
    final String tableQName =
        ((RelOptHiveTable) tableScan.getTable()).getHiveTableMD().getFullyQualifiedName();
    final ValidWriteIdList tableCurrentTxnList =
        currentTxnList.getTableValidWriteIdList(tableQName);

Code example source: apache/hive

    ctx.addMaterializedTable(table.getFullyQualifiedName(), table);
