org.apache.hadoop.hive.metastore.api.Table.getTableType()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(9.5k)|赞(0)|评价(0)|浏览(227)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.getTableType()方法的一些代码示例,展示了Table.getTableType()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getTableType()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:getTableType

Table.getTableType介绍

暂无

代码示例

代码示例来源:origin: apache/hive

  1. public TableType getTableType() {
  2. return Enum.valueOf(TableType.class, tTable.getTableType());
  3. }

代码示例来源:origin: apache/hive

  1. public static boolean isMaterializedViewTable(Table table) {
  2. if (table == null) {
  3. return false;
  4. }
  5. return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
  6. }

代码示例来源:origin: apache/hive

  1. public static boolean isView(Table table) {
  2. if (table == null) {
  3. return false;
  4. }
  5. return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
  6. }

代码示例来源:origin: apache/hive

  1. private static Boolean isViewTable(Table t) {
  2. return t.isSetTableType() ?
  3. t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null;
  4. }

代码示例来源:origin: apache/hive

  1. public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj,
  2. Iterator<String> fileIter, Long timestamp) {
  3. this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
  4. tableObj.getTableType(), timestamp);
  5. try {
  6. this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  7. } catch (TException e) {
  8. throw new IllegalArgumentException("Could not serialize: ", e);
  9. }
  10. this.files = (fileIter != null) ? Lists.newArrayList(fileIter) : Lists.newArrayList();
  11. }

代码示例来源:origin: apache/hive

/**
 * Builds a drop-partition notification message.
 * Delegates the scalar fields (db/table name, table type, partition key-values,
 * timestamp) to the chained constructor, then embeds the full table object as JSON.
 *
 * @throws IllegalArgumentException if the table cannot be Thrift-serialized to JSON
 */
public JSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj,
    List<Map<String, String>> partitionKeyValues, long timestamp) {
  this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
      tableObj.getTableType(), partitionKeyValues, timestamp);
  try {
    this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  } catch (TException e) {
    // Wrap the Thrift failure; callers treat a non-serializable table as a bad argument.
    throw new IllegalArgumentException("Could not serialize: ", e);
  }
}

代码示例来源:origin: apache/hive

/**
 * Builds a drop-table notification message.
 * Delegates the scalar fields to the chained constructor, embeds the full table
 * object as JSON, and finally validates the assembled message.
 *
 * @throws IllegalArgumentException if the table cannot be Thrift-serialized to JSON
 */
public JSONDropTableMessage(String server, String servicePrincipal, Table tableObj,
    Long timestamp) {
  this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
      tableObj.getTableType(), timestamp);
  try {
    this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  } catch (TException e) {
    throw new IllegalArgumentException("Could not serialize: ", e);
  }
  // Validate only after all fields (including tableObjJson) are populated.
  checkValid();
}

代码示例来源:origin: apache/hive

  1. @Override public void preCreateTable(Table table) throws MetaException {
  2. if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
  3. throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE);
  4. }
  5. Arrays.stream(KafkaTableProperties.values())
  6. .filter(KafkaTableProperties::isMandatory)
  7. .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()),
  8. "Set Table property " + key.getName()));
  9. // Put all the default at the pre create.
  10. Arrays.stream(KafkaTableProperties.values()).forEach((key) -> {
  11. if (table.getParameters().get(key.getName()) == null) {
  12. table.putToParameters(key.getName(), key.getDefaultValue());
  13. }
  14. });
  15. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public CreateTableMessage buildCreateTableMessage(Table table) {
  3. return new JSONCreateTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
  4. table.getTableName(), table.getTableType(), now());
  5. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public DropTableMessage buildDropTableMessage(Table table) {
  3. return new JSONDropTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
  4. table.getTableName(), table.getTableType(), now());
  5. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public InsertMessage buildInsertMessage(String db, Table table, Map<String,String> partKeyVals,
  3. List<String> files) {
  4. return new JSONInsertMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
  5. table.getTableName(), table.getTableType(), partKeyVals, files, now());
  6. }

代码示例来源:origin: prestodb/presto

/**
 * Converts a Hive metastore Thrift {@code Table} into Presto's internal Table
 * representation, using the supplied {@code schema} (rather than the storage
 * descriptor's columns) as the data columns.
 *
 * @throws PrestoException if the table has no storage descriptor
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            // Owner and parameters are normalized to empty rather than null.
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            .setDataColumns(schema.stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setPartitionColumns(table.getPartitionKeys().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            // View texts: empty strings are treated the same as absent.
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    // Storage settings are copied into the builder's nested storage builder.
    fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
    return tableBuilder.build();
}

代码示例来源:origin: apache/hive

  1. @Override
  2. public AlterTableMessage buildAlterTableMessage(Table before, Table after, Long writeId) {
  3. return new JSONAlterTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, before.getDbName(),
  4. before.getTableName(), before.getTableType(), writeId, now());
  5. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions) {
  3. return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
  4. table.getTableName(), table.getTableType(),
  5. MessageBuilder.getPartitionKeyValues(table, partitions), now());
  6. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitionsIterator) {
  3. return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
  4. table.getTableName(), table.getTableType(),
  5. MessageBuilder.getPartitionKeyValues(table, partitionsIterator), now());
  6. }

代码示例来源:origin: prestodb/presto

/**
 * Drops a table from this in-memory metastore: removes its relation, view, and
 * partition entries, and — for MANAGED tables only, when requested — deletes the
 * table's data directories from disk.
 *
 * @throws TableNotFoundException if no such table is registered
 */
@Override
public synchronized void dropTable(String databaseName, String tableName, boolean deleteData)
{
    // Capture data locations BEFORE removing metadata; they are unreachable afterwards.
    List<String> locations = listAllDataPaths(this, databaseName, tableName);
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    Table table = relations.remove(schemaTableName);
    if (table == null) {
        throw new TableNotFoundException(schemaTableName);
    }
    views.remove(schemaTableName);
    partitions.keySet().removeIf(partitionName -> partitionName.matches(databaseName, tableName));
    // remove data
    // Only managed tables own their data; external tables' files are left alone.
    if (deleteData && table.getTableType().equals(MANAGED_TABLE.name())) {
        for (String location : locations) {
            if (location != null) {
                File directory = new File(new Path(location).toUri());
                // Safety check: never delete outside the metastore's base directory.
                checkArgument(isParentDir(directory, baseDirectory), "Table directory must be inside of the metastore base directory");
                deleteDirectory(directory);
            }
        }
    }
}

代码示例来源:origin: apache/hive

/**
 * Upgrades an imported table descriptor to conform to strict managed-table
 * rules: decides the appropriate migration automatically and applies it,
 * mutating {@code tableObj} in place (the before/after debug logs show the
 * type change).
 *
 * NOTE(review): parameter {@code rv} is unused in this visible body — confirm
 * whether it is needed by callers or can be dropped.
 */
private static void upgradeTableDesc(org.apache.hadoop.hive.metastore.api.Table tableObj, MetaData rv,
    EximUtil.SemanticAnalyzerWrapperContext x)
    throws IOException, TException, HiveException {
  x.getLOG().debug("Converting table " + tableObj.getTableName() + " of type " + tableObj.getTableType() +
      " with para " + tableObj.getParameters());
  //TODO : isPathOwnedByHive is hard coded to true, need to get it from repl dump metadata.
  TableType tableType = TableType.valueOf(tableObj.getTableType());
  HiveStrictManagedMigration.TableMigrationOption migrationOption =
      HiveStrictManagedMigration.determineMigrationTypeAutomatically(tableObj, tableType,
          null, x.getConf(), x.getHive().getMSC(), true);
  HiveStrictManagedMigration.migrateTable(tableObj, tableType, migrationOption, false,
      getHiveUpdater(x.getConf()), x.getHive().getMSC(), x.getConf());
  x.getLOG().debug("Converted table " + tableObj.getTableName() + " of type " + tableObj.getTableType() +
      " with para " + tableObj.getParameters());
}

代码示例来源:origin: apache/hive

  1. @Override
  2. public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before, Partition after,
  3. Long writeId) {
  4. return new JSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
  5. before.getDbName(), before.getTableName(), table.getTableType(),
  6. MessageBuilder.getPartitionKeyValues(table,before), writeId, now());
  7. }

代码示例来源:origin: prestodb/presto

/**
 * Fetches a table from the Hive metastore with retries.
 * Non-Presto virtual views are rejected (Hive views are not supported);
 * a missing table maps to {@code Optional.empty()} rather than an exception.
 *
 * @throws PrestoException on Thrift/metastore communication failure
 */
@Override
public Optional<Table> getTable(String databaseName, String tableName)
{
    try {
        return retry()
                // These two exception types must not be retried: the first means
                // "definitively absent", the second "definitively unsupported".
                .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class)
                .stopOnIllegalExceptions()
                .run("getTable", stats.getGetTable().wrap(() -> {
                    try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                        Table table = client.getTable(databaseName, tableName);
                        if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) {
                            throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName));
                        }
                        return Optional.of(table);
                    }
                }));
    }
    catch (NoSuchObjectException e) {
        // Absent table is an expected outcome, not an error.
        return Optional.empty();
    }
    catch (TException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
    catch (Exception e) {
        throw propagate(e);
    }
}

代码示例来源:origin: apache/hive

/**
 * Wraps a metastore API table, working on a deep copy so the caller's object is
 * never mutated. If the copy has no table type set, one is inferred before
 * initialization; the order of these checks matters (external before
 * materialized view before virtual view, with managed as the fallback).
 */
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null){
    // TableType specified was null, we need to figure out what type it was.
    if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
      wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
    } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
      // No storage descriptor / location strongly suggests a view.
      wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
    } else {
      wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
    }
  }
  initialize(wrapperApiTable);
}
  17. }

相关文章

Table类方法