Usage of the org.apache.hadoop.hive.metastore.api.Table.getTableType() method, with code examples


This article collects Java code examples for the org.apache.hadoop.hive.metastore.api.Table.getTableType() method and shows how Table.getTableType() is used in practice. The examples come mainly from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and are intended as practical references. Details of the Table.getTableType() method are as follows:
Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: getTableType

About Table.getTableType

getTableType() is the Thrift-generated getter for the tableType field of the metastore Table object. It returns the table type as a raw String, normally the name of one of the org.apache.hadoop.hive.metastore.TableType enum constants (MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, MATERIALIZED_VIEW), which is why callers typically compare the result against TableType.X.toString() or convert it with TableType.valueOf(...). The field may also be unset (null) on hand-built Table objects, as the last example below shows.
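
As a quick orientation before the sourced examples, here is a minimal, self-contained sketch of the common pattern: read the raw type string with getTableType() and compare or convert it against the TableType enum. It is not taken from any of the projects below; the table construction is purely illustrative.

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

public class TableTypeExample {
  public static void main(String[] args) {
    // Build a bare metastore Table object for illustration only;
    // in practice the object usually comes from a metastore client call.
    Table table = new Table();
    table.setDbName("default");
    table.setTableName("demo");
    table.setTableType(TableType.EXTERNAL_TABLE.toString());

    // getTableType() returns the raw type string stored in the metastore.
    String rawType = table.getTableType();
    System.out.println("raw type: " + rawType);

    // Compare against the TableType enum, as most of the examples below do.
    boolean isExternal = TableType.EXTERNAL_TABLE.toString().equals(rawType);
    System.out.println("external table? " + isExternal);

    // Or convert to the enum; note this throws if the string is null or unknown.
    TableType type = TableType.valueOf(rawType);
    System.out.println("as enum: " + type);
  }
}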

Code examples

Code example source: apache/hive

public TableType getTableType() {
  return Enum.valueOf(TableType.class, tTable.getTableType());
}

Code example source: apache/hive

public static boolean isMaterializedViewTable(Table table) {
 if (table == null) {
  return false;
 }
 return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}

Code example source: apache/hive

public static boolean isView(Table table) {
 if (table == null) {
  return false;
 }
 return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}

Code example source: apache/hive

private static Boolean isViewTable(Table t) {
 return t.isSetTableType() ?
   t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null;
}

Code example source: apache/hive

public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj,
  Iterator<String> fileIter, Long timestamp) {
 this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
   tableObj.getTableType(), timestamp);
 try {
  this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
 } catch (TException e) {
  throw new IllegalArgumentException("Could not serialize: ", e);
 }
 this.files = (fileIter != null) ? Lists.newArrayList(fileIter) : Lists.newArrayList();
}

Code example source: apache/hive

public JSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj,
  List<Map<String, String>> partitionKeyValues, long timestamp) {
 this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
   tableObj.getTableType(), partitionKeyValues, timestamp);
 try {
  this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
 } catch (TException e) {
  throw new IllegalArgumentException("Could not serialize: ", e);
 }
}

Code example source: apache/hive

public JSONDropTableMessage(String server, String servicePrincipal, Table tableObj,
  Long timestamp) {
 this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
   tableObj.getTableType(), timestamp);
 try {
  this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
 } catch (TException e) {
  throw new IllegalArgumentException("Could not serialize: ", e);
 }
 checkValid();
}

Code example source: apache/hive

@Override public void preCreateTable(Table table) throws MetaException {
 if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
  throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE);
 }
 Arrays.stream(KafkaTableProperties.values())
   .filter(KafkaTableProperties::isMandatory)
   .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()),
     "Set Table property " + key.getName()));
 // Put all the default at the pre create.
 Arrays.stream(KafkaTableProperties.values()).forEach((key) -> {
  if (table.getParameters().get(key.getName()) == null) {
   table.putToParameters(key.getName(), key.getDefaultValue());
  }
 });
}

Code example source: apache/hive

@Override
public CreateTableMessage buildCreateTableMessage(Table table) {
 return new JSONCreateTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
   table.getTableName(), table.getTableType(), now());
}

Code example source: apache/hive

@Override
public DropTableMessage buildDropTableMessage(Table table) {
 return new JSONDropTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
   table.getTableName(), table.getTableType(), now());
}

Code example source: apache/hive

@Override
public InsertMessage buildInsertMessage(String db, Table table, Map<String,String> partKeyVals,
  List<String> files) {
 return new JSONInsertMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
   table.getTableName(), table.getTableType(), partKeyVals, files, now());
}

Code example source: prestodb/presto

public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
  StorageDescriptor storageDescriptor = table.getSd();
  if (storageDescriptor == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
  }
  Table.Builder tableBuilder = Table.builder()
      .setDatabaseName(table.getDbName())
      .setTableName(table.getTableName())
      .setOwner(nullToEmpty(table.getOwner()))
      .setTableType(table.getTableType())
      .setDataColumns(schema.stream()
          .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
          .collect(toList()))
      .setPartitionColumns(table.getPartitionKeys().stream()
          .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
          .collect(toList()))
      .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
      .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
      .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
  fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
  return tableBuilder.build();
}

Code example source: apache/hive

@Override
public AlterTableMessage buildAlterTableMessage(Table before, Table after, Long writeId) {
 return new JSONAlterTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, before.getDbName(),
   before.getTableName(), before.getTableType(), writeId, now());
}

Code example source: apache/hive

@Override
public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions) {
 return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
   table.getTableName(), table.getTableType(),
   MessageBuilder.getPartitionKeyValues(table, partitions), now());
}

Code example source: apache/hive

@Override
public AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitionsIterator) {
 return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(),
   table.getTableName(), table.getTableType(),
   MessageBuilder.getPartitionKeyValues(table, partitionsIterator), now());
}

Code example source: prestodb/presto

@Override
public synchronized void dropTable(String databaseName, String tableName, boolean deleteData)
{
  List<String> locations = listAllDataPaths(this, databaseName, tableName);
  SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
  Table table = relations.remove(schemaTableName);
  if (table == null) {
    throw new TableNotFoundException(schemaTableName);
  }
  views.remove(schemaTableName);
  partitions.keySet().removeIf(partitionName -> partitionName.matches(databaseName, tableName));
  // remove data
  if (deleteData && table.getTableType().equals(MANAGED_TABLE.name())) {
    for (String location : locations) {
      if (location != null) {
        File directory = new File(new Path(location).toUri());
        checkArgument(isParentDir(directory, baseDirectory), "Table directory must be inside of the metastore base directory");
        deleteDirectory(directory);
      }
    }
  }
}

Code example source: apache/hive

private static void upgradeTableDesc(org.apache.hadoop.hive.metastore.api.Table tableObj, MetaData rv,
                   EximUtil.SemanticAnalyzerWrapperContext x)
    throws IOException, TException, HiveException {
 x.getLOG().debug("Converting table " + tableObj.getTableName() + " of type " + tableObj.getTableType() +
     " with para " + tableObj.getParameters());
 //TODO : isPathOwnedByHive is hard coded to true, need to get it from repl dump metadata.
 TableType tableType = TableType.valueOf(tableObj.getTableType());
 HiveStrictManagedMigration.TableMigrationOption migrationOption =
     HiveStrictManagedMigration.determineMigrationTypeAutomatically(tableObj, tableType,
         null, x.getConf(), x.getHive().getMSC(), true);
 HiveStrictManagedMigration.migrateTable(tableObj, tableType, migrationOption, false,
     getHiveUpdater(x.getConf()), x.getHive().getMSC(), x.getConf());
 x.getLOG().debug("Converted table " + tableObj.getTableName() + " of type " + tableObj.getTableType() +
     " with para " + tableObj.getParameters());
}

Code example source: apache/hive

@Override
public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before, Partition after,
                            Long writeId) {
 return new JSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
   before.getDbName(), before.getTableName(), table.getTableType(),
   MessageBuilder.getPartitionKeyValues(table,before), writeId, now());
}

Code example source: prestodb/presto

@Override
public Optional<Table> getTable(String databaseName, String tableName)
{
  try {
    return retry()
        .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class)
        .stopOnIllegalExceptions()
        .run("getTable", stats.getGetTable().wrap(() -> {
          try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
            Table table = client.getTable(databaseName, tableName);
            if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) {
              throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName));
            }
            return Optional.of(table);
          }
        }));
  }
  catch (NoSuchObjectException e) {
    return Optional.empty();
  }
  catch (TException e) {
    throw new PrestoException(HIVE_METASTORE_ERROR, e);
  }
  catch (Exception e) {
    throw propagate(e);
  }
}

Code example source: apache/hive

public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null) {
    // TableType specified was null, we need to figure out what type it was.
    if (MetaStoreUtils.isExternalTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
    } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
      wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
    } else {
      wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
    }
  }
  initialize(wrapperApiTable);
}
