org.apache.hadoop.hive.metastore.api.Table.setTableType()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(12.6k)|赞(0)|评价(0)|浏览(200)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.setTableType()方法的一些代码示例,展示了Table.setTableType()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.setTableType()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:setTableType

Table.setTableType介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/**
 * Records this table's type by storing the enum's string form on the
 * underlying Thrift table object {@code tTable}.
 *
 * @param tableType the type to set; must be non-null (a null value would
 *                  throw a NullPointerException from {@code toString()})
 */
  1. public void setTableType(TableType tableType) {
  2. tTable.setTableType(tableType.toString());
  3. }

代码示例来源:origin: apache/drill

/**
 * Sets the table type on the wrapped Thrift table object {@code tTable},
 * converting the enum to its string representation first.
 *
 * @param tableType the type to set; must be non-null (a null value would
 *                  throw a NullPointerException from {@code toString()})
 */
  1. public void setTableType(TableType tableType) {
  2. tTable.setTableType(tableType.toString());
  3. }

代码示例来源:origin: apache/hive

  1. private void validateTableType(Table tbl) {
  2. // If the table has property EXTERNAL set, update table type
  3. // accordingly
  4. String tableType = tbl.getTableType();
  5. boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
  6. if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
  7. if (isExternal) {
  8. tableType = TableType.EXTERNAL_TABLE.toString();
  9. }
  10. }
  11. if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
  12. if (!isExternal) {
  13. tableType = TableType.MANAGED_TABLE.toString();
  14. }
  15. }
  16. tbl.setTableType(tableType);
  17. }

代码示例来源:origin: apache/hive

/**
 * Wraps a Thrift metastore Table in a deep copy, inferring a table type when
 * the copy carries none.
 *
 * Inference order (first match wins): external table, then materialized
 * view, then virtual view (no storage descriptor or no location), otherwise
 * managed table.
 */
  1. public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
// Deep copy so later mutation of the wrapper never leaks into the caller's object.
  2. org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  3. if (wrapperApiTable.getTableType() == null){
  4. // TableType specified was null, we need to figure out what type it was.
  5. if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
  6. wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  7. } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
  8. wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
// No storage descriptor or no physical location: treat as a view.
  9. } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
  10. wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
  11. } else {
  12. wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
  13. }
  14. }
  15. initialize(wrapperApiTable);
  16. }
  17. }

代码示例来源:origin: apache/drill

/**
 * Wraps a Thrift metastore Table in a deep copy, inferring a table type when
 * the copy carries none.
 *
 * Inference order (first match wins): external table, then index table, then
 * materialized view, then virtual view (no storage descriptor or no
 * location), otherwise managed table.
 */
  1. public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
// Deep copy so later mutation of the wrapper never leaks into the caller's object.
  2. org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  3. if (wrapperApiTable.getTableType() == null){
  4. // TableType specified was null, we need to figure out what type it was.
  5. if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
  6. wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  7. } else if (MetaStoreUtils.isIndexTable(wrapperApiTable)) {
  8. wrapperApiTable.setTableType(TableType.INDEX_TABLE.toString());
  9. } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
  10. wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
// No storage descriptor or no physical location: treat as a view.
  11. } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
  12. wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
  13. } else {
  14. wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
  15. }
  16. }
  17. initialize(wrapperApiTable);
  18. }
  19. }

代码示例来源:origin: apache/hive

  1. static boolean migrateToExternalTable(Table tableObj, TableType tableType, boolean dryRun, HiveUpdater hiveUpdater)
  2. throws HiveException {
  3. String msg;
  4. switch (tableType) {
  5. case MANAGED_TABLE:
  6. if (AcidUtils.isTransactionalTable(tableObj)) {
  7. msg = createExternalConversionExcuse(tableObj,
  8. "Table is a transactional table");
  9. LOG.debug(msg);
  10. return false;
  11. }
  12. LOG.info("Converting {} to external table ...", getQualifiedName(tableObj));
  13. if (!dryRun) {
  14. tableObj.setTableType(TableType.EXTERNAL_TABLE.toString());
  15. hiveUpdater.updateTableProperties(tableObj, convertToExternalTableProps);
  16. }
  17. return true;
  18. case EXTERNAL_TABLE:
  19. msg = createExternalConversionExcuse(tableObj,
  20. "Table is already an external table");
  21. LOG.debug(msg);
  22. break;
  23. default: // VIEW/MATERIALIZED_VIEW
  24. msg = createExternalConversionExcuse(tableObj,
  25. "Table type " + tableType + " cannot be converted");
  26. LOG.debug(msg);
  27. break;
  28. }
  29. return false;
  30. }

代码示例来源:origin: prestodb/presto

  1. public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
  2. {
  3. org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
  4. result.setDbName(table.getDatabaseName());
  5. result.setTableName(table.getTableName());
  6. result.setOwner(table.getOwner());
  7. result.setTableType(table.getTableType());
  8. result.setParameters(table.getParameters());
  9. result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
  10. result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
  11. result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
  12. result.setViewOriginalText(table.getViewOriginalText().orElse(null));
  13. result.setViewExpandedText(table.getViewExpandedText().orElse(null));
  14. return result;
  15. }

代码示例来源:origin: apache/hive

  1. newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  2. } else {
  3. newTable.setTableType(TableType.MANAGED_TABLE.toString());

代码示例来源:origin: apache/hive

  1. public TableBuilder(Database database) {
  2. this.database = database;
  3. partitions = new ArrayList<>();
  4. columnNames = new ArrayList<>();
  5. columnTypes = new ArrayList<>();
  6. partitionKeys = Collections.emptyList();
  7. table = new Table();
  8. table.setDbName(database.getName());
  9. table.setTableType(TableType.MANAGED_TABLE.toString());
  10. Map<String, String> tableParams = new HashMap<String, String>();
  11. tableParams.put("transactional", Boolean.TRUE.toString());
  12. table.setParameters(tableParams);
  13. sd = new StorageDescriptor();
  14. sd.setInputFormat(OrcInputFormat.class.getName());
  15. sd.setOutputFormat(OrcOutputFormat.class.getName());
  16. sd.setNumBuckets(1);
  17. table.setSd(sd);
  18. serDeInfo = new SerDeInfo();
  19. serDeInfo.setParameters(new HashMap<String, String>());
  20. serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  21. serDeInfo.setSerializationLib(OrcSerde.class.getName());
  22. sd.setSerdeInfo(serDeInfo);
  23. }

代码示例来源:origin: apache/incubator-gobblin

/**
 * Verifies that getDropPartitionsDDLInfo() expands the
 * "gobblin.replaced.partitions" parameter into per-partition-key maps, and
 * that a partition listed among the replaced values does not report itself
 * as droppable.
 */
  1. @Test
  2. public void dropReplacedPartitionsTest() throws Exception {
  3. Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
  4. table.setTableType("VIRTUAL_VIEW");
  5. table.setPartitionKeys(ImmutableList.of(new FieldSchema("year", "string", ""), new FieldSchema("month", "string", "")));
  6. Partition part = new Partition();
// Encoding: '|' separates partitions, ',' separates the values of one partition.
  7. part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));
  8. SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
  9. SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);
  10. QueryBasedHiveConversionEntity conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
  11. List<ImmutableMap<String, String>> expected =
  12. ImmutableList.of(ImmutableMap.of("year", "2015", "month", "12"), ImmutableMap.of("year", "2016", "month", "01"));
  13. Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
  14. // Make sure that a partition itself is not dropped
// 2016,02 appears in the replaced list AND is this partition's own values,
// so the expected result still contains only the other two partitions.
  15. Partition replacedSelf = new Partition();
  16. replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
  17. replacedSelf.setValues(ImmutableList.of("2016", "02"));
  18. conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
  19. Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
  20. }

代码示例来源:origin: apache/hive

  1. preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  2. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
  3. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);

代码示例来源:origin: apache/hive

/**
 * Checks that KafkaStorageHandler propagates the mandatory Kafka table
 * properties (topic, bootstrap servers) into the job properties and fills
 * every non-mandatory property with its declared default value.
 */
  1. @Test public void configureJobPropertiesWithDefaultValues() throws MetaException {
  2. KafkaStorageHandler kafkaStorageHandler = new KafkaStorageHandler();
  3. TableDesc tableDesc = Mockito.mock(TableDesc.class);
  4. Properties properties = new Properties();
  5. Table preCreateTable = new Table();
  6. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
  7. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
  8. preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
// preCreateTable() populates the remaining table parameters with defaults.
  9. kafkaStorageHandler.preCreateTable(preCreateTable);
  10. preCreateTable.getParameters().forEach(properties::setProperty);
  11. Mockito.when(tableDesc.getProperties()).thenReturn(properties);
  12. Map<String, String> jobProperties = new HashMap<>();
  13. kafkaStorageHandler.configureInputJobProperties(tableDesc, jobProperties);
  14. kafkaStorageHandler.configureOutputJobProperties(tableDesc, jobProperties);
// The two mandatory properties must pass through unchanged.
  15. Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()), TEST_TOPIC);
  16. Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()), LOCALHOST_9291);
// Every optional property must equal its default.
  17. Arrays.stream(KafkaTableProperties.values())
  18. .filter(key -> !key.isMandatory())
  19. .forEach((key) -> Assert.assertEquals("Wrong match for key " + key.getName(),
  20. key.getDefaultValue(),
  21. jobProperties.get(key.getName())));
  22. }

代码示例来源:origin: apache/hive

  1. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
  2. preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
  3. preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  4. kafkaStorageHandler.preCreateTable(preCreateTable);
  5. preCreateTable.getParameters().forEach(properties::setProperty);

代码示例来源:origin: apache/hive

  1. throws TException {
  2. Table table = new Table();
  3. table.setTableType(TableType.MANAGED_TABLE.name());
  4. table.setTableName(tableName);
  5. table.setDbName(dbName);

代码示例来源:origin: apache/incubator-gobblin

  1. public static Table getTestTable(String dbName, String tableName) {
  2. Table table = new Table();
  3. table.setDbName(dbName);
  4. table.setTableName(tableName);
  5. table.setTableType(TableType.EXTERNAL_TABLE.name());
  6. StorageDescriptor sd = new StorageDescriptor();
  7. sd.setLocation("/tmp/test");
  8. table.setSd(sd);
  9. return table;
  10. }

代码示例来源:origin: apache/hive

  1. tbl.setTableType(tableType);
  2. return tbl;

代码示例来源:origin: apache/hive

  1. Table build() {
  2. StorageDescriptor sd = new StorageDescriptor();
  3. if (columns == null) {
  4. sd.setCols(Collections.emptyList());
  5. } else {
  6. sd.setCols(columns);
  7. }
  8. SerDeInfo serdeInfo = new SerDeInfo();
  9. serdeInfo.setSerializationLib(serde);
  10. serdeInfo.setName(tableName);
  11. sd.setSerdeInfo(serdeInfo);
  12. sd.setInputFormat(inputFormat);
  13. sd.setOutputFormat(outputFormat);
  14. if (location != null) {
  15. sd.setLocation(location);
  16. }
  17. Table table = new Table();
  18. table.setDbName(dbName);
  19. table.setTableName(tableName);
  20. table.setSd(sd);
  21. table.setParameters(parameters);
  22. table.setOwner(owner);
  23. if (partitionKeys != null) {
  24. table.setPartitionKeys(partitionKeys);
  25. }
  26. table.setTableType(tableType.toString());
  27. return table;
  28. }
  29. }

代码示例来源:origin: apache/hive

  1. private void createTable(String dbName, String tableName) throws Exception {
  2. String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME
  3. : dbName;
  4. try {
  5. msc.dropTable(databaseName, tableName);
  6. } catch (Exception e) {
  7. } // can fail with NoSuchObjectException
  8. Table tbl = new Table();
  9. tbl.setDbName(databaseName);
  10. tbl.setTableName(tableName);
  11. tbl.setTableType("MANAGED_TABLE");
  12. StorageDescriptor sd = new StorageDescriptor();
  13. sd.setCols(getTableColumns());
  14. tbl.setPartitionKeys(getPartitionKeys());
  15. tbl.setSd(sd);
  16. sd.setBucketCols(new ArrayList<String>(2));
  17. sd.setSerdeInfo(new SerDeInfo());
  18. sd.getSerdeInfo().setName(tbl.getTableName());
  19. sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  20. sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  21. sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
  22. sd.setInputFormat(RCFileInputFormat.class.getName());
  23. sd.setOutputFormat(RCFileOutputFormat.class.getName());
  24. Map<String, String> tableParams = new HashMap<String, String>();
  25. tbl.setParameters(tableParams);
  26. msc.createTable(tbl);
  27. }

代码示例来源:origin: apache/hive

/**
 * Creating a VIRTUAL_VIEW table must not assign a storage location: after a
 * round trip through the metastore the created table's storage descriptor
 * location should still be null.
 */
  1. @Test
  2. public void testCreateTableDefaultValuesView() throws Exception {
  3. Table table = new Table();
  4. StorageDescriptor sd = new StorageDescriptor();
  5. List<FieldSchema> cols = new ArrayList<>();
  6. table.setDbName(DEFAULT_DATABASE);
  7. table.setTableName("test_table_2");
  8. table.setTableType("VIRTUAL_VIEW");
  9. cols.add(new FieldSchema("column_name", "int", null));
  10. sd.setCols(cols);
  11. sd.setSerdeInfo(new SerDeInfo());
  12. table.setSd(sd);
// Round-trip: create the view, then read it back from the metastore.
  13. client.createTable(table);
  14. Table createdTable = client.getTable(table.getDbName(), table.getTableName());
  15. // No location should be created for views
  16. Assert.assertNull("Storage descriptor location should be null",
  17. createdTable.getSd().getLocation());
  18. }

代码示例来源:origin: apache/storm

  1. tbl.setDbName(databaseName);
  2. tbl.setTableName(tableName);
  3. tbl.setTableType(TableType.MANAGED_TABLE.toString());
  4. StorageDescriptor sd = new StorageDescriptor();
  5. sd.setCols(getTableColumns(colNames, colTypes));

相关文章

Table类方法