Usage of the org.apache.hadoop.hive.metastore.api.Table.setTableType() method, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.hive.metastore.api.Table.setTableType() method and shows how it is used in practice. The examples are extracted from selected open-source projects hosted on GitHub/Stackoverflow/Maven and should be useful as references. Details of Table.setTableType() are as follows:

Package path: org.apache.hadoop.hive.metastore.api
Class name: Table
Method name: setTableType

About Table.setTableType

The aggregated documentation provides no description, so briefly: Table here is the Thrift-generated metastore API class, and setTableType stores the table type as a plain String. Callers therefore pass values of the TableType enum through toString() or name() (MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, MATERIALIZED_VIEW, INDEX_TABLE). As the first two examples below show, Hive's higher-level Table wrapper exposes a typed setTableType(TableType) overload that delegates to this String-based setter.
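Before the collected examples, here is a minimal self-contained sketch of the basic call pattern. It assumes only that a hive-metastore (or hive-standalone-metastore) jar is on the classpath; the database and table names are placeholders, not taken from any project below.

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetTableTypeSketch {
  public static void main(String[] args) {
    Table table = new Table();
    table.setDbName("default");          // placeholder database name
    table.setTableName("example_table"); // placeholder table name
    // The Thrift setter takes a String, so convert the enum explicitly.
    table.setTableType(TableType.EXTERNAL_TABLE.toString());
    System.out.println(table.getTableType()); // prints EXTERNAL_TABLE
  }
}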

Code examples

Code example source: apache/hive

public void setTableType(TableType tableType) {
  tTable.setTableType(tableType.toString());
}

Code example source: apache/drill

public void setTableType(TableType tableType) {
  tTable.setTableType(tableType.toString());
}

Code example source: apache/hive

private void validateTableType(Table tbl) {
 // If the table has property EXTERNAL set, update table type
 // accordingly
 String tableType = tbl.getTableType();
 boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
 if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
  if (isExternal) {
   tableType = TableType.EXTERNAL_TABLE.toString();
  }
 }
 if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
  if (!isExternal) {
   tableType = TableType.MANAGED_TABLE.toString();
  }
 }
 tbl.setTableType(tableType);
}

Code example source: apache/hive

public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null){
   // TableType specified was null, we need to figure out what type it was.
   if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
    wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
   } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
    wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
   } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
    wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
   } else {
    wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
   }
  }
  initialize(wrapperApiTable);
 }

Code example source: apache/drill

public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null){
   // TableType specified was null, we need to figure out what type it was.
   if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
    wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
   } else if (MetaStoreUtils.isIndexTable(wrapperApiTable)) {
    wrapperApiTable.setTableType(TableType.INDEX_TABLE.toString());
   } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
    wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
   } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
    wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
   } else {
    wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
   }
  }
  initialize(wrapperApiTable);
 }

Code example source: apache/hive

static boolean migrateToExternalTable(Table tableObj, TableType tableType, boolean dryRun, HiveUpdater hiveUpdater)
    throws HiveException {
 String msg;
 switch (tableType) {
 case MANAGED_TABLE:
  if (AcidUtils.isTransactionalTable(tableObj)) {
   msg = createExternalConversionExcuse(tableObj,
     "Table is a transactional table");
   LOG.debug(msg);
   return false;
  }
  LOG.info("Converting {} to external table ...", getQualifiedName(tableObj));
  if (!dryRun) {
   tableObj.setTableType(TableType.EXTERNAL_TABLE.toString());
   hiveUpdater.updateTableProperties(tableObj, convertToExternalTableProps);
  }
  return true;
 case EXTERNAL_TABLE:
  msg = createExternalConversionExcuse(tableObj,
    "Table is already an external table");
  LOG.debug(msg);
  break;
 default: // VIEW/MATERIALIZED_VIEW
  msg = createExternalConversionExcuse(tableObj,
    "Table type " + tableType + " cannot be converted");
  LOG.debug(msg);
  break;
 }
 return false;
}

Code example source: prestodb/presto

public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
  org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
  result.setDbName(table.getDatabaseName());
  result.setTableName(table.getTableName());
  result.setOwner(table.getOwner());
  result.setTableType(table.getTableType());
  result.setParameters(table.getParameters());
  result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
  result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
  result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
  result.setViewOriginalText(table.getViewOriginalText().orElse(null));
  result.setViewExpandedText(table.getViewExpandedText().orElse(null));
  return result;
}

Code example source: apache/hive

// The enclosing condition is truncated at the source; `isExternal` below is
// an illustrative placeholder, not the original expression.
if (isExternal) {
 newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
} else {
 newTable.setTableType(TableType.MANAGED_TABLE.toString());
}

Code example source: apache/hive

public TableBuilder(Database database) {
 this.database = database;
 partitions = new ArrayList<>();
 columnNames = new ArrayList<>();
 columnTypes = new ArrayList<>();
 partitionKeys = Collections.emptyList();
 table = new Table();
 table.setDbName(database.getName());
 table.setTableType(TableType.MANAGED_TABLE.toString());
 Map<String, String> tableParams = new HashMap<String, String>();
 tableParams.put("transactional", Boolean.TRUE.toString());
 table.setParameters(tableParams);
 sd = new StorageDescriptor();
 sd.setInputFormat(OrcInputFormat.class.getName());
 sd.setOutputFormat(OrcOutputFormat.class.getName());
 sd.setNumBuckets(1);
 table.setSd(sd);
 serDeInfo = new SerDeInfo();
 serDeInfo.setParameters(new HashMap<String, String>());
 serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
 serDeInfo.setSerializationLib(OrcSerde.class.getName());
 sd.setSerdeInfo(serDeInfo);
}

Code example source: apache/incubator-gobblin

@Test
public void dropReplacedPartitionsTest() throws Exception {
 Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
 table.setTableType("VIRTUAL_VIEW");
 table.setPartitionKeys(ImmutableList.of(new FieldSchema("year", "string", ""), new FieldSchema("month", "string", "")));
 Partition part = new Partition();
 part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));
 SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
 SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);
 QueryBasedHiveConversionEntity conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
 List<ImmutableMap<String, String>> expected =
   ImmutableList.of(ImmutableMap.of("year", "2015", "month", "12"), ImmutableMap.of("year", "2016", "month", "01"));
 Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
 // Make sure that a partition itself is not dropped
 Partition replacedSelf = new Partition();
 replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
 replacedSelf.setValues(ImmutableList.of("2016", "02"));
 conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
 Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
}

Code example source: apache/hive

preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);

Code example source: apache/hive

@Test public void configureJobPropertiesWithDefaultValues() throws MetaException {
 KafkaStorageHandler kafkaStorageHandler = new KafkaStorageHandler();
 TableDesc tableDesc = Mockito.mock(TableDesc.class);
 Properties properties = new Properties();
 Table preCreateTable = new Table();
 preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
 preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
 preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
 kafkaStorageHandler.preCreateTable(preCreateTable);
 preCreateTable.getParameters().forEach(properties::setProperty);
 Mockito.when(tableDesc.getProperties()).thenReturn(properties);
 Map<String, String> jobProperties = new HashMap<>();
 kafkaStorageHandler.configureInputJobProperties(tableDesc, jobProperties);
 kafkaStorageHandler.configureOutputJobProperties(tableDesc, jobProperties);
 Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()), TEST_TOPIC);
 Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()), LOCALHOST_9291);
 Arrays.stream(KafkaTableProperties.values())
   .filter(key -> !key.isMandatory())
   .forEach((key) -> Assert.assertEquals("Wrong match for key " + key.getName(),
     key.getDefaultValue(),
     jobProperties.get(key.getName())));
}

Code example source: apache/hive

preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
kafkaStorageHandler.preCreateTable(preCreateTable);
preCreateTable.getParameters().forEach(properties::setProperty);

Code example source: apache/hive

// Fragment: the enclosing method declaration and remainder are truncated at the source.
Table table = new Table();
table.setTableType(TableType.MANAGED_TABLE.name());
table.setTableName(tableName);
table.setDbName(dbName);

Code example source: apache/incubator-gobblin

public static Table getTestTable(String dbName, String tableName) {
 Table table = new Table();
 table.setDbName(dbName);
 table.setTableName(tableName);
 table.setTableType(TableType.EXTERNAL_TABLE.name());
 StorageDescriptor sd = new StorageDescriptor();
 sd.setLocation("/tmp/test");
 table.setSd(sd);
 return table;
}

Code example source: apache/hive

// Fragment: tail of a table-building method, truncated at the source.
tbl.setTableType(tableType);
return tbl;

Code example source: apache/hive

Table build() {
  StorageDescriptor sd = new StorageDescriptor();
  if (columns == null) {
   sd.setCols(Collections.emptyList());
  } else {
   sd.setCols(columns);
  }
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setSerializationLib(serde);
  serdeInfo.setName(tableName);
  sd.setSerdeInfo(serdeInfo);
  sd.setInputFormat(inputFormat);
  sd.setOutputFormat(outputFormat);
  if (location != null) {
   sd.setLocation(location);
  }
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setSd(sd);
  table.setParameters(parameters);
  table.setOwner(owner);
  if (partitionKeys != null) {
   table.setPartitionKeys(partitionKeys);
  }
  table.setTableType(tableType.toString());
  return table;
 }
}

Code example source: apache/hive

private void createTable(String dbName, String tableName) throws Exception {
 String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME
   : dbName;
 try {
  msc.dropTable(databaseName, tableName);
 } catch (Exception e) {
  // ignore: dropTable can fail with NoSuchObjectException
 }
 Table tbl = new Table();
 tbl.setDbName(databaseName);
 tbl.setTableName(tableName);
 tbl.setTableType("MANAGED_TABLE");
 StorageDescriptor sd = new StorageDescriptor();
 sd.setCols(getTableColumns());
 tbl.setPartitionKeys(getPartitionKeys());
 tbl.setSd(sd);
 sd.setBucketCols(new ArrayList<String>(2));
 sd.setSerdeInfo(new SerDeInfo());
 sd.getSerdeInfo().setName(tbl.getTableName());
 sd.getSerdeInfo().setParameters(new HashMap<String, String>());
 sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
 sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
 sd.setInputFormat(RCFileInputFormat.class.getName());
 sd.setOutputFormat(RCFileOutputFormat.class.getName());
 Map<String, String> tableParams = new HashMap<String, String>();
 tbl.setParameters(tableParams);
 msc.createTable(tbl);
}

Code example source: apache/hive

@Test
public void testCreateTableDefaultValuesView() throws Exception {
 Table table = new Table();
 StorageDescriptor sd = new StorageDescriptor();
 List<FieldSchema> cols = new ArrayList<>();
 table.setDbName(DEFAULT_DATABASE);
 table.setTableName("test_table_2");
 table.setTableType("VIRTUAL_VIEW");
 cols.add(new FieldSchema("column_name", "int", null));
 sd.setCols(cols);
 sd.setSerdeInfo(new SerDeInfo());
 table.setSd(sd);
 client.createTable(table);
 Table createdTable = client.getTable(table.getDbName(), table.getTableName());
 // No location should be created for views
 Assert.assertNull("Storage descriptor location should be null",
   createdTable.getSd().getLocation());
}

Code example source: apache/storm

tbl.setDbName(databaseName);
tbl.setTableName(tableName);
tbl.setTableType(TableType.MANAGED_TABLE.toString());
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(getTableColumns(colNames, colTypes));
