Usage of the org.apache.hadoop.hive.ql.metadata.Table.<init>() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.<init>() method and shows how the constructor is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.<init>() are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: <init>

About Table.<init>

The Javadoc shown here, "Used only for serialization.", belongs to the no-argument constructor. The examples below mostly use the other overloads: Table(org.apache.hadoop.hive.metastore.api.Table) wraps an existing Thrift metastore table object, and Table(String dbName, String tableName) builds a fresh, mostly-empty table definition from the two names.
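A minimal sketch of the three overloads, based on the examples below; the class name TableInitSketch, the IMetaStoreClient variable, and the database/table names are hypothetical:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.thrift.TException;

    public class TableInitSketch {
      // Wrap a Thrift metastore table fetched through a (hypothetical) client.
      static Table wrap(IMetaStoreClient client, String db, String tab) throws TException {
        return new Table(client.getTable(db, tab));
      }

      public static void main(String[] args) {
        // Build a fresh, mostly-empty table definition from names alone.
        Table fresh = new Table("mydb", "mytab");
        // The no-argument constructor is the one documented as
        // "Used only for serialization".
        Table forSerialization = new Table();
        System.out.println(fresh.getTableName());
      }
    }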

Code examples

Example source: apache/incubator-gobblin

    public SchemaAwareHivePartition(org.apache.hadoop.hive.metastore.api.Table table,
        org.apache.hadoop.hive.metastore.api.Partition partition, Schema schema)
        throws HiveException {
      super(new Table(table), partition);
      this.avroSchema = schema;
    }

Example source: apache/hive

    private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName,
        org.apache.hadoop.hive.metastore.api.Table tblObj) throws Exception {
      Table table = new Table(tblObj);
      return new ImportTableDesc(dbName, table);
    }

Example source: apache/hive

    public static Table getTable(IMetaStoreClient client, String dbName, String tableName)
        throws NoSuchObjectException, TException, MetaException {
      return new Table(client.getTable(dbName, tableName));
    }

Example source: apache/hive

    public Table newTable(String tableName) throws HiveException {
      String[] names = Utilities.getDbTableName(tableName);
      return new Table(names[0], names[1]);
    }
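A hedged usage sketch of the pattern above, assuming Utilities.getDbTableName splits a dot-qualified name into {database, table} (an unqualified name would instead resolve against the current session database); the names here are hypothetical:

    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class NewTableSketch {
      public static void main(String[] args) throws HiveException {
        // "mydb.mytab" should split into {"mydb", "mytab"}.
        String[] names = Utilities.getDbTableName("mydb.mytab");
        Table t = new Table(names[0], names[1]);
        System.out.println(t.getDbName() + "." + t.getTableName());
      }
    }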

Example source: apache/hive

    void updatePartitionLocation(String dbName, Table table, String partName,
        Partition part, Path newLocation) throws HiveException, TException {
      String msg = String.format("ALTER TABLE %s PARTITION (%s) SET LOCATION '%s'",
          getQualifiedName(table), partName, newLocation.toString());
      LOG.info(msg);
      org.apache.hadoop.hive.ql.metadata.Partition modifiedPart =
          new org.apache.hadoop.hive.ql.metadata.Partition(
              new org.apache.hadoop.hive.ql.metadata.Table(table),
              part);
      modifiedPart.setLocation(newLocation.toString());
      alterPartitionInternal(table, modifiedPart);
    }

Example source: apache/hive

    @Override
    public Object getOutput() throws HiveException {
      return new Table(getTable().getTTable());
    }

Example source: apache/incubator-gobblin

    private static org.apache.hadoop.hive.ql.metadata.Partition getQlPartition(
        final Table table, final Partition partition) {
      try {
        return new org.apache.hadoop.hive.ql.metadata.Partition(
            new org.apache.hadoop.hive.ql.metadata.Table(table), partition);
      } catch (HiveException e) {
        throw new RuntimeException(e);
      }
    }

Example source: apache/hive

    void updateTableLocation(Table table, Path newLocation) throws HiveException {
      String msg = String.format("ALTER TABLE %s SET LOCATION '%s'",
          getQualifiedName(table), newLocation);
      LOG.info(msg);
      boolean isTxn = TxnUtils.isTransactionalTable(table);
      org.apache.hadoop.hive.ql.metadata.Table modifiedTable =
          new org.apache.hadoop.hive.ql.metadata.Table(table);
      modifiedTable.setDataLocation(newLocation);
      alterTableInternal(isTxn, table, modifiedTable);
    }

Example source: apache/kylin

    @Override
    public long getHiveTableRows(String database, String tableName) throws Exception {
      Table table = getMetaStoreClient().getTable(database, tableName);
      return getBasicStatForTable(new org.apache.hadoop.hive.ql.metadata.Table(table),
          StatsSetupConst.ROW_COUNT);
    }

Example source: apache/incubator-gobblin

    @Override
    protected HiveDataset createHiveDataset(Table table, Config config)
        throws IOException {
      if (table.getTableName().equals(THROW_EXCEPTION)) {
        throw new IOException("bad table");
      }
      return new HiveDataset(super.fs, super.clientPool,
          new org.apache.hadoop.hive.ql.metadata.Table(table), config);
    }

Example source: apache/incubator-gobblin

    public static ConvertibleHiveDataset createTestConvertibleDataset(Config config)
        throws URISyntaxException {
      Table table = getTestTable("db1", "tb1");
      FileSystem mockFs = Mockito.mock(FileSystem.class);
      when(mockFs.getUri()).thenReturn(new URI("test"));
      ConvertibleHiveDataset cd = new ConvertibleHiveDataset(mockFs,
          Mockito.mock(HiveMetastoreClientPool.class),
          new org.apache.hadoop.hive.ql.metadata.Table(table), new Properties(), config);
      return cd;
    }

Example source: apache/hive

    /** Adds entities for create table/create view. */
    private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type,
        boolean isTemporary, Map<String, String> tblProps) throws SemanticException {
      Database database = getDatabase(qualifiedTabName[0]);
      outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
      Table t = new Table(qualifiedTabName[0], qualifiedTabName[1]);
      t.setParameters(tblProps);
      t.setTableType(type);
      t.setTemporary(isTemporary);
      outputs.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK));
    }

Example source: apache/incubator-gobblin

    private static Table localTestTable(String dbName, String name, boolean partitioned)
        throws Exception {
      File tableSdFile = Files.createTempDir();
      tableSdFile.deleteOnExit();
      return new Table(LocalHiveMetastoreTestUtils.getInstance()
          .createTestAvroTable(dbName, name, tableSdFile.getAbsolutePath(),
              partitioned ? Optional.of("part") : Optional.<String>absent()));
    }

Example source: apache/incubator-gobblin

    private HiveTargetPathHelper createTestTargetPathHelper(Properties properties) {
      HiveDataset dataset = Mockito.mock(HiveDataset.class);
      Table table = new Table(new org.apache.hadoop.hive.metastore.api.Table());
      table.setDbName("dbName");
      table.setTableName("tableName");
      Mockito.when(dataset.getTable()).thenReturn(table);
      Mockito.when(dataset.getTableRootPath()).thenReturn(Optional.of(TABLE_ROOT));
      Mockito.when(dataset.getProperties()).thenReturn(properties);
      HiveTargetPathHelper helper = new HiveTargetPathHelper(dataset);
      return helper;
    }

Example source: apache/hive

    private static Table createTestTable(String dbName, String tableName) throws HiveException {
      Table tbl = new Table(dbName, tableName);
      tbl.setInputFormatClass(SequenceFileInputFormat.class.getName());
      tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
      tbl.setSerializationLib(ThriftDeserializer.class.getName());
      tbl.setSerdeParam(serdeConstants.SERIALIZATION_CLASS, Complex.class.getName());
      tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT,
          TBinaryProtocol.class.getName());
      return tbl;
    }

Example source: apache/incubator-gobblin

    @Test
    public void testDefaults() throws Exception {
      DatePartitionHiveVersionFinder versionFinder =
          new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
      String tableName = "VfTb1";
      Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName,
          ImmutableList.of("datepartition"));
      org.apache.hadoop.hive.metastore.api.Partition tp =
          this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016-01-01-20"),
              (int) System.currentTimeMillis());
      Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
      assertThat(partition.getName(), is("datepartition=2016-01-01-20"));
      TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
      Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
    }

Example source: apache/hive

    protected Table getDummyTable() throws SemanticException {
      Path dummyPath = createDummyFile();
      Table desc = new Table(DUMMY_DATABASE, DUMMY_TABLE);
      desc.getTTable().getSd().setLocation(dummyPath.toString());
      desc.getTTable().getSd().getSerdeInfo()
          .setSerializationLib(NullStructSerDe.class.getName());
      desc.setInputFormatClass(NullRowsInputFormat.class);
      desc.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
      return desc;
    }

Example source: apache/hive

    private Table newTable(boolean isPartitioned) {
      Table t = new Table("default", "table" + Integer.toString(nextInput++));
      if (isPartitioned) {
        FieldSchema fs = new FieldSchema();
        fs.setName("version");
        fs.setType("String");
        List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
        partCols.add(fs);
        t.setPartCols(partCols);
      }
      return t;
    }

Example source: apache/hive

    private Operator<TableScanDesc> getTsOp(int i) {
      Table tblMetadata = new Table("db", "table");
      TableScanDesc desc = new TableScanDesc("alias"/*+ cCtx.nextOperatorId()*/, tblMetadata);
      List<ExprNodeDesc> as =
          Lists.newArrayList(new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, Integer.valueOf(i)),
              new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "c1", "aa", false));
      ExprNodeGenericFuncDesc f1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, udf, as);
      desc.setFilterExpr(f1);
      Operator<TableScanDesc> ts = OperatorFactory.get(cCtx, desc);
      return ts;
    }

Example source: apache/hive

    private Operator<TableScanDesc> getTsOp(int i) {
      Table tblMetadata = new Table("db", "table");
      TableScanDesc desc = new TableScanDesc("alias_" + cCtx.nextOperatorId(), tblMetadata);
      List<ExprNodeDesc> as =
          Lists.newArrayList(new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, Integer.valueOf(i)),
              new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "c1", "aa", false));
      ExprNodeGenericFuncDesc f1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, udf, as);
      desc.setFilterExpr(f1);
      Operator<TableScanDesc> ts = OperatorFactory.get(cCtx, desc);
      return ts;
    }
