Usage of org.apache.hadoop.hive.metastore.api.Table.setPartitionKeys() with code examples


This article collects Java code examples for the org.apache.hadoop.hive.metastore.api.Table.setPartitionKeys() method and shows how it is used in practice. The examples are taken from selected open-source projects hosted on GitHub, Stack Overflow, Maven, and similar platforms, so they are useful as references. Method details:

Package path: org.apache.hadoop.hive.metastore.api.Table
Class: Table
Method: setPartitionKeys

About Table.setPartitionKeys

setPartitionKeys(List<FieldSchema> partitionKeys) is a setter on the Thrift-generated Table class: it assigns the table's partition key columns, each described by a FieldSchema (name, type, comment). Partition keys are stored on the Table itself, separately from the data columns held in the table's StorageDescriptor.
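
Before the project-sourced examples, here is a minimal, self-contained sketch of the typical call pattern. It assumes only that the Hive metastore API classes are on the classpath; the database, table, and column names are illustrative and not taken from any project below.

```java
import java.util.Arrays;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetPartitionKeysExample {
  public static void main(String[] args) {
    // Data columns belong to the StorageDescriptor ...
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(Arrays.asList(
        new FieldSchema("id", "int", null),
        new FieldSchema("name", "string", null)));
    sd.setSerdeInfo(new SerDeInfo());
    sd.setParameters(new HashMap<String, String>());

    Table table = new Table();
    table.setDbName("example_db");        // illustrative names
    table.setTableName("example_events");
    table.setSd(sd);

    // ... while partition keys are set on the Table itself and must not
    // repeat any of the StorageDescriptor columns.
    table.setPartitionKeys(Arrays.asList(
        new FieldSchema("dt", "string", "partition date"),
        new FieldSchema("country", "string", "partition country")));

    System.out.println(table.getPartitionKeys());
  }
}
```

A Table assembled like this would then be registered through a metastore client, for example IMetaStoreClient.createTable(table), which is what the msc, hmsc, and db clients in the snippets below are doing.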

Code examples

Example source: apache/hive

```java
public void setPartCols(List<FieldSchema> partCols) {
  tTable.setPartitionKeys(partCols);
}
```

Example source: apache/drill

```java
public void setPartCols(List<FieldSchema> partCols) {
  tTable.setPartitionKeys(partCols);
}
```

Example source: apache/hive

```java
public static List<FieldSchema> getPartCols(Table table) {
  // Lazily initialize the partition key list so callers never see null.
  List<FieldSchema> partKeys = table.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<>();
    table.setPartitionKeys(partKeys);
  }
  return partKeys;
}
```

Example source: apache/hive

```java
public List<FieldSchema> getPartCols() {
  List<FieldSchema> partKeys = tTable.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<FieldSchema>();
    tTable.setPartitionKeys(partKeys);
  }
  return partKeys;
}
```

Example source: apache/drill

```java
public List<FieldSchema> getPartCols() {
  List<FieldSchema> partKeys = tTable.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<FieldSchema>();
    tTable.setPartitionKeys(partKeys);
  }
  return partKeys;
}
```

Example source: prestodb/presto

```java
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
    result.setDbName(table.getDatabaseName());
    result.setTableName(table.getTableName());
    result.setOwner(table.getOwner());
    result.setTableType(table.getTableType());
    result.setParameters(table.getParameters());
    result.setPartitionKeys(table.getPartitionColumns().stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    result.setViewOriginalText(table.getViewOriginalText().orElse(null));
    result.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return result;
}
```

Example source: apache/hive

```java
@Test
public void testGetTableSchemaWithPtnColsApi() throws IOException {
  // Check the schema of a table with one field and no partition keys.
  StorageDescriptor sd = new StorageDescriptor(
      Lists.newArrayList(new FieldSchema("username", serdeConstants.STRING_TYPE_NAME, null)),
      "location", "org.apache.hadoop.mapred.TextInputFormat",
      "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
      new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
  org.apache.hadoop.hive.metastore.api.Table apiTable =
      new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
          0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
          "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
  Table table = new Table(apiTable);
  List<HCatFieldSchema> expectedHCatSchema =
      Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
  Assert.assertEquals(new HCatSchema(expectedHCatSchema),
      HCatUtil.getTableSchemaWithPtnCols(table));
  // Add a partition key and ensure it is reflected in the schema.
  List<FieldSchema> partitionKeys =
      Lists.newArrayList(new FieldSchema("dt", serdeConstants.STRING_TYPE_NAME, null));
  table.getTTable().setPartitionKeys(partitionKeys);
  expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
  Assert.assertEquals(new HCatSchema(expectedHCatSchema),
      HCatUtil.getTableSchemaWithPtnCols(table));
}
```

Example source: apache/incubator-gobblin

```java
@Test
public void dropReplacedPartitionsTest() throws Exception {
  Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
  table.setTableType("VIRTUAL_VIEW");
  table.setPartitionKeys(ImmutableList.of(
      new FieldSchema("year", "string", ""), new FieldSchema("month", "string", "")));
  Partition part = new Partition();
  part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));
  SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
  SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);
  QueryBasedHiveConversionEntity conversionEntity =
      new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
  List<ImmutableMap<String, String>> expected = ImmutableList.of(
      ImmutableMap.of("year", "2015", "month", "12"),
      ImmutableMap.of("year", "2016", "month", "01"));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
  // Make sure that a partition itself is not dropped
  Partition replacedSelf = new Partition();
  replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
  replacedSelf.setValues(ImmutableList.of("2016", "02"));
  conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable,
      Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
}
```

Example source: apache/hive

```java
private static void createTable(String tableName, String tablePerm) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(DATABASE);
  tbl.setTableName(tableName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(ColumnHolder.colMapping.get(tableName));
  tbl.setSd(sd);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
  sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(
      org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
  tbl.setPartitionKeys(ColumnHolder.partitionCols);
  hmsc.createTable(tbl);
  Path path = new Path(warehousedir, tableName);
  FileSystem fs = path.getFileSystem(hiveConf);
  fs.setPermission(path, new FsPermission(tablePerm));
}
```

Example source: apache/hive

```java
// Excerpt: add a string-typed partition column, then apply the list to the table.
partitionFields.add(new FieldSchema(partitionKey, serdeConstants.STRING_TYPE_NAME, ""));
table.setPartitionKeys(partitionFields);
```

Example source: apache/hive

```java
@Test
public void testAddPartition() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  List<Partition> addedPtns = new ArrayList<Partition>();
  addedPtns.add(createPtn(t, Arrays.asList("120", "abc")));
  addedPtns.add(createPtn(t, Arrays.asList("201", "xyz")));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ADD_PARTITION_EVENT,
      msgFactory.buildAddPartitionMessage(t, addedPtns.iterator()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAddPartitionReplicationTask(rtask, t, addedPtns);
}
```

Example source: apache/hive

```java
Table build() {
  StorageDescriptor sd = new StorageDescriptor();
  if (columns == null) {
    sd.setCols(Collections.emptyList());
  } else {
    sd.setCols(columns);
  }
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setSerializationLib(serde);
  serdeInfo.setName(tableName);
  sd.setSerdeInfo(serdeInfo);
  sd.setInputFormat(inputFormat);
  sd.setOutputFormat(outputFormat);
  if (location != null) {
    sd.setLocation(location);
  }
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setSd(sd);
  table.setParameters(parameters);
  table.setOwner(owner);
  if (partitionKeys != null) {
    table.setPartitionKeys(partitionKeys);
  }
  table.setTableType(tableType.toString());
  return table;
}
```

Example source: apache/hive

```java
private void createTable(String dbName, String tableName) throws Exception {
  String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
  try {
    msc.dropTable(databaseName, tableName);
  } catch (Exception e) {
    // can fail with NoSuchObjectException
  }
  Table tbl = new Table();
  tbl.setDbName(databaseName);
  tbl.setTableName(tableName);
  tbl.setTableType("MANAGED_TABLE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(getTableColumns());
  tbl.setPartitionKeys(getPartitionKeys());
  tbl.setSd(sd);
  sd.setBucketCols(new ArrayList<String>(2));
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  Map<String, String> tableParams = new HashMap<String, String>();
  tbl.setParameters(tableParams);
  msc.createTable(tbl);
}
```

Example source: apache/hive

```java
private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
  try {
    db.dropTable(catName, dbName, tableName);
    Table table = new Table();
    table.setCatName(catName);
    table.setDbName(dbName);
    table.setTableName(tableName);
    FieldSchema col1 = new FieldSchema("key", "string", "");
    FieldSchema col2 = new FieldSchema("value", "int", "");
    FieldSchema col3 = new FieldSchema("city", "string", "");
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.setInputFormat(TextInputFormat.class.getCanonicalName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
    sd.setCols(Arrays.asList(col1, col2));
    table.setPartitionKeys(Arrays.asList(col3));
    table.setSd(sd);
    db.createTable(table);
    return db.getTable(catName, dbName, tableName);
  } catch (Exception exception) {
    fail("Unable to drop and create table "
        + StatsUtils.getFullyQualifiedTableName(dbName, tableName)
        + " because " + StringUtils.stringifyException(exception));
    throw exception;
  }
}
```

Example source: apache/hive

```java
@Test
public void testDropPartition() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_DROP_PARTITION_EVENT, msgFactory.buildDropPartitionMessage(
          t, Collections.singletonList(p).iterator()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyDropPartitionReplicationTask(rtask, t, p);
}
```

Example source: apache/storm

```java
// Excerpt: place the table under the database location and, when partition
// names were supplied, turn them into partition keys on the table.
sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
if (partNames != null && partNames.length != 0) {
  tbl.setPartitionKeys(getPartitionKeys(partNames));
}
```

Example source: apache/hive

```java
@Test
public void testInsert() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  List<String> files = Arrays.asList("/tmp/test123");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_INSERT_EVENT, msgFactory.buildInsertMessage(
          t.getDbName(), t.getTableName(), getPtnDesc(t, p), files).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyInsertReplicationTask(rtask, t, p);
}
```

Example source: apache/hive

```java
@Test
public void testAlterTable() throws Exception {
  Table originalTable = testTables[2];
  String originalTableName = originalTable.getTableName();
  String originalDatabase = originalTable.getDbName();
  Table newTable = getTableWithAllParametersSet();
  newTable.setTableName(originalTableName);
  newTable.setDbName(originalDatabase);
  // Partition keys cannot be changed by alter_table, and getTableWithAllParametersSet
  // added one, so restore the original keys for this test.
  newTable.setPartitionKeys(originalTable.getPartitionKeys());
  client.alter_table(originalDatabase, originalTableName, newTable);
  Table alteredTable = client.getTable(originalDatabase, originalTableName);
  // Extra parameters will be added on the server side, so only check that the
  // required ones are present.
  for (String key : newTable.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
        alteredTable.getParameters().get(key));
  }
  // The parameters are checked manually above, so do not compare them again.
  newTable.setParameters(alteredTable.getParameters());
  // Some of the data is set on the server side, so reset those fields.
  newTable.setCreateTime(alteredTable.getCreateTime());
  newTable.setCreationMetadata(alteredTable.getCreationMetadata());
  newTable.setWriteId(alteredTable.getWriteId());
  Assert.assertTrue(alteredTable.isSetId());
  alteredTable.unsetId();
  Assert.assertEquals("The table data should be the same", newTable, alteredTable);
}
```

Example source: apache/hive

```java
@Test
public void testAlterPartition() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_PARTITION_EVENT, msgFactory.buildAlterPartitionMessage(
          t, p, p, p.getWriteId()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAlterPartitionReplicationTask(rtask, t, p);
}
```
