org.apache.hadoop.hive.metastore.api.Table.getPartitionKeys()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(274)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.getPartitionKeys()方法的一些代码示例,展示了Table.getPartitionKeys()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getPartitionKeys()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:getPartitionKeys

Table.getPartitionKeys介绍

暂无

代码示例

代码示例来源:origin: apache/hive

  1. static boolean isPartitionedTable(Table tableObj) {
  2. List<FieldSchema> partKeys = tableObj.getPartitionKeys();
  3. if (partKeys != null && partKeys.size() > 0) {
  4. return true;
  5. }
  6. return false;
  7. }

代码示例来源:origin: apache/hive

  1. private String buildPartColStr(Table table) {
  2. String partColStr = "";
  3. for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
  4. if (i != 0) {
  5. partColStr += ",";
  6. }
  7. partColStr += table.getPartitionKeys().get(i).getName();
  8. }
  9. return partColStr;
  10. }

代码示例来源:origin: apache/hive

  1. public List<FieldSchema> getPartCols() {
  2. List<FieldSchema> partKeys = tTable.getPartitionKeys();
  3. if (partKeys == null) {
  4. partKeys = new ArrayList<FieldSchema>();
  5. tTable.setPartitionKeys(partKeys);
  6. }
  7. return partKeys;
  8. }

代码示例来源:origin: apache/hive

  1. public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
  2. Map<String, String> partitionKeys = new LinkedHashMap<>();
  3. for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
  4. partitionKeys.put(table.getPartitionKeys().get(i).getName(),
  5. partition.getValues().get(i));
  6. }
  7. return partitionKeys;
  8. }

代码示例来源:origin: apache/hive

  1. public static List<FieldSchema> getPartCols(Table table) {
  2. List<FieldSchema> partKeys = table.getPartitionKeys();
  3. if (partKeys == null) {
  4. partKeys = new ArrayList<>();
  5. table.setPartitionKeys(partKeys);
  6. }
  7. return partKeys;
  8. }

代码示例来源:origin: apache/hive

  1. private static Map<String, String> getPtnDesc(Table t, Partition p) {
  2. assertEquals(t.getPartitionKeysSize(),p.getValuesSize());
  3. Map<String,String> retval = new HashMap<String,String>();
  4. Iterator<String> pval = p.getValuesIterator();
  5. for (FieldSchema fs : t.getPartitionKeys()){
  6. retval.put(fs.getName(),pval.next());
  7. }
  8. return retval;
  9. }

代码示例来源:origin: apache/hive

  1. private Partition getPartitionObj(String db, String table, List<String> partitionVals, Table tableObj)
  2. throws MetaException, NoSuchObjectException {
  3. if (tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty()) {
  4. return get_partition(db, table, partitionVals);
  5. }
  6. return null;
  7. }

代码示例来源:origin: apache/hive

  1. public ExpressionBuilder(Table table, Map<String, String> partSpecs) {
  2. this.partSpecs = partSpecs;
  3. for (FieldSchema partField : table.getPartitionKeys()) {
  4. partColumnTypesMap.put(partField.getName().toLowerCase(),
  5. TypeInfoFactory.getPrimitiveTypeInfo(partField.getType()));
  6. }
  7. }

代码示例来源:origin: apache/hive

  1. private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
  2. String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues());
  3. if(parts.putIfAbsent(partName, p) != null) {
  4. throw new AlreadyExistsException("Partition " + partName + " already exists");
  5. }
  6. }
  7. /**

代码示例来源:origin: apache/hive

  1. private PartitionHelper newWarehousePartitionHelper() throws MetaException, WorkerException {
  2. String location = table.getTable().getSd().getLocation();
  3. Path tablePath = new Path(location);
  4. List<FieldSchema> partitionFields = table.getTable().getPartitionKeys();
  5. List<String> partitionColumns = new ArrayList<>(partitionFields.size());
  6. for (FieldSchema field : partitionFields) {
  7. partitionColumns.add(field.getName());
  8. }
  9. return new WarehousePartitionHelper(configuration, tablePath, partitionColumns);
  10. }

代码示例来源:origin: prestodb/presto

/**
 * Converts a Thrift metastore {@code Table} into this module's internal
 * {@code Table} representation, using the caller-supplied {@code schema} as
 * the data-column list rather than the storage descriptor's columns.
 *
 * @throws PrestoException with HIVE_INVALID_METADATA if the table has no
 *         storage descriptor
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            // Owner may be null on the Thrift object; normalise to "".
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            .setDataColumns(schema.stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setPartitionColumns(table.getPartitionKeys().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            // Normalise null parameters to an empty immutable map.
            .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            // Empty view texts are treated as absent.
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
    return tableBuilder.build();
}

代码示例来源:origin: prestodb/presto

/**
 * Renames a data column of the given table in the metastore.
 *
 * <p>Renaming a partition column is rejected with NOT_SUPPORTED.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException if {@code oldColumnName} is a partition column
 */
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
    Optional<org.apache.hadoop.hive.metastore.api.Table> source = delegate.getTable(databaseName, tableName);
    if (!source.isPresent()) {
        throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
    }
    org.apache.hadoop.hive.metastore.api.Table table = source.get();
    // Reject renames that target a partition key.
    for (FieldSchema fieldSchema : table.getPartitionKeys()) {
        if (fieldSchema.getName().equals(oldColumnName)) {
            throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
        }
    }
    // Rename every matching data column in place, then persist the change.
    for (FieldSchema fieldSchema : table.getSd().getCols()) {
        if (fieldSchema.getName().equals(oldColumnName)) {
            fieldSchema.setName(newColumnName);
        }
    }
    alterTable(databaseName, tableName, table);
}

代码示例来源:origin: apache/hive

/**
 * Records an ACID write event in the transactional write-notification log
 * and notifies any registered metastore listeners.
 *
 * @param ptnObj the partition written to, or null for unpartitioned tables
 */
private void addTxnWriteNotificationLog(Table tableObj, Partition ptnObj, WriteNotificationLogRequest rqst)
    throws MetaException {
  String partition = ""; //Empty string is an invalid partition name. Can be used for non partitioned table.
  if (ptnObj != null) {
    partition = Warehouse.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals());
  }
  AcidWriteEvent event = new AcidWriteEvent(partition, tableObj, ptnObj, rqst);
  getTxnHandler().addWriteNotificationLog(event);
  // Listener notification is best-effort and only runs when listeners exist.
  if (listeners != null && !listeners.isEmpty()) {
    MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ACID_WRITE, event);
  }
}

代码示例来源:origin: apache/hive

  1. public static Properties getSchema(
  2. org.apache.hadoop.hive.metastore.api.Partition part,
  3. org.apache.hadoop.hive.metastore.api.Table table) {
  4. return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table
  5. .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
  6. }

代码示例来源:origin: apache/hive

  1. public static Properties getTableMetadata(
  2. org.apache.hadoop.hive.metastore.api.Table table) {
  3. return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table
  4. .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
  5. }

代码示例来源:origin: apache/hive

  1. public static Properties getPartitionMetadata(
  2. org.apache.hadoop.hive.metastore.api.Partition partition,
  3. org.apache.hadoop.hive.metastore.api.Table table) {
  4. return MetaStoreUtils
  5. .getSchema(partition.getSd(), partition.getSd(), partition
  6. .getParameters(), table.getDbName(), table.getTableName(),
  7. table.getPartitionKeys());
  8. }

代码示例来源:origin: apache/hive

/**
 * Verifies that alter_table rejects a new table definition that drops a
 * partition column, expecting InvalidOperationException.
 */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Exception {
  Table originalTable = partitionedTable;
  // Deep-copy so the fixture table is not mutated by the test.
  Table newTable = originalTable.deepCopy();
  newTable.getPartitionKeys().remove(0);
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
}

代码示例来源:origin: apache/hive

/**
 * Verifies that alter_table rejects a new table definition that renames a
 * partition column, expecting InvalidOperationException.
 */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
  Table originalTable = partitionedTable;
  // Deep-copy so the fixture table is not mutated by the test.
  Table newTable = originalTable.deepCopy();
  newTable.getPartitionKeys().get(0).setName("altered_name");
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
}

代码示例来源:origin: apache/hive

/**
 * Verifies that a partition column declared with an upper-case name ("B") is
 * stored lower-cased ("b") in the metastore.
 */
@Test
public void testCreateTblWithLowerCasePartNames() throws Exception {
  // Drop first in case a previous run left the table behind.
  driver.run("drop table junit_sem_analysis");
  CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
  assertEquals(resp.getResponseCode(), 0);
  assertEquals(null, resp.getErrorMessage());
  Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
  // Clean up the test table.
  driver.run("drop table junit_sem_analysis");
}

代码示例来源:origin: apache/storm

  1. private static void addPartition(IMetaStoreClient client, Table tbl
  2. , List<String> partValues)
  3. throws IOException, TException {
  4. Partition part = new Partition();
  5. part.setDbName(tbl.getDbName());
  6. part.setTableName(tbl.getTableName());
  7. StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
  8. sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
  9. part.setSd(sd);
  10. part.setValues(partValues);
  11. client.add_partition(part);
  12. }

相关文章

Table类方法