org.apache.hadoop.hive.metastore.api.Table.getPartitionKeys()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(234)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.getPartitionKeys()方法的一些代码示例,展示了Table.getPartitionKeys()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getPartitionKeys()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:getPartitionKeys

Table.getPartitionKeys介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/**
 * Returns whether the given metastore table is partitioned.
 *
 * @param tableObj the thrift {@code Table} object to inspect; must not be null
 * @return {@code true} if the table has at least one partition key
 */
static boolean isPartitionedTable(Table tableObj) {
  // Thrift may leave the partition-key list unset (null), so check both.
  List<FieldSchema> partKeys = tableObj.getPartitionKeys();
  return partKeys != null && !partKeys.isEmpty();
}

代码示例来源:origin: apache/hive

/**
 * Builds a comma-separated list of the table's partition column names.
 *
 * @param table the thrift {@code Table} whose partition keys are read
 * @return partition column names joined by {@code ","}; empty string for an
 *         unpartitioned table
 */
private String buildPartColStr(Table table) {
  // StringBuilder avoids the O(n^2) cost of repeated String concatenation.
  // getPartitionKeysSize() is null-safe (returns 0 when the list is unset),
  // so the loop never dereferences a null partition-key list.
  StringBuilder partColStr = new StringBuilder();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    if (i != 0) {
      partColStr.append(',');
    }
    partColStr.append(table.getPartitionKeys().get(i).getName());
  }
  return partColStr.toString();
}

代码示例来源:origin: apache/hive

/**
 * Returns this table's partition columns, lazily initializing the
 * underlying thrift list to an empty one when it is currently unset.
 *
 * @return the (possibly just-created) live partition-key list of {@code tTable}
 */
public List<FieldSchema> getPartCols() {
  List<FieldSchema> cols = tTable.getPartitionKeys();
  if (cols != null) {
    return cols;
  }
  // Install an empty list so later callers can mutate it in place.
  List<FieldSchema> empty = new ArrayList<FieldSchema>();
  tTable.setPartitionKeys(empty);
  return empty;
}

代码示例来源:origin: apache/hive

/**
 * Maps each partition key name of the table to the corresponding value of
 * the given partition, pairing them positionally and preserving key order.
 *
 * @param table     table whose partition keys supply the map keys
 * @param partition partition whose values are matched index-by-index
 * @return an insertion-ordered map of partition key name to partition value
 */
public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
  Map<String, String> partitionKeys = new LinkedHashMap<>();
  // Hoist the loop-invariant lists instead of re-fetching them per iteration.
  List<FieldSchema> keys = table.getPartitionKeys();
  List<String> values = partition.getValues();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    partitionKeys.put(keys.get(i).getName(), values.get(i));
  }
  return partitionKeys;
}

代码示例来源:origin: apache/hive

/**
 * Returns the table's partition columns, installing and returning a fresh
 * empty list when the thrift field is unset.
 *
 * @param table table whose partition keys are read (and possibly initialized)
 * @return the live partition-key list of {@code table}, never null
 */
public static List<FieldSchema> getPartCols(Table table) {
  List<FieldSchema> cols = table.getPartitionKeys();
  if (cols != null) {
    return cols;
  }
  // Attach an empty list so callers can safely add to the result.
  List<FieldSchema> fresh = new ArrayList<>();
  table.setPartitionKeys(fresh);
  return fresh;
}

代码示例来源:origin: apache/hive

/**
 * Builds a partition-key-name to partition-value map for the given
 * table/partition pair, pairing keys and values positionally.
 */
private static Map<String, String> getPtnDesc(Table t, Partition p) {
  // The key and value lists must line up one-to-one.
  assertEquals(t.getPartitionKeysSize(), p.getValuesSize());
  Map<String, String> desc = new HashMap<String, String>();
  List<String> vals = p.getValues();
  int idx = 0;
  for (FieldSchema key : t.getPartitionKeys()) {
    desc.put(key.getName(), vals.get(idx++));
  }
  return desc;
}

代码示例来源:origin: apache/hive

/**
 * Fetches the partition identified by {@code partitionVals} when the table
 * is partitioned; returns null for unpartitioned tables.
 *
 * @return the matching partition, or null if the table has no partition keys
 */
private Partition getPartitionObj(String db, String table, List<String> partitionVals, Table tableObj)
    throws MetaException, NoSuchObjectException {
  boolean partitioned = tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty();
  return partitioned ? get_partition(db, table, partitionVals) : null;
}

代码示例来源:origin: apache/hive

/**
 * Creates a builder over the given partition spec, recording each partition
 * column's primitive type keyed by its lower-cased name.
 */
public ExpressionBuilder(Table table, Map<String, String> partSpecs) {
  this.partSpecs = partSpecs;
  table.getPartitionKeys().forEach(field ->
      partColumnTypesMap.put(field.getName().toLowerCase(),
          TypeInfoFactory.getPrimitiveTypeInfo(field.getType())));
}

代码示例来源:origin: apache/hive

/**
 * Registers the partition under its computed name, rejecting duplicates.
 *
 * @throws AlreadyExistsException if a partition with the same name is present
 */
private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
  String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues());
  // putIfAbsent returns the previous mapping when the name is already taken.
  Partition previous = parts.putIfAbsent(partName, p);
  if (previous != null) {
    throw new AlreadyExistsException("Partition " + partName + " already exists");
  }
}
/**

代码示例来源:origin: apache/hive

/**
 * Builds a warehouse-backed partition helper for the current table, using the
 * table's storage location and the names of its partition columns.
 */
private PartitionHelper newWarehousePartitionHelper() throws MetaException, WorkerException {
  Path tablePath = new Path(table.getTable().getSd().getLocation());
  List<String> partitionColumns = new ArrayList<>();
  for (FieldSchema field : table.getTable().getPartitionKeys()) {
    partitionColumns.add(field.getName());
  }
  return new WarehousePartitionHelper(configuration, tablePath, partitionColumns);
}

代码示例来源:origin: prestodb/presto

/**
 * Converts a Hive metastore thrift table into Presto's {@code Table}
 * representation, using {@code schema} as the data-column list.
 *
 * @throws PrestoException if the thrift table has no storage descriptor
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
    StorageDescriptor sd = table.getSd();
    if (sd == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Map<String, String> parameters = table.getParameters();
    Table.Builder builder = Table.builder();
    builder.setDatabaseName(table.getDbName());
    builder.setTableName(table.getTableName());
    builder.setOwner(nullToEmpty(table.getOwner()));
    builder.setTableType(table.getTableType());
    builder.setDataColumns(schema.stream()
            .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
            .collect(toList()));
    builder.setPartitionColumns(table.getPartitionKeys().stream()
            .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
            .collect(toList()));
    builder.setParameters(parameters == null ? ImmutableMap.of() : parameters);
    builder.setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())));
    builder.setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    fromMetastoreApiStorageDescriptor(sd, builder.getStorageBuilder(), table.getTableName());
    return builder.build();
}

代码示例来源:origin: prestodb/presto

/**
 * Renames a data column of the given table. Renaming a partition column is
 * rejected as unsupported.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException if {@code oldColumnName} is a partition column
 */
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
    org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(databaseName, tableName)
            .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
    // Partition columns drive physical layout and cannot be renamed in place.
    for (FieldSchema partitionKey : table.getPartitionKeys()) {
        if (partitionKey.getName().equals(oldColumnName)) {
            throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
        }
    }
    table.getSd().getCols().stream()
            .filter(col -> col.getName().equals(oldColumnName))
            .forEach(col -> col.setName(newColumnName));
    alterTable(databaseName, tableName, table);
}

代码示例来源:origin: apache/hive

/**
 * Records an ACID write event in the transactional write-notification log and
 * notifies any registered metastore listeners.
 */
private void addTxnWriteNotificationLog(Table tableObj, Partition ptnObj, WriteNotificationLogRequest rqst)
    throws MetaException {
  // Empty string is an invalid partition name; it marks an unpartitioned table.
  String partition = ptnObj == null
      ? ""
      : Warehouse.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals());
  AcidWriteEvent event = new AcidWriteEvent(partition, tableObj, ptnObj, rqst);
  getTxnHandler().addWriteNotificationLog(event);
  if (listeners != null && !listeners.isEmpty()) {
    MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ACID_WRITE, event);
  }
}

代码示例来源:origin: apache/hive

/**
 * Builds the schema properties for a partition, delegating to
 * {@code MetaStoreUtils.getSchema} with the partition's and table's
 * storage descriptors plus the table's identity and partition keys.
 */
public static Properties getSchema(
    org.apache.hadoop.hive.metastore.api.Partition part,
    org.apache.hadoop.hive.metastore.api.Table table) {
  return MetaStoreUtils.getSchema(
      part.getSd(),
      table.getSd(),
      table.getParameters(),
      table.getDbName(),
      table.getTableName(),
      table.getPartitionKeys());
}

代码示例来源:origin: apache/hive

/**
 * Builds the schema properties for a table itself (no partition), passing the
 * table's own storage descriptor for both descriptor arguments.
 */
public static Properties getTableMetadata(
    org.apache.hadoop.hive.metastore.api.Table table) {
  return MetaStoreUtils.getSchema(
      table.getSd(),
      table.getSd(),
      table.getParameters(),
      table.getDbName(),
      table.getTableName(),
      table.getPartitionKeys());
}

代码示例来源:origin: apache/hive

/**
 * Builds the schema properties for a partition using the partition's own
 * storage descriptor and parameters, plus the owning table's identity and
 * partition keys.
 */
public static Properties getPartitionMetadata(
    org.apache.hadoop.hive.metastore.api.Partition partition,
    org.apache.hadoop.hive.metastore.api.Table table) {
  return MetaStoreUtils.getSchema(
      partition.getSd(),
      partition.getSd(),
      partition.getParameters(),
      table.getDbName(),
      table.getTableName(),
      table.getPartitionKeys());
}

代码示例来源:origin: apache/hive

// Removing a partition column via alter_table must be rejected.
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Exception {
  Table modified = partitionedTable.deepCopy();
  modified.getPartitionKeys().remove(0);
  client.alter_table(partitionedTable.getDbName(), partitionedTable.getTableName(), modified);
}

代码示例来源:origin: apache/hive

// Renaming a partition column via alter_table must be rejected.
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
  Table modified = partitionedTable.deepCopy();
  modified.getPartitionKeys().get(0).setName("altered_name");
  client.alter_table(partitionedTable.getDbName(), partitionedTable.getTableName(), modified);
}

代码示例来源:origin: apache/hive

// Verifies that a partition column declared with an upper-case name ("B")
// is stored lower-cased ("b") in the metastore.
@Test
public void testCreateTblWithLowerCasePartNames() throws Exception {
  driver.run("drop table junit_sem_analysis");
  CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
  // JUnit's assertEquals takes (expected, actual); the original call had the
  // arguments swapped, which produces a misleading failure message.
  assertEquals(0, resp.getResponseCode());
  assertEquals(null, resp.getErrorMessage());
  Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
  assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
  driver.run("drop table junit_sem_analysis");
}

代码示例来源:origin: apache/storm

/**
 * Adds a partition with the given values to the table, deriving the partition
 * location from the table's storage location plus the partition path.
 */
private static void addPartition(IMetaStoreClient client, Table tbl
  , List<String> partValues)
  throws IOException, TException {
  // Clone the table's storage descriptor, then point it at the partition dir.
  StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
  sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
  Partition part = new Partition();
  part.setDbName(tbl.getDbName());
  part.setTableName(tbl.getTableName());
  part.setSd(sd);
  part.setValues(partValues);
  client.add_partition(part);
}

相关文章

Table类方法