Usage of the org.apache.hadoop.hive.ql.metadata.Table.getPartitionKeys() method, with code examples


This article collects a selection of code examples for the Java method org.apache.hadoop.hive.ql.metadata.Table.getPartitionKeys(), showing how Table.getPartitionKeys() is used in practice. The examples are drawn from curated projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Table.getPartitionKeys() method:

Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getPartitionKeys

About Table.getPartitionKeys

Table.getPartitionKeys() returns the table's partition columns as a List<FieldSchema>, one entry (name, type, comment) per partition key, in partition-key order. For an unpartitioned table the list is empty or null, which is why the examples below typically guard with a null or isEmpty() check before iterating.
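
Before turning to the project examples, here is a minimal, self-contained sketch of the typical call pattern. This is an illustration rather than code from any of the projects below: the database name "default" and table name "sales" are hypothetical placeholders, and it assumes a HiveConf that can reach a metastore via Hive.get().

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartitionKeysExample {
      public static void main(String[] args) throws Exception {
        // Look up a table through the metastore client; "default" and
        // "sales" are placeholder database/table names.
        Hive hive = Hive.get(new HiveConf());
        Table table = hive.getTable("default", "sales");

        // getPartitionKeys() returns the partition columns as FieldSchema
        // objects; unpartitioned tables yield an empty (or null) list, so
        // guard before iterating, as most of the examples below do.
        List<FieldSchema> partitionKeys = table.getPartitionKeys();
        if (partitionKeys == null || partitionKeys.isEmpty()) {
          System.out.println(table.getTableName() + " is not partitioned");
        } else {
          for (FieldSchema key : partitionKeys) {
            System.out.println(key.getName() + " : " + key.getType());
          }
        }
      }
    }

The null/isEmpty guard mirrors what the apache/hive examples below do themselves (see withTableObject and validateTable).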

Code examples

Code example source: apache/hive

    /**
     * Specify the table object since sometimes no connections
     * to the metastore will be opened.
     * @param table table object.
     * @return builder
     */
    public Builder withTableObject(Table table) {
      this.tableObject = table;
      this.isPartitioned = tableObject.getPartitionKeys() != null
          && !tableObject.getPartitionKeys().isEmpty();
      return this;
    }

Code example source: apache/hive

    static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn)
        throws IOException {
      List<String> values = ptn.getValues();
      if (values.size() != table.getPartitionKeys().size()) {
        throw new IOException(
            "Partition values in partition inconsistent with table definition, table "
                + table.getTableName() + " has "
                + table.getPartitionKeys().size()
                + " partition keys, partition has " + values.size()
                + " partition values");
      }
      Map<String, String> ptnKeyValues = new HashMap<String, String>();
      int i = 0;
      for (FieldSchema schema : table.getPartitionKeys()) {
        // CONCERN : the way this mapping goes, the order *needs* to be
        // preserved for table.getPartitionKeys() and ptn.getValues()
        ptnKeyValues.put(schema.getName().toLowerCase(), values.get(i));
        i++;
      }
      return ptnKeyValues;
    }

Code example source: apache/hive

    /**
     * return the partition columns from a table instance
     *
     * @param table the instance to extract partition columns from
     * @return HCatSchema instance which contains the partition columns
     * @throws IOException
     */
    public static HCatSchema getPartitionColumns(Table table) throws IOException {
      HCatSchema cols = new HCatSchema(new LinkedList<HCatFieldSchema>());
      if (table.getPartitionKeys().size() != 0) {
        for (FieldSchema fs : table.getPartitionKeys()) {
          cols.append(HCatSchemaUtils.getHCatFieldSchema(fs));
        }
      }
      return cols;
    }

Code example source: apache/hive

    private static String getColTypeOf(Table tbl, String partKey) throws SemanticException {
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        if (partKey.equalsIgnoreCase(fs.getName())) {
          return fs.getType().toLowerCase();
        }
      }
      throw new SemanticException("Unknown partition key : " + partKey);
    }

Code example source: apache/hive

    public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
      HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
      if (table.getPartitionKeys().size() != 0) {
        // add partition keys to table schema
        // NOTE : this assumes that we do not ever have ptn keys as columns
        // inside the table schema as well!
        for (FieldSchema fs : table.getPartitionKeys()) {
          tableSchema.append(HCatSchemaUtils.getHCatFieldSchema(fs));
        }
      }
      return tableSchema;
    }

Code example source: apache/drill

    private String getColTypeOf(String partKey) throws SemanticException {
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        if (partKey.equalsIgnoreCase(fs.getName())) {
          return fs.getType().toLowerCase();
        }
      }
      throw new SemanticException("Unknown partition key : " + partKey);
    }

Code example source: apache/hive

    /**
     * Convert the partition value map to a value list in the partition key order.
     * @param table the table being written to
     * @param valueMap the partition value map
     * @return the partition value list
     * @throws java.io.IOException
     */
    static List<String> getPartitionValueList(Table table, Map<String, String> valueMap) throws IOException {
      if (valueMap.size() != table.getPartitionKeys().size()) {
        throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
            "Table "
                + table.getTableName() + " has " +
                table.getPartitionKeys().size() + " partition keys, got " +
                valueMap.size());
      }
      List<String> values = new ArrayList<String>();
      for (FieldSchema schema : table.getPartitionKeys()) {
        String value = valueMap.get(schema.getName().toLowerCase());
        if (value == null) {
          throw new HCatException(ErrorType.ERROR_MISSING_PARTITION_KEY,
              "Key " + schema.getName() + " of table " + table.getTableName());
        }
        values.add(value);
      }
      return values;
    }

Code example source: apache/hive

    protected void preparePartitioningFields() {
      final int numPartitions = table.getPartitionKeys().size();
      this.partitionFieldData = new Object[numPartitions];
      this.partitionObjInspectors = new ObjectInspector[numPartitions];
      int startIdx = inputRowObjectInspector.getAllStructFieldRefs().size() - numPartitions;
      int endIdx = inputRowObjectInspector.getAllStructFieldRefs().size();
      int j = 0;
      for (int i = startIdx; i < endIdx; i++) {
        StructField structField = inputRowObjectInspector.getAllStructFieldRefs().get(i);
        partitionObjInspectors[j++] = structField.getFieldObjectInspector();
      }
      this.partitionStructFields = new StructField[partitionColumns.size()];
      for (int i = 0; i < partitionColumns.size(); i++) {
        String partCol = partitionColumns.get(i);
        partitionStructFields[i] = inputRowObjectInspector.getStructFieldRef(partCol);
      }
    }

Code example source: apache/hive

    @Override
    public String[] getPartitionKeys(String location, Job job)
        throws IOException {
      Table table = phutil.getTable(location,
          hcatServerUri != null ? hcatServerUri : PigHCatUtil.getHCatServerUri(job),
          PigHCatUtil.getHCatServerPrincipal(job),
          job); // Pass job to initialize metastore conf overrides
      List<FieldSchema> tablePartitionKeys = table.getPartitionKeys();
      String[] partitionKeys = new String[tablePartitionKeys.size()];
      for (int i = 0; i < tablePartitionKeys.size(); i++) {
        partitionKeys[i] = tablePartitionKeys.get(i).getName();
      }
      return partitionKeys;
    }

Code example source: apache/incubator-gobblin

    private void checkPartitionedTableCompatibility(Table desiredTargetTable, Table existingTargetTable)
        throws IOException {
      if (!desiredTargetTable.getDataLocation().equals(existingTargetTable.getDataLocation())) {
        throw new HiveTableLocationNotMatchException(desiredTargetTable.getDataLocation(),
            existingTargetTable.getDataLocation());
      }
      if (HiveUtils.isPartitioned(desiredTargetTable) != HiveUtils.isPartitioned(existingTargetTable)) {
        throw new IOException(String.format(
            "%s: Desired target table %s partitioned, existing target table %s partitioned. Tables are incompatible.",
            this.dataset.tableIdentifier, HiveUtils.isPartitioned(desiredTargetTable) ? "is" : "is not",
            HiveUtils.isPartitioned(existingTargetTable) ? "is" : "is not"));
      }
      if (desiredTargetTable.isPartitioned()
          && !desiredTargetTable.getPartitionKeys().equals(existingTargetTable.getPartitionKeys())) {
        throw new IOException(String.format(
            "%s: Desired target table has partition keys %s, existing target table has partition keys %s. "
                + "Tables are incompatible.",
            this.dataset.tableIdentifier, gson.toJson(desiredTargetTable.getPartitionKeys()),
            gson.toJson(existingTargetTable.getPartitionKeys())));
      }
    }

Code example source: apache/hive

    private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs,
        OutputJobInfo jobInfo) {
      Path partPath = new Path(table.getTTable().getSd().getLocation());
      if (!customDynamicLocationUsed) {
        // file:///tmp/hcat_junit_warehouse/employee/_DYN0.7770480401313761/emp_country=IN/emp_state=KA ->
        // file:///tmp/hcat_junit_warehouse/employee/emp_country=IN/emp_state=KA
        for (FieldSchema partKey : table.getPartitionKeys()) {
          partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
        }
        return partPath.toString();
      } else {
        // if custom root specified, update the parent path
        if (jobInfo.getCustomDynamicRoot() != null
            && jobInfo.getCustomDynamicRoot().length() > 0) {
          partPath = new Path(partPath, jobInfo.getCustomDynamicRoot());
        }
        return new Path(partPath, HCatFileUtil.resolveCustomPath(jobInfo, partKVs, false)).toString();
      }
    }

Code example source: apache/hive

    private void validateTable() throws InvalidTable, ConnectionError {
      try {
        tableObject = new Table(getMSC().getTable(database, table));
      } catch (Exception e) {
        LOG.warn("Unable to validate the table for connection: " + toConnectionInfoString(), e);
        throw new InvalidTable(database, table, e);
      }
      // 1 - check that the table is Acid
      if (!AcidUtils.isFullAcidTable(tableObject)) {
        LOG.error("HiveEndPoint " + this + " must use an acid table");
        throw new InvalidTable(database, table, "is not an Acid table");
      }
      if (tableObject.getPartitionKeys() != null && !tableObject.getPartitionKeys().isEmpty()) {
        setPartitionedTable(true);
      } else {
        setPartitionedTable(false);
      }
      // partition values are specified on non-partitioned table
      if (!isPartitionedTable() && (staticPartitionValues != null && !staticPartitionValues.isEmpty())) {
        // Invalid if table is not partitioned, but endPoint's partitionVals is not empty
        String errMsg = this.toString() + " specifies partitions for un-partitioned table";
        LOG.error(errMsg);
        throw new ConnectionError(errMsg);
      }
    }

Code example source: apache/hive

    /**
     * Generate the statement of SELECT compute_stats(col1), compute_stats(col2), ...,
     * similar to the one generated from ANALYZE TABLE t1 COMPUTE STATISTICS FOR COLUMNS,
     * but t1 is replaced by a TABLE(VALUES(cast(null as int),cast(null as string))) AS t1(col1,col2).
     *
     * We use TABLE-VALUES statement for computing stats for CTAS statement because in those cases
     * the table has not been created yet. Once the plan for the SELECT statement is generated,
     * we connect it to the existing CTAS plan as we do for INSERT or INSERT OVERWRITE.
     */
    public void insertTableValuesAnalyzePipeline() throws SemanticException {
      // Instead of starting from analyze statement, we just generate the Select plan
      boolean isPartitionStats = conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned();
      if (isPartitionStats) {
        partSpec = new HashMap<>();
        List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
        for (String partKey : partKeys) {
          partSpec.put(partKey, null);
        }
      }
      String command = ColumnStatsSemanticAnalyzer.genRewrittenQuery(
          tbl, Utilities.getColumnNamesFromFieldSchema(tbl.getCols()), conf, partSpec, isPartitionStats, true);
      insertAnalyzePipeline(command, true);
    }

Code example source: apache/incubator-gobblin

    public HiveWorkUnit(HiveDataset hiveDataset, Partition partition) {
      this(hiveDataset);
      setPartitionName(partition.getName());
      setPartitionLocation(partition.getLocation());
      setPartitionKeys(partition.getTable().getPartitionKeys());
    }

Code example source: apache/drill

    public Properties getSchemaFromTableSchema(Properties tblSchema) {
      return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), table.getTTable().getSd(),
          tPartition.getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys(),
          tblSchema);
    }

Code example source: apache/incubator-gobblin

    @Test
    public void testDroppedPartitions() throws Exception {
      WorkUnitState previousWus = new WorkUnitState();
      previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
      previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
      previousWus
          .setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100L, "2015-02", 101L)));
      SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
      PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
      Table table = mockTable("test_dataset_urn");
      Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));
      Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));
      // partition 2015 replaces 2015-01 and 2015-02
      Mockito.when(partition2015.getParameters()).thenReturn(
          ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
      watermarker.onPartitionProcessBegin(partition2015, 0L, 0L);
      Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("db@test_dataset_urn"), ImmutableMap.of("2015", 0L));
    }

Code example source: apache/hive

    private boolean createDynPartSpec(ASTNode ast) {
      if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE &&
          ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW &&
          ast.getToken().getType() != HiveParser.TOK_ALTER_MATERIALIZED_VIEW &&
          tableHandle.getPartitionKeys().size() > 0
          && (ast.getParent() != null && (ast.getParent().getType() == HiveParser.TOK_INSERT_INTO
              || ast.getParent().getType() == HiveParser.TOK_INSERT)
              || ast.getParent().getType() == HiveParser.TOK_DESTINATION
              || ast.getParent().getType() == HiveParser.TOK_ANALYZE)) {
        return true;
      }
      return false;
    }

Code example source: apache/hive

    public PartitionDesc(final Partition part, final TableDesc tableDesc) throws HiveException {
      PartitionDescConstructorHelper(part, tableDesc, true);
      if (Utilities.isInputFileFormatSelfDescribing(this)) {
        // if IF is self describing no need to send column info per partition, since its not used anyway.
        Table tbl = part.getTable();
        setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(),
            part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
      } else {
        setProperties(part.getMetadataFromPartitionSchema());
      }
    }

Code example source: apache/hive

    private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab,
        Map<String, String> partSpec, String dest) throws SemanticException {
      List<FieldSchema> parts = dest_tab.getPartitionKeys();
      if (parts == null || parts.isEmpty()) {
        return null; // table is not partitioned
      }
      if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition
        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest),
            ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
      }
      DynamicPartitionCtx dpCtx = qbm.getDPCtx(dest);
      if (dpCtx == null) {
        dest_tab.validatePartColumnNames(partSpec, false);
        dpCtx = new DynamicPartitionCtx(partSpec,
            conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
            conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
        qbm.setDPCtx(dest, dpCtx);
      }
      if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest),
            ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg()));
      }
      if (dest_tab.getNumBuckets() > 0) {
        dpCtx.setNumBuckets(dest_tab.getNumBuckets());
      }
      return dpCtx;
    }

Code example source: apache/drill

    public PartitionDesc(final Partition part) throws HiveException {
      PartitionDescConstructorHelper(part, getTableDesc(part.getTable()), true);
      if (Utilities.isInputFileFormatSelfDescribing(this)) {
        // if IF is self describing no need to send column info per partition, since its not used anyway.
        Table tbl = part.getTable();
        setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(), part.getTPartition().getSd(),
            part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
      } else {
        setProperties(part.getMetadataFromPartitionSchema());
      }
    }
