Usage of org.apache.hadoop.hive.ql.metadata.Table.getSortCols() with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.getSortCols() method and shows how it is used in practice. The examples are drawn from selected open-source projects found via platforms such as GitHub, Stack Overflow, and Maven, so they carry reasonable reference value. Details of Table.getSortCols() are as follows:

Package path: org.apache.hadoop.hive.ql.metadata.Table
Class: Table
Method: getSortCols

About Table.getSortCols

The original article provides no description. As the examples below show, getSortCols() returns the table's SORTED BY columns as a List<org.apache.hadoop.hive.metastore.api.Order>; each Order exposes the column name via getCol() and the numeric sort direction via getOrder() (1 for ascending, 0 for descending). The list is empty when the table has no sort columns.
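
A minimal usage sketch, assuming a reachable metastore and an illustrative sorted-bucketed table default.demo (the setup, the names, and the DDL in the comment are assumptions for demonstration, not part of the original article):

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Order;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class GetSortColsDemo {
      public static void main(String[] args) throws Exception {
        // Table assumed to have been created roughly as:
        //   CREATE TABLE demo (id INT, name STRING)
        //   CLUSTERED BY (id) SORTED BY (name ASC) INTO 4 BUCKETS;
        Hive hive = Hive.get(new HiveConf());
        Table table = hive.getTable("default", "demo");

        // getSortCols() reflects the SORTED BY clause; empty for unsorted tables.
        List<Order> sortCols = table.getSortCols();
        for (Order sortCol : sortCols) {
          // getOrder(): 1 = ascending, 0 = descending
          System.out.println(sortCol.getCol() + " order=" + sortCol.getOrder());
        }
      }
    }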

Code examples

Code example source: apache/hive

    private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
        throws SemanticException {
      List<Order> tabSortCols = tab.getSortCols();
      List<FieldSchema> tabCols = tab.getCols();
      ArrayList<Integer> orders = new ArrayList<Integer>();
      // For each sort column, find the matching table column and record its sort direction.
      for (Order sortCol : tabSortCols) {
        for (FieldSchema tabCol : tabCols) {
          if (sortCol.getCol().equals(tabCol.getName())) {
            orders.add(sortCol.getOrder());
            break;
          }
        }
      }
      return orders;
    }

Code example source: apache/hive

    private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc,
        Operator input, boolean convert)
        throws SemanticException {
      List<Order> tabSortCols = tab.getSortCols();
      List<FieldSchema> tabCols = tab.getCols();
      // Partition by the bucketing column
      List<Integer> posns = new ArrayList<Integer>();
      for (Order sortCol : tabSortCols) {
        int pos = 0;
        for (FieldSchema tabCol : tabCols) {
          if (sortCol.getCol().equals(tabCol.getName())) {
            posns.add(pos);
            break;
          }
          pos++;
        }
      }
      return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
    }

Code example source: apache/drill

    private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
        throws SemanticException {
      List<Order> tabSortCols = tab.getSortCols();
      List<FieldSchema> tabCols = tab.getCols();
      ArrayList<Integer> orders = new ArrayList<Integer>();
      for (Order sortCol : tabSortCols) {
        for (FieldSchema tabCol : tabCols) {
          if (sortCol.getCol().equals(tabCol.getName())) {
            orders.add(sortCol.getOrder());
            break;
          }
        }
      }
      return orders;
    }

Code example source: apache/drill

    private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc,
        Operator input, boolean convert)
        throws SemanticException {
      List<Order> tabSortCols = tab.getSortCols();
      List<FieldSchema> tabCols = tab.getCols();
      // Partition by the bucketing column
      List<Integer> posns = new ArrayList<Integer>();
      for (Order sortCol : tabSortCols) {
        int pos = 0;
        for (FieldSchema tabCol : tabCols) {
          if (sortCol.getCol().equals(tabCol.getName())) {
            posns.add(pos);
            break;
          }
          pos++;
        }
      }
      return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
    }

Code example source: apache/hive

    private boolean checkTable(Table table,
        List<Integer> bucketPositionsDest,
        List<Integer> sortPositionsDest,
        List<Integer> sortOrderDest,
        int numBucketsDest) {
      // The bucketing and sorting positions should exactly match
      int numBuckets = table.getNumBuckets();
      if (numBucketsDest != numBuckets) {
        return false;
      }
      List<Integer> tableBucketPositions =
          getBucketPositions(table.getBucketCols(), table.getCols());
      List<Integer> sortPositions =
          getSortPositions(table.getSortCols(), table.getCols());
      List<Integer> sortOrder =
          getSortOrder(table.getSortCols(), table.getCols());
      return bucketPositionsDest.equals(tableBucketPositions) &&
          sortPositionsDest.equals(sortPositions) &&
          sortOrderDest.equals(sortOrder);
    }
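
The getSortPositions and getSortOrder helpers called above are not reproduced in this article; a plausible reconstruction, assuming they mirror the column-lookup loops of the earlier getSortOrders/getSortCols examples (signatures inferred from the call sites):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.Order;

    class SortColHelpers {
      // Position of each sort column within the table schema, in sort-key order.
      static List<Integer> getSortPositions(List<Order> sortCols, List<FieldSchema> cols) {
        List<Integer> positions = new ArrayList<Integer>();
        for (Order sortCol : sortCols) {
          for (int pos = 0; pos < cols.size(); pos++) {
            if (sortCol.getCol().equals(cols.get(pos).getName())) {
              positions.add(pos);
              break;
            }
          }
        }
        return positions;
      }

      // Numeric sort direction (1 = ascending) for each sort column.
      static List<Integer> getSortOrder(List<Order> sortCols, List<FieldSchema> cols) {
        List<Integer> orders = new ArrayList<Integer>();
        for (Order sortCol : sortCols) {
          orders.add(sortCol.getOrder());
        }
        return orders;
      }
    }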

Code example source: apache/hive

    @Override
    public List<RelCollation> getCollationList() {
      ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
      for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
        for (int i = 0; i < this.hiveTblMetadata.getSd().getCols().size(); i++) {
          FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
          if (field.getName().equals(sortColumn.getCol())) {
            Direction direction;
            NullDirection nullDirection;
            if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
              direction = Direction.ASCENDING;
              nullDirection = NullDirection.FIRST;
            } else {
              direction = Direction.DESCENDING;
              nullDirection = NullDirection.LAST;
            }
            collationList.add(new RelFieldCollation(i, direction, nullDirection));
            break;
          }
        }
      }
      return new ImmutableList.Builder<RelCollation>()
          .add(RelCollationTraitDef.INSTANCE.canonize(
              new HiveRelCollation(collationList.build())))
          .build();
    }
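
The comparison against BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC above is how the numeric getOrder() value maps to a direction (in Hive, ASC is 1 and DESC is 0). A tiny standalone illustration of that mapping, with a placeholder enum standing in for Calcite's Direction:

    import org.apache.hadoop.hive.metastore.api.Order;

    public class SortDirectionDemo {
      static final int HIVE_COLUMN_ORDER_ASC = 1; // mirrors BaseSemanticAnalyzer

      enum Direction { ASCENDING, DESCENDING }    // placeholder, not Calcite's enum

      static Direction directionOf(Order sortCol) {
        return sortCol.getOrder() == HIVE_COLUMN_ORDER_ASC
            ? Direction.ASCENDING : Direction.DESCENDING;
      }

      public static void main(String[] args) {
        System.out.println(directionOf(new Order("name", 1))); // ASCENDING
        System.out.println(directionOf(new Order("id", 0)));   // DESCENDING
      }
    }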

Code example source: apache/drill

    private boolean checkTable(Table table,
        List<Integer> bucketPositionsDest,
        List<Integer> sortPositionsDest,
        List<Integer> sortOrderDest,
        int numBucketsDest) {
      // The bucketing and sorting positions should exactly match
      int numBuckets = table.getNumBuckets();
      if (numBucketsDest != numBuckets) {
        return false;
      }
      List<Integer> tableBucketPositions =
          getBucketPositions(table.getBucketCols(), table.getCols());
      List<Integer> sortPositions =
          getSortPositions(table.getSortCols(), table.getCols());
      List<Integer> sortOrder =
          getSortOrder(table.getSortCols(), table.getCols());
      return bucketPositionsDest.equals(tableBucketPositions) &&
          sortPositionsDest.equals(sortPositions) &&
          sortOrderDest.equals(sortOrder);
    }

Code example source: apache/hive

    private void genPartnCols(String dest, Operator input, QB qb,
        TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
      boolean enforceBucketing = false;
      ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
      if ((dest_tab.getNumBuckets() > 0)) {
        enforceBucketing = true;
        if (updating(dest) || deleting(dest)) {
          partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
        } else {
          partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
              false);
        }
      }
      if ((dest_tab.getSortCols() != null) &&
          (dest_tab.getSortCols().size() > 0)) {
        if (!enforceBucketing) {
          throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
        } else {
          if (!enforceBucketing) {
            // Unreachable in this variant (enforceBucketing is always true in this branch);
            // compare the apache/drill variant below, whose outer check also lets
            // index tables through.
            partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
          }
        }
        enforceBucketing = true;
      }
      if (enforceBucketing) {
        ctx.setPartnCols(partnColsNoConvert);
      }
    }

Code example source: apache/drill

    private void checkAcidConstraints(QB qb, TableDesc tableDesc,
        Table table) throws SemanticException {
      String tableName = tableDesc.getTableName();
      if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
        LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
        throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
      }
      /*
      LOG.info("Modifying config values for ACID write");
      conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true);
      conf.setIntVar(ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER, 1);
      These props are now enabled elsewhere (see commit diffs). It would be better instead to throw
      if they are not set. For example, if user has set hive.optimize.reducededuplication=false for
      some reason, we'll run a query contrary to what they wanted... But throwing now would be
      backwards incompatible.
      */
      conf.set(AcidUtils.CONF_ACID_KEY, "true");
      if (table.getNumBuckets() < 1) {
        throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName());
      }
      // ACID tables may be bucketed but must not declare sort columns.
      if (table.getSortCols() != null && table.getSortCols().size() > 0) {
        throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName());
      }
    }

Code example source: apache/hive

    sortColumnsFirstTable.addAll(tbl.getSortCols());
    return checkSortColsAndJoinCols(tbl.getSortCols(),
        joinCols,
        sortColumnsFirstTable);

Code example source: apache/drill

    private void genPartnCols(String dest, Operator input, QB qb,
        TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
      boolean enforceBucketing = false;
      ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
      if ((dest_tab.getNumBuckets() > 0)) {
        enforceBucketing = true;
        if (updating(dest) || deleting(dest)) {
          partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
        } else {
          partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
              false);
        }
      }
      if ((dest_tab.getSortCols() != null) &&
          (dest_tab.getSortCols().size() > 0)) {
        if (!enforceBucketing && !dest_tab.isIndexTable()) {
          throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
        } else {
          if (!enforceBucketing) {
            partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
          }
        }
        enforceBucketing = true;
      }
      if (enforceBucketing) {
        ctx.setPartnCols(partnColsNoConvert);
      }
    }

Code example source: apache/hive

    numBuckets = table.getNumBuckets();
    List<String> sortCols = new ArrayList<String>();
    for (Order colSortOrder : table.getSortCols()) {
      sortCols.add(colSortOrder.getCol());

Code example source: apache/drill

    numBuckets = table.getNumBuckets();
    List<String> sortCols = new ArrayList<String>();
    for (Order colSortOrder : table.getSortCols()) {
      sortCols.add(colSortOrder.getCol());

Code example source: apache/hive

    List<String> sortCols = Utilities.getColumnNamesFromSortCols(table.getSortCols());
    List<String> bucketCols = table.getBucketCols();
    return matchBucketSortCols(groupByCols, bucketCols, sortCols);
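
The Utilities.getColumnNamesFromSortCols helper used here is not shown in the article; a hedged sketch of equivalent logic, assuming all the helper does is project out the column names:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Order;

    class SortColNames {
      // Equivalent of Utilities.getColumnNamesFromSortCols (assumed behavior):
      // drop the direction, keep the column names in sort-key order.
      static List<String> getColumnNamesFromSortCols(List<Order> sortCols) {
        List<String> names = new ArrayList<String>(sortCols.size());
        for (Order sortCol : sortCols) {
          names.add(sortCol.getCol());
        }
        return names;
      }
    }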

Code example source: apache/hive

    if ((dest_tab.getSortCols() != null) &&
        (dest_tab.getSortCols().size() > 0)) {
      sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
      sortOrders = getSortOrders(dest, qb, dest_tab, input);

Code example source: apache/drill

    if ((dest_tab.getSortCols() != null) &&
        (dest_tab.getSortCols().size() > 0)) {
      sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
      sortOrders = getSortOrders(dest, qb, dest_tab, input);

Code example source: apache/hive

    if (!destTable.getSortCols().isEmpty()) {
      sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols());
      sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols());
    } else {

Code example source: apache/drill

    @Override
    public List<RelCollation> getCollationList() {
      ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
      for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
        for (int i = 0; i < this.hiveTblMetadata.getSd().getCols().size(); i++) {
          FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
          if (field.getName().equals(sortColumn.getCol())) {
            Direction direction;
            NullDirection nullDirection;
            if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
              direction = Direction.ASCENDING;
              nullDirection = NullDirection.FIRST;
            } else {
              direction = Direction.DESCENDING;
              nullDirection = NullDirection.LAST;
            }
            collationList.add(new RelFieldCollation(i, direction, nullDirection));
            break;
          }
        }
      }
      return new ImmutableList.Builder<RelCollation>()
          .add(RelCollationTraitDef.INSTANCE.canonize(
              new HiveRelCollation(collationList.build())))
          .build();
    }

Code example source: apache/hive

    private void alterPartitionSpecInMemory(Table tbl,
        Map<String, String> partSpec,
        org.apache.hadoop.hive.metastore.api.Partition tpart,
        boolean inheritTableSpecs,
        String partPath) throws HiveException, InvalidOperationException {
      LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
          + partSpec);
      if (inheritTableSpecs) {
        // Copy table-level storage settings (formats, serde, bucketing, sort columns)
        // onto the partition's storage descriptor.
        tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
        tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
        tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
        tpart.getSd().getSerdeInfo().setParameters(
            tbl.getTTable().getSd().getSerdeInfo().getParameters());
        tpart.getSd().setBucketCols(tbl.getBucketCols());
        tpart.getSd().setNumBuckets(tbl.getNumBuckets());
        tpart.getSd().setSortCols(tbl.getSortCols());
      }
      if (partPath == null || partPath.trim().equals("")) {
        throw new HiveException("new partition path should not be null or empty.");
      }
      tpart.getSd().setLocation(partPath);
    }

Code example source: apache/drill

    private void alterPartitionSpecInMemory(Table tbl,
        Map<String, String> partSpec,
        org.apache.hadoop.hive.metastore.api.Partition tpart,
        boolean inheritTableSpecs,
        String partPath) throws HiveException, InvalidOperationException {
      LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
          + partSpec);
      if (inheritTableSpecs) {
        tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
        tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
        tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
        tpart.getSd().getSerdeInfo().setParameters(
            tbl.getTTable().getSd().getSerdeInfo().getParameters());
        tpart.getSd().setBucketCols(tbl.getBucketCols());
        tpart.getSd().setNumBuckets(tbl.getNumBuckets());
        tpart.getSd().setSortCols(tbl.getSortCols());
      }
      if (partPath == null || partPath.trim().equals("")) {
        throw new HiveException("new partition path should not be null or empty.");
      }
      tpart.getSd().setLocation(partPath);
    }
