Usage of the org.apache.hadoop.hive.ql.metadata.Table.getSd() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.getSd() method and shows how Table.getSd() is used in practice. The examples come from selected projects on GitHub/Stackoverflow/Maven and are intended as practical references. Details of Table.getSd():

Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getSd

About Table.getSd

The source article gives no description, but the examples below make the contract clear: getSd() returns the table's underlying org.apache.hadoop.hive.metastore.api.StorageDescriptor, the Thrift object describing the table's physical layout: column schemas (getCols()), storage location (getLocation()), input/output format classes, SerDe configuration (getSerdeInfo()), and bucketing/sort metadata (getNumBuckets(), getBucketCols()).
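
Before the excerpts, here is a minimal sketch of reading those fields; the Hive.get() lookup and the "default"/"my_table" names are hypothetical placeholders, not taken from any of the quoted projects.

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class GetSdDemo {
 public static void main(String[] args) throws Exception {
  // Hypothetical lookup; "default" and "my_table" are placeholders.
  Table table = Hive.get().getTable("default", "my_table");
  StorageDescriptor sd = table.getSd();

  System.out.println("location:      " + sd.getLocation());
  System.out.println("input format:  " + sd.getInputFormat());
  System.out.println("output format: " + sd.getOutputFormat());
  System.out.println("serde:         " + sd.getSerdeInfo().getSerializationLib());
  System.out.println("buckets:       " + sd.getNumBuckets());
  for (FieldSchema col : sd.getCols()) {
   System.out.println("column: " + col.getName() + " : " + col.getType());
  }
 }
}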

Code examples

Code example source: apache/incubator-gobblin

private boolean isAvro(Table table) {
 return AvroSerDe.class.getName().equals(table.getSd().getSerdeInfo().getSerializationLib());
}
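
The same check generalizes to other formats by comparing against the corresponding SerDe class name. A minimal sketch, not from the source, assuming the standard ORC SerDe bundled with Hive:

import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.metadata.Table;

// Hypothetical companion to isAvro(): true when the table is stored as ORC.
private boolean isOrc(Table table) {
 return OrcSerde.class.getName().equals(table.getSd().getSerdeInfo().getSerializationLib());
}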

Code example source: apache/hive

protected void prepareBucketingFields() {
 this.isBucketed = table.getSd().getNumBuckets() > 0;
 // For unbucketed tables we have exactly 1 RecordUpdater (until HIVE-19208) for each AbstractRecordWriter which
 // ends up writing to a file bucket_000000.
 // See also {@link #getBucket(Object)}
 this.totalBuckets = isBucketed ? table.getSd().getNumBuckets() : 1;
 if (isBucketed) {
  this.bucketIds = getBucketColIDs(table.getSd().getBucketCols(), table.getSd().getCols());
  this.bucketFieldData = new Object[bucketIds.size()];
  this.bucketObjInspectors = getObjectInspectorsForBucketedCols(bucketIds, inputRowObjectInspector);
  this.bucketStructFields = new StructField[bucketIds.size()];
  List<? extends StructField> allFields = inputRowObjectInspector.getAllStructFieldRefs();
  for (int i = 0; i < bucketIds.size(); i++) {
   bucketStructFields[i] = allFields.get(bucketIds.get(i));
  }
 }
}
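
Once the combined hash code of the bucket columns is known, Hive conventionally maps it to a non-negative bucket index with a mask-and-modulo step. A minimal sketch of that convention (the helper name is ours, not Hive's):

// numBuckets would come from table.getSd().getNumBuckets(), as above.
private static int toBucketId(int hashCode, int numBuckets) {
 // Mask the sign bit so the index is non-negative, then wrap into range.
 return (hashCode & Integer.MAX_VALUE) % numBuckets;
}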

Code example source: apache/hive

@Override
public RelDistribution getDistribution() {
 ImmutableList.Builder<Integer> columnPositions = new ImmutableList.Builder<Integer>();
 for (String bucketColumn : this.hiveTblMetadata.getBucketCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(bucketColumn)) {
    columnPositions.add(i);
    break;
   }
  }
 }
 return new HiveRelDistribution(RelDistribution.Type.HASH_DISTRIBUTED,
     columnPositions.build());
}

Code example source: apache/hive

public static void addSchemaEvolutionToTableScanOperator(Table table,
  TableScanOperator tableScanOp) {
 String colNames = MetaStoreUtils.getColumnNamesFromFieldSchema(table.getSd().getCols());
 String colTypes = MetaStoreUtils.getColumnTypesFromFieldSchema(table.getSd().getCols());
 tableScanOp.setSchemaEvolution(colNames, colTypes);
}
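
MetaStoreUtils serializes the field names and types into comma-separated strings before handing them to the TableScanOperator. For illustration only, the name half of that transformation is equivalent to:

import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

// Equivalent of MetaStoreUtils.getColumnNamesFromFieldSchema:
// yields e.g. "id,name,ds" for a three-column table.
private static String joinColumnNames(List<FieldSchema> cols) {
 return cols.stream().map(FieldSchema::getName).collect(Collectors.joining(","));
}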

Code example source: apache/drill

public static void addSchemaEvolutionToTableScanOperator(Table table,
  TableScanOperator tableScanOp) {
 String colNames = MetaStoreUtils.getColumnNamesFromFieldSchema(table.getSd().getCols());
 String colTypes = MetaStoreUtils.getColumnTypesFromFieldSchema(table.getSd().getCols());
 tableScanOp.setSchemaEvolution(colNames, colTypes);
}

Code example source: apache/drill

@Override
public RelDistribution getDistribution() {
 ImmutableList.Builder<Integer> columnPositions = new ImmutableList.Builder<Integer>();
 for (String bucketColumn : this.hiveTblMetadata.getBucketCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(bucketColumn)) {
    columnPositions.add(i);
    break;
   }
  }
 }
 return new HiveRelDistribution(RelDistribution.Type.HASH_DISTRIBUTED,
     columnPositions.build());
}

Code example source: apache/hive

@Override
public List<RelCollation> getCollationList() {
 ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
 for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(sortColumn.getCol())) {
    Direction direction;
    NullDirection nullDirection;
    if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
     direction = Direction.ASCENDING;
     nullDirection = NullDirection.FIRST;
    } else {
     direction = Direction.DESCENDING;
     nullDirection = NullDirection.LAST;
    }
    collationList.add(new RelFieldCollation(i, direction, nullDirection));
    break;
   }
  }
 }
 return new ImmutableList.Builder<RelCollation>()
     .add(RelCollationTraitDef.INSTANCE.canonize(
         new HiveRelCollation(collationList.build())))
     .build();
}
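
Both getDistribution() and getCollationList() resolve each bucket/sort column with a nested linear scan over getCols(). For wide tables, building a name-to-position index once is an equivalent alternative; a sketch, not from the source:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

// Build the index once, then each column name resolves in O(1)
// instead of rescanning table.getSd().getCols().
private static Map<String, Integer> columnPositions(List<FieldSchema> cols) {
 Map<String, Integer> positions = new HashMap<>();
 for (int i = 0; i < cols.size(); i++) {
  positions.put(cols.get(i).getName(), i);
 }
 return positions;
}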

Code example source: apache/hive

deserializerClassName = tableScanDesc.getTableMetadata().getSd().getSerdeInfo().getSerializationLib();
Deserializer deserializer = ReflectionUtil.newInstance(
  context.conf.getClassByName(deserializerClassName)

Code example source: apache/hive

private Table getTable(Hive db) throws SemanticException, HiveException {
 Table tbl = work.getTable();
 // FIXME for ctas this is still needed because location is not set sometimes
 if (tbl.getSd().getLocation() == null) {
  tbl = db.getTable(work.getFullTableName());
 }
 return tbl;
}

Code example source: apache/hive

public void checkValidity() throws HiveException {
 if (!tPartition.getSd().equals(table.getSd())) {
  Table.validateColumns(getCols(), table.getPartCols());
 }
}

Code example source: apache/drill

public void checkValidity() throws HiveException {
 if (!tPartition.getSd().equals(table.getSd())) {
  Table.validateColumns(getCols(), table.getPartCols());
 }
}

Code example source: apache/incubator-gobblin

break;
case INPUT_FORMAT:
 InputFormat<?, ?> inputFormat = HiveUtils.getInputFormat(table.getSd());

Code example source: apache/incubator-gobblin

/**
 * Automatically serializes the {@link HiveDataset} by calling {@link #setHiveDataset(HiveDataset)}
 * @param hiveDataset for which the workunit is being created
 */
@SuppressWarnings("deprecation")
public HiveWorkUnit(HiveDataset hiveDataset) {
 super();
 setHiveDataset(hiveDataset);
 if (hiveDataset.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
  setTableLocation(hiveDataset.getTable().getSd().getLocation());
 }
}

Code example source: apache/hive

public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
  Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
 List<String> pvals = new ArrayList<String>();
 for (FieldSchema field : tbl.getPartCols()) {
  String val = partSpec.get(field.getName());
  if (val == null || val.isEmpty()) {
   throw new HiveException("partition spec is invalid; field "
     + field.getName() + " does not exist or is empty");
  }
  pvals.add(val);
 }
 org.apache.hadoop.hive.metastore.api.Partition tpart =
   new org.apache.hadoop.hive.metastore.api.Partition();
 tpart.setDbName(tbl.getDbName());
 tpart.setTableName(tbl.getTableName());
 tpart.setValues(pvals);
 if (!tbl.isView()) {
  tpart.setSd(tbl.getSd().deepCopy());
  tpart.getSd().setLocation((location != null) ? location.toString() : null);
 }
 return tpart;
}
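
A hypothetical call site for the helper above; the partition column "ds", its value, and the warehouse path are placeholders:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;

// Assumes tbl is partitioned by a single "ds" string column.
Map<String, String> partSpec = new HashMap<>();
partSpec.put("ds", "2022-01-29");
org.apache.hadoop.hive.metastore.api.Partition tpart =
  createMetaPartitionObject(tbl, partSpec, new Path("/warehouse/my_table/ds=2022-01-29"));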

Code example source: apache/hive

private int updateColumns(Table tbl, Partition part)
    throws HiveException {
 String serializationLib = tbl.getSd().getSerdeInfo().getSerializationLib();
 if (MetastoreConf.getStringCollection(conf,
     MetastoreConf.ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains(serializationLib)) {
  throw new HiveException(tbl.getTableName() + " has serde " + serializationLib + " for which schema " +
      "is already handled by HMS.");
 }
 Deserializer deserializer = tbl.getDeserializer(true);
 try {
  LOG.info("Updating metastore columns for table: {}", tbl.getTableName());
  final List<FieldSchema> fields = HiveMetaStoreUtils.getFieldsFromDeserializer(
      tbl.getTableName(), deserializer);
  StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
  sd.setCols(fields);
 } catch (org.apache.hadoop.hive.serde2.SerDeException | MetaException e) {
  LOG.error("alter table update columns: {}", e);
  throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
 }
 return 0;
}

Code example source: apache/hive

Table table = new Table(rv.getTable());
 parsedLocation = ReplExternalTables
   .externalTableLocation(context.hiveConf, table.getSd().getLocation());
} catch (IOException e) {
 throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);

Code example source: apache/drill

public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
  Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
 List<String> pvals = new ArrayList<String>();
 for (FieldSchema field : tbl.getPartCols()) {
  String val = partSpec.get(field.getName());
  if (val == null || val.isEmpty()) {
   throw new HiveException("partition spec is invalid; field "
     + field.getName() + " does not exist or is empty");
  }
  pvals.add(val);
 }
 org.apache.hadoop.hive.metastore.api.Partition tpart =
   new org.apache.hadoop.hive.metastore.api.Partition();
 tpart.setDbName(tbl.getDbName());
 tpart.setTableName(tbl.getTableName());
 tpart.setValues(pvals);
 if (!tbl.isView()) {
  tpart.setSd(tbl.getSd().deepCopy());
  tpart.getSd().setLocation((location != null) ? location.toString() : null);
 }
 return tpart;
}

Code example source: apache/drill

@Override
public List<RelCollation> getCollationList() {
 ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
 for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(sortColumn.getCol())) {
    Direction direction;
    NullDirection nullDirection;
    if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
     direction = Direction.ASCENDING;
     nullDirection = NullDirection.FIRST;
    }
    else {
     direction = Direction.DESCENDING;
     nullDirection = NullDirection.LAST;
    }
    collationList.add(new RelFieldCollation(i,direction,nullDirection));
    break;
   }
  }
 }
 return new ImmutableList.Builder<RelCollation>()
     .add(RelCollationTraitDef.INSTANCE.canonize(
         new HiveRelCollation(collationList.build())))
     .build();
}

Code example source: apache/hive

private static class ThreadLocalHive extends ThreadLocal<Hive> {
 @Override
 protected Hive initialValue() {
  return null;
 }
 @Override
 public synchronized void set(Hive hiveObj) {
  Hive currentHive = this.get();
  if (currentHive != hiveObj) {
   // Remove/close current thread-local Hive object before overwriting with new Hive object.
   remove();
   super.set(hiveObj);
  }
 }
 @Override
 public synchronized void remove() {
  Hive currentHive = this.get();
  if (currentHive != null) {
   // Close the metastore connections before removing it from thread local hiveDB.
   currentHive.close(false);
   super.remove();
  }
 }
}

Code example source: apache/hive

Path destLocation;
if (partitionValues == null) {
 destLocation = new Path(table.getSd().getLocation());
} else {
 Map<String, String> partSpec = Warehouse.makeSpecFromValues(
