org.apache.hadoop.hive.ql.metadata.Table.setDataLocation()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(6.8k)|赞(0)|评价(0)|浏览(197)

本文整理了Java中org.apache.hadoop.hive.ql.metadata.Table.setDataLocation()方法的一些代码示例,展示了Table.setDataLocation()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.setDataLocation()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Table
类名称:Table
方法名:setDataLocation

Table.setDataLocation介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/**
 * Repoints {@code table} at {@code newLocation}: logs the equivalent DDL,
 * clones the table object, updates the clone's data location, and applies
 * the change through the internal alter path.
 */
void updateTableLocation(Table table, Path newLocation) throws HiveException {
  // Log the DDL-equivalent statement for traceability.
  LOG.info(String.format("ALTER TABLE %s SET LOCATION '%s'",
      getQualifiedName(table), newLocation));
  org.apache.hadoop.hive.ql.metadata.Table updated =
      new org.apache.hadoop.hive.ql.metadata.Table(table);
  updated.setDataLocation(newLocation);
  alterTableInternal(TxnUtils.isTransactionalTable(table), table, updated);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the registration-ready copy of {@code originTable} for the target
 * database, rooted at {@code targetLocation}. Hive failures are wrapped in
 * {@link IOException}.
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table result = originTable.copy();
    result.setDbName(this.targetDatabase);
    result.setDataLocation(targetLocation);
    // The table owner must be the flow executor.
    result.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    result.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    result.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    // Drop the origin's create time so the registration gets a fresh one.
    result.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(result, this);
    return result;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

代码示例来源:origin: apache/hive

fields.add(new FieldSchema("val", "int", null));
table.setFields(fields);
table.setDataLocation(Warehouse.getDnsPath(new Path(SessionState.get().getTempTableSpace(),
  tableName), conf));
table.getTTable().setTemporary(true);

代码示例来源:origin: apache/drill

fields.add(new FieldSchema("val", "int", null));
table.setFields(fields);
table.setDataLocation(Warehouse.getDnsPath(new Path(SessionState.get().getTempTableSpace(),
 tableName), conf));
table.getTTable().setTemporary(true);

代码示例来源:origin: apache/hive

Table table = context.getHive().newTable(desc.getTableName());
if (desc.getLocation() != null) {
 table.setDataLocation(new Path(desc.getLocation()));

代码示例来源:origin: apache/hive

tempTableObj.setDataLocation(new Path(fromURI));
if (inputFormatClassName != null && serDeClassName != null) {
 try {

代码示例来源:origin: apache/drill

table.setSerializationLib(format.getSerde());
table.setFields(fields);
table.setDataLocation(tablePath);
table.getTTable().setTemporary(true);
table.setStoredAsSubDirectories(false);

代码示例来源:origin: apache/hive

tbl.setDataLocation(new Path(crtTbl.getLocation()));
} else {
 tbl.unsetDataLocation();

代码示例来源:origin: apache/drill

tbl.setDataLocation(new Path(crtTbl.getLocation()));
} else {
 tbl.unsetDataLocation();

代码示例来源:origin: apache/hive

tbl.setDataLocation(new Path(getLocation()));

代码示例来源:origin: apache/drill

tbl.setDataLocation(new Path(getLocation()));

代码示例来源:origin: apache/hive

tbl.setDataLocation(new Path(getLocation()));

代码示例来源:origin: apache/hive

table.setDataLocation(null);
if(!externalTableOnSource) {
 replicationSpec().setMigratingToExternalTable();

代码示例来源:origin: apache/hive

tbl.setDataLocation(ft.getDataLocation());
tbl.setCreateTime(ft.getTTable().getCreateTime());
tbl.getParameters().put(hive_metastoreConstants.DDL_TIME,

代码示例来源:origin: apache/drill

tbl.setDataLocation(new Path(crtView.getLocation()));

代码示例来源:origin: com.linkedin.gobblin/gobblin-data-management

/**
 * Produces a copy of {@code originTable} pointed at {@code targetLocation}
 * and re-homed in the target database, tagged with Gobblin registration
 * metadata. Any {@link HiveException} is rethrown as {@link IOException}.
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table copied = originTable.copy();
    copied.setDbName(this.targetDatabase);
    copied.setDataLocation(targetLocation);
    // Owner must reflect the flow executor, not the origin table's owner.
    copied.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    copied.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    copied.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    copied.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(copied, this);
    return copied;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

代码示例来源:origin: org.apache.gobblin/gobblin-data-management

/**
 * Clones {@code originTable} into the target database at
 * {@code targetLocation}, stamping Gobblin registration parameters.
 * Hive-level failures surface as {@link IOException}.
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table target = originTable.copy();
    target.setDbName(this.targetDatabase);
    target.setDataLocation(targetLocation);
    // Ownership is transferred to the flow executor.
    target.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    target.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    target.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    // Clear the inherited create time before registration.
    target.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(target, this);
    return target;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

代码示例来源:origin: qubole/streamx

/**
 * Builds an external, Avro-backed Hive table definition whose data location
 * is the topic directory derived from {@code url}/{@code topicsDir}.
 * Columns come from {@code schema}; partition columns from {@code partitioner}.
 */
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table avroTable = new Table(database, tableName);
  avroTable.setTableType(TableType.EXTERNAL_TABLE);
  avroTable.getParameters().put("EXTERNAL", "TRUE");
  // Data files live under the connector's per-topic directory.
  avroTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  avroTable.setSerializationLib(avroSerde);
  try {
    avroTable.setInputFormatClass(avroInputFormat);
    avroTable.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  avroTable.setFields(HiveSchemaConverter.convertSchema(schema));
  avroTable.setPartCols(partitioner.partitionFields());
  // Embed the Avro schema literal so Hive can deserialize the records.
  avroTable.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return avroTable;
}
}

代码示例来源:origin: qubole/streamx

/**
 * Builds an external, Parquet-backed Hive table definition rooted at the
 * topic directory under {@code url}/{@code topicsDir}, with columns converted
 * from {@code schema} and partition columns supplied by {@code partitioner}.
 */
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table parquetTable = new Table(database, tableName);
  parquetTable.setTableType(TableType.EXTERNAL_TABLE);
  parquetTable.getParameters().put("EXTERNAL", "TRUE");
  // Data files live under the connector's per-topic directory.
  parquetTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  parquetTable.setSerializationLib(getHiveParquetSerde());
  try {
    parquetTable.setInputFormatClass(getHiveParquetInputFormat());
    parquetTable.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the Connect schema into Hive column definitions.
  List<FieldSchema> hiveColumns = HiveSchemaConverter.convertSchema(schema);
  parquetTable.setFields(hiveColumns);
  parquetTable.setPartCols(partitioner.partitionFields());
  return parquetTable;
}

代码示例来源:origin: org.apache.hadoop.hive/hive-exec

tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
} else {
 tbl.unsetDataLocation();

相关文章

Table类方法