This article collects code examples of the Java method org.apache.hadoop.hive.ql.metadata.Hive.createPartitions() and shows how Hive.createPartitions() is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Hive.createPartitions() method:
Package path: org.apache.hadoop.hive.ql.metadata.Hive
Class name: Hive
Method name: createPartitions
Method description: none available
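Since the reference above gives no description, here is a minimal, hedged usage sketch: build an AddPartitionDesc, describe one partition (a partition spec plus a storage location), and hand it to Hive.createPartitions(), which registers the partitions in the metastore and returns the created Partition objects. The database name, table name, partition column, and path below are placeholders, and the sketch assumes a Hive version that still ships org.apache.hadoop.hive.ql.plan.AddPartitionDesc, as the examples below do.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class CreatePartitionsExample {

  public static void main(String[] args) throws HiveException {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);

    // Describe the partitions to add; the third argument is "if not exists".
    // The database, table, partition column, and location are placeholders.
    AddPartitionDesc desc = new AddPartitionDesc("default", "my_table", true);
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dt", "2020-01-01");
    desc.addPartition(partSpec, "/warehouse/my_table/dt=2020-01-01");

    // createPartitions() registers every partition described in the desc with
    // the metastore and returns the Partition objects that were created.
    List<Partition> created = db.createPartitions(desc);
    System.out.println("Created " + created.size() + " partition(s)");
  }
}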
Code example source: apache/hive

/**
 * Add partitions to a table.
 *
 * @param db
 *          Database to add the partitions to.
 * @param addPartitionDesc
 *          The partitions to add.
 * @return Returns 0 when execution succeeds and a value above 0 if it fails.
 * @throws HiveException
 */
private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
  List<Partition> parts = db.createPartitions(addPartitionDesc);
  for (Partition part : parts) {
    addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.INSERT));
  }
  return 0;
}
Code example source: apache/drill

/**
 * Add partitions to a table.
 *
 * @param db
 *          Database to add the partitions to.
 * @param addPartitionDesc
 *          The partitions to add.
 * @return Returns 0 when execution succeeds and a value above 0 if it fails.
 * @throws HiveException
 */
private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
  List<Partition> parts = db.createPartitions(addPartitionDesc);
  for (Partition part : parts) {
    addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.INSERT));
  }
  return 0;
}
Code example source: apache/drill

// Excerpt (truncated): createPartitions() is called in batches over the
// partitions that are missing from the metastore.
    ... + ':' + part.getPartitionName());
    if (counter % batch_size == 0 || counter == partsNotInMs.size()) {
      db.createPartitions(apd);
      apd = new AddPartitionDesc(table.getDbName(), table.getTableName(), false);
    }
    ...
    ... + ':' + part.getPartitionName());
    db.createPartitions(apd);
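The excerpt above is heavily truncated; its underlying pattern is to accumulate partition specs in one AddPartitionDesc and flush it with createPartitions() every batch_size entries (and once more for the remainder). Below is a minimal sketch of that batching pattern, assuming the partition specs are already computed; the helper class, method signature, and null locations are illustrative assumptions, not code from the project above.

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class BatchedPartitionCreator {

  /**
   * Creates the given partition specs in batches: every batchSize entries the
   * accumulated AddPartitionDesc is flushed to the metastore and a fresh one
   * is started. A null location lets the metastore derive the default path.
   */
  public static void createInBatches(Hive db, Table table,
      List<Map<String, String>> partSpecs, int batchSize) throws HiveException {
    AddPartitionDesc apd =
        new AddPartitionDesc(table.getDbName(), table.getTableName(), false);
    int counter = 0;
    for (Map<String, String> spec : partSpecs) {
      counter++;
      apd.addPartition(spec, null);
      if (counter % batchSize == 0 || counter == partSpecs.size()) {
        db.createPartitions(apd);   // flush the current batch
        apd = new AddPartitionDesc(table.getDbName(), table.getTableName(), false);
      }
    }
  }
}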
Code example source: com.facebook.presto.hive/hive-apache

/**
 * Add partitions to a table.
 *
 * @param db
 *          Database to add the partitions to.
 * @param addPartitionDesc
 *          The partitions to add.
 * @return Returns 0 when execution succeeds and a value above 0 if it fails.
 * @throws HiveException
 */
private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
  List<Partition> parts = db.createPartitions(addPartitionDesc);
  for (Partition part : parts) {
    work.getOutputs().add(new WriteEntity(part, WriteEntity.WriteType.INSERT));
  }
  return 0;
}
Code example source: apache/lens
partSpec.put("dummy_partition_col", "dummy_val");
partitionDesc.addPartition(partSpec, partDir.toUri().toString());
Hive.get(conf).createPartitions(partitionDesc);
log.info("{}: Added partition {}", tableName, partDir.toUri().toString());
Code example source: apache/lens
partSpec.put(TEMP_TABLE_PART_COL, TEMP_TABLE_PART_VAL);
partitionDesc.addPartition(partSpec, dataLocation);
hiveClient.createPartitions(partitionDesc);
log.info("Created partition in {} for data in {}", tableName, dataLocation);
Code example source: com.facebook.presto.hive/hive-apache

// Excerpt (truncated): a bulk createPartitions() call; if it fails, the code
// falls back to adding the partitions one by one.
    ... + msckDesc.getTableName() + ':' + part.getPartitionName());
    db.createPartitions(apd);
  } catch (Exception e) {
    LOG.info("Could not bulk-add partitions to metastore; trying one by one", e);
    ...
Code example source: org.apache.lens/lens-cube
List<Partition> partitionsAdded = client.createPartitions(addParts);
success = true;
return partitionsAdded;
Code example source: apache/lens
List<Partition> partitionsAdded = client.createPartitions(addParts);
success = true;
return partitionsAdded;
Code example source: org.apache.lens/lens-cube
latestPart.getPartition(0).setBucketCols(latest.part.getBucketCols());
latestPart.getPartition(0).setSortCols(latest.part.getSortCols());
client.createPartitions(latestPart);
Code example source: apache/lens
latestPart.getPartition(0).setBucketCols(latest.part.getBucketCols());
latestPart.getPartition(0).setSortCols(latest.part.getSortCols());
client.createPartitions(latestPart);
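In the last two examples, getPartition(0) returns the per-partition descriptor that addPartition() created inside the AddPartitionDesc, so bucket and sort columns can be copied from an existing partition before the metastore call. Below is a condensed sketch of that idea; the class name, method name, and parameters are assumptions for illustration.

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class CopyStorageFromExistingPartition {

  /**
   * Sketch: create a new partition whose bucket and sort columns are copied
   * from an existing source partition. getPartition(0) exposes the
   * per-partition descriptor added by addPartition().
   */
  public static List<Partition> createLike(Hive db, Partition source,
      Map<String, String> partSpec, String location) throws HiveException {
    AddPartitionDesc desc = new AddPartitionDesc(
        source.getTable().getDbName(), source.getTable().getTableName(), true);
    desc.addPartition(partSpec, location);
    desc.getPartition(0).setBucketCols(source.getBucketCols());
    desc.getPartition(0).setSortCols(source.getSortCols());
    return db.createPartitions(desc);
  }
}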