This article collects code examples of the Java method org.apache.hadoop.hive.ql.metadata.Hive.getPartitions(), showing how Hive.getPartitions() is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Hive.getPartitions() method:
Package: org.apache.hadoop.hive.ql.metadata
Class: Hive
Method: getPartitions
Description: Get all the partitions that the table has.
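Before the collected examples, here is a minimal sketch of how the method is typically invoked. The database name "mydb" and table name "sales" are hypothetical, and the snippet assumes a working HiveConf that points at a metastore:

// Minimal sketch (hypothetical db/table names "mydb"/"sales"):
// obtain the session Hive client, look up a partitioned table,
// and print every partition together with its data location.
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class GetPartitionsExample {
  public static void main(String[] args) throws HiveException {
    Hive db = Hive.get(new HiveConf());          // thread-local metastore client wrapper
    Table table = db.getTable("mydb", "sales");  // hypothetical names
    if (table.isPartitioned()) {
      List<Partition> partitions = db.getPartitions(table);
      for (Partition partition : partitions) {
        System.out.println(partition.getName() + " -> " + partition.getDataLocation());
      }
    }
  }
}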
Code example source: apache/hive
/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @return list of partition objects
 * @throws HiveException
 */
public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  return getPartitions(tbl, partialPartSpec, (short)-1);
}
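A hedged usage sketch for this overload: fixing only some partition columns and leaving the rest as empty strings returns every partition that matches the fixed values. The partition column names "ds" and "country" below are hypothetical and must correspond to the table's actual partition keys; db and tbl are a Hive client and a partitioned Table as in the sketch above, and java.util.Map / java.util.LinkedHashMap imports are assumed.

// Assumed partition keys "ds" and "country"; an empty string means "any value".
Map<String, String> partialSpec = new LinkedHashMap<String, String>();
partialSpec.put("ds", "2024-01-01");  // fixed value
partialSpec.put("country", "");       // match any country
List<Partition> parts = db.getPartitions(tbl, partialSpec);  // all countries for that date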
Code example source: apache/drill
private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
    throws HiveException, InvalidOperationException {
  List<Path> locations = new ArrayList<Path>();
  if (partSpec == null) {
    if (table.isPartitioned()) {
      for (Partition partition : db.getPartitions(table)) {
        locations.add(partition.getDataLocation());
        EnvironmentContext environmentContext = new EnvironmentContext();
        if (needToUpdateStats(partition.getParameters(), environmentContext)) {
          db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
        }
      }
    } else {
      locations.add(table.getPath());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(table.getParameters(), environmentContext)) {
        db.alterTable(table.getDbName() + "." + table.getTableName(), table, environmentContext);
      }
    }
  } else {
    for (Partition partition : db.getPartitionsByNames(table, partSpec)) {
      locations.add(partition.getDataLocation());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(partition.getParameters(), environmentContext)) {
        db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
      }
    }
  }
  return locations;
}
Code example source: apache/drill
/**
 * Get all the partitions of the table that match the given partial
 * specification. Partition columns whose value can be anything should be
 * given as an empty string.
 *
 * @param tbl
 *          object for which partitions are needed. Must be partitioned.
 * @return list of partition objects
 * @throws HiveException
 */
public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  return getPartitions(tbl, partialPartSpec, (short)-1);
}
Code example source: apache/hive
protected List<Partition> getPartitions(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  List<Partition> partitions;
  try {
    partitions = partSpec == null ? db.getPartitions(table) :
        db.getPartitions(table, partSpec);
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (partitions.isEmpty() && throwException) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return partitions;
}
Code example source: apache/drill
protected List<Partition> getPartitions(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  List<Partition> partitions;
  try {
    partitions = partSpec == null ? db.getPartitions(table) :
        db.getPartitions(table, partSpec);
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (partitions.isEmpty() && throwException) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return partitions;
}
Code example source: apache/hive
private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
    throws HiveException {
  // output file system information
  Path tblPath = tbl.getPath();
  List<Path> locations = new ArrayList<Path>();
  if (tbl.isPartitioned()) {
    if (par == null) {
      for (Partition curPart : db.getPartitions(tbl)) {
        if (curPart.getLocation() != null) {
          locations.add(new Path(curPart.getLocation()));
        }
      }
    } else {
      if (par.getLocation() != null) {
        locations.add(new Path(par.getLocation()));
      }
    }
  } else {
    if (tblPath != null) {
      locations.add(tblPath);
    }
  }
  return locations;
}
Code example source: apache/drill
private boolean containsPartition(Index index, Map<String, String> partSpec)
    throws HiveException {
  String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
  Table indexTable = hive.getTable(qualified[0], qualified[1]);
  List<Partition> parts = hive.getPartitions(indexTable, partSpec);
  return (parts == null || parts.size() == 0);
}
}
Code example source: apache/drill
private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
    throws HiveException {
  // output file system information
  Path tblPath = tbl.getPath();
  List<Path> locations = new ArrayList<Path>();
  if (tbl.isPartitioned()) {
    if (par == null) {
      for (Partition curPart : db.getPartitions(tbl)) {
        if (curPart.getLocation() != null) {
          locations.add(new Path(curPart.getLocation()));
        }
      }
    } else {
      if (par.getLocation() != null) {
        locations.add(new Path(par.getLocation()));
      }
    }
  } else {
    if (tblPath != null) {
      locations.add(tblPath);
    }
  }
  return locations;
}
Code example source: apache/hive
public TableSpec(Hive db, String tableName, Map<String, String> partSpec, boolean allowPartialPartitionsSpec)
    throws HiveException {
  Table table = db.getTable(tableName);
  tableHandle = table;
  this.tableName = table.getDbName() + "." + table.getTableName();
  if (partSpec == null) {
    specType = SpecType.TABLE_ONLY;
  } else if (allowPartialPartitionsSpec) {
    partitions = db.getPartitions(table, partSpec);
    specType = SpecType.STATIC_PARTITION;
  } else {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      throw new SemanticException("partition is unknown: " + table + "/" + partSpec);
    }
    partHandle = partition;
    partitions = Collections.singletonList(partHandle);
    specType = SpecType.STATIC_PARTITION;
  }
}
Code example source: apache/hive
if (allowMany) {
  try {
    parts = db.getPartitions(table, partSpec);
  } catch (HiveException e) {
    LOG.error("Got HiveException during obtaining list of partitions"
Code example source: apache/drill
if (allowMany) {
  try {
    parts = db.getPartitions(table, partSpec);
  } catch (HiveException e) {
    LOG.error("Got HiveException during obtaining list of partitions"
Code example source: apache/hive
@Test(expected = MetastoreException.class)
public void testInvalidPartitionKeyName()
    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
  Table table = createTestTable();
  List<Partition> partitions = hive.getPartitions(table);
  assertEquals(2, partitions.size());
  // add a fake partition dir on fs
  fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf());
  Path fakePart = new Path(table.getDataLocation().toString(),
      "fakedate=2009-01-01/fakecity=sanjose");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  checker.checkMetastore(catName, dbName, tableName, null, new CheckResult());
}
Code example source: apache/drill
baseTblPartitions = db.getPartitions(baseTbl);
for (Partition basePart : baseTblPartitions) {
  HashMap<String, String> pSpec = basePart.getSpec();
Code example source: apache/hive
@Test
public void testAdditionalPartitionDirs()
    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
  Table table = createTestTable();
  List<Partition> partitions = hive.getPartitions(table);
  assertEquals(2, partitions.size());
  // add a fake partition dir on fs
  fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf());
  Path fakePart = new Path(table.getDataLocation().toString(),
      partDateName + "=2017-01-01/" + partCityName + "=paloalto/fakePartCol=fakepartValue");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  CheckResult result = new CheckResult();
  checker.checkMetastore(catName, dbName, tableName, null, result);
  assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
  assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
  assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
  // fakePart path partition is added since the defined partition keys are valid
  assertEquals(1, result.getPartitionsNotInMs().size());
}
Code example source: apache/hive
write(lineFor(table.getTableName(), fullyQualifiedDataLocation, hiveConf));
if (table.isPartitioned()) {
  List<Partition> partitions = Hive.get(hiveConf).getPartitions(table);
  for (Partition partition : partitions) {
    boolean partitionLocOutsideTableLoc = !FileUtils.isPathWithinSubtree(
Code example source: apache/hive
@Test
public void testSkipInvalidPartitionKeyName()
    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
  hive.getConf().set(HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION.varname, "skip");
  checker = new HiveMetaStoreChecker(msc, hive.getConf());
  Table table = createTestTable();
  List<Partition> partitions = hive.getPartitions(table);
  assertEquals(2, partitions.size());
  // add a fake partition dir on fs
  fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf());
  Path fakePart =
      new Path(table.getDataLocation().toString(), "fakedate=2009-01-01/fakecity=sanjose");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  createPartitionsDirectoriesOnFS(table, 2);
  CheckResult result = new CheckResult();
  checker.checkMetastore(catName, dbName, tableName, null, result);
  assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
  assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
  assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
  // only 2 valid partitions should be added
  assertEquals(2, result.getPartitionsNotInMs().size());
}
Code example source: apache/hive
List<Partition> partitions = hive.getPartitions(table);
assertEquals(numOfPartKeys * valuesPerPartition, partitions.size());
return table;
Code example source: apache/hive
FetchWork work;
if (!tbl.getPartCols().isEmpty()) {
  List<Partition> partitions = hive.getPartitions(tbl);
  List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
  List<Path> partLocs = new ArrayList<Path>();
Code example source: apache/hive
List<Partition> partitions = db.getPartitions(tbl, partSpec);
if (partitions.size() > 1) {
  throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS);
Code example source: apache/hive
assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
List<Partition> partitions = hive.getPartitions(table);
assertEquals(2, partitions.size());