This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Hive.getPartition() method and shows how Hive.getPartition() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven and similar platforms, extracted from selected open-source projects, so they should serve as a solid reference. Details of the Hive.getPartition() method:
Fully qualified class: org.apache.hadoop.hive.ql.metadata.Hive
Class name: Hive
Method name: getPartition
Description: Returns partition metadata
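Before the collected examples, here is a minimal, self-contained sketch of the most common call pattern. It is illustrative only: the table name default.sales and the partition column ds are made up, and it assumes a HiveConf/SessionState that can reach a metastore.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.session.SessionState;

public class GetPartitionSketch {
  public static void main(String[] args) throws HiveException {
    HiveConf conf = new HiveConf();
    SessionState.start(conf);                  // a session is required before using Hive.get()
    Hive db = Hive.get(conf);                  // thread-local metastore client wrapper

    Table tbl = db.getTable("default.sales");  // hypothetical partitioned table
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("ds", "2023-01-01");          // hypothetical partition key/value

    // forceCreate = false: look the partition up only; null means it does not exist
    Partition p = db.getPartition(tbl, partSpec, false);
    if (p == null) {
      System.out.println("Partition not found: " + partSpec);
    } else {
      System.out.println("Partition location: " + p.getDataLocation());
    }
  }
}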
Code example source: apache/hive

public Partition getPartition(Table tbl, Map<String, String> partSpec,
    boolean forceCreate) throws HiveException {
  return getPartition(tbl, partSpec, forceCreate, null, true);
}
Code example source: apache/hive

public TableSpec(Hive db, String tableName, Map<String, String> partSpec, boolean allowPartialPartitionsSpec)
    throws HiveException {
  Table table = db.getTable(tableName);
  tableHandle = table;
  this.tableName = table.getDbName() + "." + table.getTableName();
  if (partSpec == null) {
    specType = SpecType.TABLE_ONLY;
  } else if (allowPartialPartitionsSpec) {
    partitions = db.getPartitions(table, partSpec);
    specType = SpecType.STATIC_PARTITION;
  } else {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      throw new SemanticException("partition is unknown: " + table + "/" + partSpec);
    }
    partHandle = partition;
    partitions = Collections.singletonList(partHandle);
    specType = SpecType.STATIC_PARTITION;
  }
}
Code example source: apache/drill

public Partition getPartition(Table tbl, Map<String, String> partSpec,
    boolean forceCreate) throws HiveException {
  return getPartition(tbl, partSpec, forceCreate, null, true, null);
}
Code example source: apache/drill

/**
 * Returns partition metadata
 *
 * @param tbl
 *          the partition's table
 * @param partSpec
 *          partition keys and values
 * @param forceCreate
 *          if this is true and partition doesn't exist then a partition is
 *          created
 * @param partPath the path where the partition data is located
 * @param inheritTableSpecs whether to copy over the table specs for if/of/serde
 * @return result partition object or null if there is no partition
 * @throws HiveException
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate,
    String partPath, boolean inheritTableSpecs)
    throws HiveException {
  return getPartition(tbl, partSpec, forceCreate, partPath, inheritTableSpecs, null);
}
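As the javadoc notes, forceCreate = true adds the partition to the metastore when it is missing. The following sketch wraps that behaviour in a hypothetical helper (the class and method names are made up), using the five-argument overload documented above:

import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class ForceCreateSketch {
  /** Illustrative only: return the partition, creating it first if it does not exist. */
  public static Partition getOrCreate(Hive db, Table tbl, Map<String, String> partSpec)
      throws HiveException {
    // partPath = null and inheritTableSpecs = true: the new partition is placed under the
    // table's default location and inherits the table's input/output format and serde.
    return db.getPartition(tbl, partSpec, true, null, true);
  }
}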
Code example source: apache/drill

public TableSpec(Hive db, HiveConf conf, String tableName, Map<String, String> partSpec)
    throws HiveException {
  this.tableName = tableName;
  this.partSpec = partSpec;
  this.tableHandle = db.getTable(tableName);
  if (partSpec != null) {
    this.specType = SpecType.STATIC_PARTITION;
    this.partHandle = db.getPartition(tableHandle, partSpec, false);
    this.partitions = Arrays.asList(partHandle);
  } else {
    this.specType = SpecType.TABLE_ONLY;
  }
}
Code example source: apache/hive

private ReplLoadOpType getLoadPartitionType(Map<String, String> partSpec) throws InvalidOperationException, HiveException {
  Partition ptn = context.hiveDb.getPartition(table, partSpec, false);
  if (ptn == null) {
    return ReplLoadOpType.LOAD_NEW;
  }
  if (ReplUtils.replCkptStatus(tableContext.dbNameToLoadIn, ptn.getParameters(), context.dumpDirectory)) {
    return ReplLoadOpType.LOAD_SKIP;
  }
  return ReplLoadOpType.LOAD_REPLACE;
}
Code example source: apache/hive

protected Partition getPartition(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  Partition partition;
  try {
    partition = db.getPartition(table, partSpec, false);
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (partition == null && throwException) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return partition;
}
Code example source: apache/drill

protected Partition getPartition(Table table, Map<String, String> partSpec,
    boolean throwException) throws SemanticException {
  Partition partition;
  try {
    partition = db.getPartition(table, partSpec, false);
  } catch (Exception e) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
  if (partition == null && throwException) {
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
  }
  return partition;
}
Code example source: apache/hive

private Partition getPartition(Table table, Map<String, String> partSpec)
    throws SemanticException {
  try {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
    }
    return partition;
  } catch (HiveException e) {
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
}

private String toMessage(ErrorMsg message, Object detail) {
Code example source: apache/drill

private Partition getPartition(Table table, Map<String, String> partSpec)
    throws SemanticException {
  try {
    Partition partition = db.getPartition(table, partSpec, false);
    if (partition == null) {
      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
    }
    return partition;
  } catch (HiveException e) {
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
  }
}

private String toMessage(ErrorMsg message, Object detail) {
Code example source: apache/hive

  partn = db.getPartition(tab, partSpec, false);
} catch (HiveException e) {
  partn = null;
Code example source: apache/hive

@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  // set authorization mode to V2
  HiveConf conf = queryState.getConf();
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      SQLStdHiveAuthorizerFactory.class.getName());
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  SessionState.start(conf);
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
}
Code example source: apache/drill

/**
 * check that every index table contains the given partition and is fresh
 */
private static boolean containsPartition(Hive hive, Partition part, List<Index> indexes)
    throws HiveException {
  HashMap<String, String> partSpec = part.getSpec();
  if (partSpec.isEmpty()) {
    // empty specs come from non-partitioned tables
    return isIndexTableFresh(hive, indexes, part.getTable());
  }
  for (Index index : indexes) {
    // index.getDbName() is used as a default database, which is database of target table,
    // if index.getIndexTableName() does not contain database name
    String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
    Table indexTable = hive.getTable(qualified[0], qualified[1]);
    // get partitions that match the spec
    Partition matchingPartition = hive.getPartition(indexTable, partSpec, false);
    if (matchingPartition == null) {
      LOG.info("Index table " + indexTable + " did not contain built partition that matched " + partSpec);
      return false;
    } else if (!isIndexPartitionFresh(hive, index, part)) {
      return false;
    }
  }
  return true;
}
Code example source: apache/hive

@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  db = Mockito.mock(Hive.class);
  HiveConf hiveConf = queryState.getConf();
  table = new Table(DB, TABLE);
  partition = new Partition(table);
  hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  SessionState.start(hiveConf);
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
}
Code example source: apache/hive

/**
 * Creates a locking object for a table (when partition spec is not provided)
 * or a table partition
 * @param hiveDB an object to communicate with the metastore
 * @param tableName the table to create the locking object on
 * @param partSpec the spec of a partition to create the locking object on
 * @return the locking object
 * @throws HiveException
 */
public static HiveLockObject createFrom(Hive hiveDB, String tableName,
    Map<String, String> partSpec) throws HiveException {
  Table tbl = hiveDB.getTable(tableName);
  if (tbl == null) {
    throw new HiveException("Table " + tableName + " does not exist ");
  }
  HiveLockObject obj = null;
  if (partSpec == null) {
    obj = new HiveLockObject(tbl, null);
  } else {
    Partition par = hiveDB.getPartition(tbl, partSpec, false);
    if (par == null) {
      throw new HiveException("Partition " + partSpec + " for table " +
          tableName + " does not exist");
    }
    obj = new HiveLockObject(par, null);
  }
  return obj;
}
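A hedged usage sketch for createFrom; the table name and the ds partition value below are made up, and the Hive client is assumed to come from Hive.get() as in the first sketch:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class LockObjectSketch {
  public static HiveLockObject lockForPartition(Hive db) throws HiveException {
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("ds", "2023-01-01");  // hypothetical partition spec
    // Resolves the partition via Hive.getPartition(tbl, partSpec, false) internally and
    // throws HiveException if the table or the partition does not exist.
    return HiveLockObject.createFrom(db, "default.sales", partSpec);
  }
}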
Code example source: apache/hive

Partition existingPtn = db.getPartition(existingTable, partSpec, false);
return ((existingPtn != null)
    && replicationSpec.allowEventReplacementInto(existingPtn.getParameters()));
Code example source: apache/hive

Partition par = db.getPartition(tbl, partSpec, false);
if (par == null) {
  throw new HiveException("Partition " + partSpec + " for table " +
Code example source: apache/hive

@Before
public void setup() throws Exception {
  queryState = new QueryState.Builder().build();
  HiveConf conf = queryState.getConf();
  conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY,
      TestHiveAuthorizationTaskFactory.DummyHiveAuthorizationTaskFactoryImpl.class.getName());
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  db = Mockito.mock(Hive.class);
  table = new Table(DB, TABLE);
  partition = new Partition(table);
  SessionState.start(conf);
  context = new Context(conf);
  parseDriver = new ParseDriver();
  analyzer = new DDLSemanticAnalyzer(queryState, db);
  Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
  Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
  Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
      .thenReturn(partition);
  HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator();
  auth.setConf(conf);
  currentUser = auth.getUserName();
  DummyHiveAuthorizationTaskFactoryImpl.reset();
}
Code example source: apache/hive

  addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
  Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
  if (part == null) {
    throw new HiveException("Specified partition does not exist");
Code example source: apache/hive

private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd,
    TaskInformation ti) throws HiveException, IOException, InvalidOperationException {
  List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec());
  db.validatePartitionNameCharacters(partVals);
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    Utilities.FILE_OP_LOGGER.trace("loadPartition called from " + tbd.getSourcePath()
        + " into " + tbd.getTable().getTableName());
  }
  db.loadPartition(tbd.getSourcePath(), db.getTable(tbd.getTable().getTableName()),
      tbd.getPartitionSpec(), tbd.getLoadFileType(), tbd.getInheritTableSpecs(),
      tbd.getInheritLocation(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
      work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
          !tbd.isMmTable(),
      resetStatisticsProps(table), tbd.getWriteId(), tbd.getStmtId(),
      tbd.isInsertOverwrite());
  Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
  // See the comment inside updatePartitionBucketSortColumns.
  if (!tbd.isMmTable() && (ti.bucketCols != null || ti.sortCols != null)) {
    updatePartitionBucketSortColumns(db, table, partn, ti.bucketCols,
        ti.numBuckets, ti.sortCols);
  }
  DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition());
  // add this partition to post-execution hook
  if (work.getOutputs() != null) {
    DDLTask.addIfAbsentByName(new WriteEntity(partn,
        getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
  }
  return dc;
}
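The example above loads data into a static partition and then calls getPartition to fetch the freshly created partition's metadata for the post-execution hook. Below is a minimal hedged sketch of that lookup step only, with hypothetical names and the load itself left out (Hive.loadPartition has several overloads that differ between releases):

import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PostLoadLookupSketch {
  /** Illustrative only: after loading a static partition, look it back up for hooks/stats. */
  public static Partition lookupLoadedPartition(Hive db, String tableName,
      Map<String, String> partSpec) throws HiveException {
    Table table = db.getTable(tableName);
    Partition partn = db.getPartition(table, partSpec, false);
    if (partn == null) {
      // Not expected right after a successful load; surfaced as an error here.
      throw new HiveException("Loaded partition " + partSpec + " not found in " + tableName);
    }
    return partn;
  }
}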