Usage of the org.apache.hadoop.hive.ql.metadata.Hive.createPartition() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Hive.createPartition() method and shows how Hive.createPartition() is used in practice. The examples are taken from selected open-source projects published on GitHub, Stack Overflow and Maven, so they should be a useful reference. Details of the Hive.createPartition() method:
Package: org.apache.hadoop.hive.ql.metadata
Class: Hive
Method: createPartition

Hive.createPartition overview

Creates a partition for a table, given the partition keys and their values. Throws HiveException if the table does not exist or the partition already exists.
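
Before turning to the extracted examples, here is a minimal, self-contained sketch of a typical call. The database name "default", table name "web_logs", and the partition column/value are illustrative assumptions only; the session setup mirrors the apache/hive test shown further down.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.session.SessionState;

public class CreatePartitionSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SessionState.start(conf);      // session setup, as in the apache/hive test example below
    Hive hive = Hive.get(conf);    // thread-local Hive client bound to this configuration

    // the table is assumed to already exist and to be partitioned by a single column "ds"
    Table table = hive.getTable("default", "web_logs");

    // partition spec: one entry per partition column, column name -> value
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2022-01-20");

    // creates the partition in the metastore; throws HiveException if the table
    // does not exist or the partition already exists
    Partition p = hive.createPartition(table, partSpec);
    System.out.println("Created partition at " + p.getLocation());
  }
}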

Code examples

Code example source: apache/drill

private void msckAddPartitionsOneByOne(Hive db, Table table,
  Set<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
 for (CheckResult.PartitionResult part : partsNotInMs) {
  try {
   db.createPartition(table, Warehouse.makeSpecFromName(part.getPartitionName()));
   repairOutput.add("Repair: Added partition to metastore "
     + table.getTableName() + ':' + part.getPartitionName());
  } catch (Exception e) {
   LOG.warn("Repair error, could not add partition to metastore: ", e);
  }
 }
}

Code example source: apache/drill

Partition indexPart = db.getPartition(indexTbl, partSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, partSpec);
}

// second excerpt: the same get-or-create pattern with a different partition spec
Partition indexPart = db.getPartition(indexTbl, pSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, pSpec);
}
Code example source: apache/hive

@BeforeClass
public static void init() throws Exception {
 queryState = new QueryState.Builder().build();
 conf = queryState.getConf();
 conf
 .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
   "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
 SessionState.start(conf);
 // Create a table so we can work against it
 Hive h = Hive.get(conf);
 List<String> cols = new ArrayList<String>();
 cols.add("a");
 List<String> partCols = new ArrayList<String>();
 partCols.add("ds");
 h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class);
 Table t = h.getTable("foo");
 Map<String, String> partSpec = new HashMap<String, String>();
 partSpec.put("ds", "today");
 h.createPartition(t, partSpec);
}

Code example source: apache/hive

part_spec.put("hr", "12");
try {
  hm.createPartition(tbl, part_spec);
} catch (HiveException e) {
  System.err.println(StringUtils.stringifyException(e));
}

Code example source: apache/hive

Map<String, String> partVals = new HashMap<String, String>(2);
partVals.put("ds", "yesterday");
db.createPartition(u, partVals);
partVals.clear();
partVals.put("ds", "today");
db.createPartition(u, partVals);
sem.analyze(tree, ctx);

Code example source: apache/hive

hive.createPartition(table, partSpec);

Code example source: apache/hive

hm.createPartition(table, partitionSpec);

Code example source: apache/hive

hm.createPartition(table, partitionSpec);
// second call with the same spec: createPartition throws HiveException when the partition already exists
hm.createPartition(table, partitionSpec);

Code example source: apache/hive

private Table createTestTable() throws HiveException, AlreadyExistsException {
 Database db = new Database();
 db.setName(dbName);
 hive.createDatabase(db, true);
 Table table = new Table(dbName, tableName);
 table.setDbName(dbName);
 table.setInputFormatClass(TextInputFormat.class);
 table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
 table.setPartCols(partCols);
 hive.createTable(table);
 table = hive.getTable(dbName, tableName);
 Assert.assertTrue(table.getTTable().isSetId());
 table.getTTable().unsetId();
 for (Map<String, String> partSpec : parts) {
  hive.createPartition(table, partSpec);
 }
 return table;
}

Code example source: org.apache.hadoop.hive/hive-exec

/**
 * Creates a partition.
 *
 * @param tbl
 *          table for which partition needs to be created
 * @param partSpec
 *          partition keys and their values
 * @return created partition object
 * @throws HiveException
 *           if table doesn't exist or partition already exists
 */
public Partition createPartition(Table tbl, Map<String, String> partSpec)
  throws HiveException {
 return createPartition(tbl, partSpec, null);
}
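
The two-argument form above delegates to a location-aware overload, createPartition(Table, Map, Path), which the last example in this article also uses. Below is a hedged sketch of calling that overload directly; the method name, partition value, and directory name are illustrative assumptions, not taken from the examples.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class CreatePartitionWithLocation {
  // partition value and directory name below are assumptions for illustration
  static Partition addPartitionAt(Hive db, Table tbl) throws HiveException {
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2022-01-20");

    // store the partition data under a directory relative to the table path,
    // mirroring the ADD PARTITION ... LOCATION handling in the last example of this article
    Path location = new Path(tbl.getPath(), "ds=2022-01-20");
    return db.createPartition(tbl, partSpec, location);
  }
}

Passing null as the location (as the two-argument form does) leaves the metastore to derive the default partition path under the table directory.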

Code example source: apache/lens

/**
 * Adds the partition.
 *
 * @param eventName the event name
 * @param key       the key
 * @param finalPath the final path
 * @param className the class name
 * @return true, if successful
 */
private boolean addPartition(String eventName, String key, Path finalPath, String className) {
 try {
  Table t = getTable(eventName, className);
  HashMap<String, String> partSpec = new HashMap<String, String>();
  partSpec.put("dt", key);
  Partition p = client.createPartition(t, partSpec);
  p.setLocation(finalPath.toString());
  client.alterPartition(database, eventName, p, null);
  return true;
 } catch (Exception e) {
  LOG.warn("Unable to add the partition ", e);
  return false;
 }
}

Code example source: com.facebook.presto.hive/hive-apache

private void msckAddPartitionsOneByOne(Hive db, Table table,
  List<CheckResult.PartitionResult> partsNotInMs, List<String> repairOutput) {
 for (CheckResult.PartitionResult part : partsNotInMs) {
  try {
   db.createPartition(table, Warehouse.makeSpecFromName(part.getPartitionName()));
   repairOutput.add("Repair: Added partition to metastore "
     + table.getTableName() + ':' + part.getPartitionName());
  } catch (Exception e) {
   LOG.warn("Repair error, could not add partition to metastore: ", e);
  }
 }
}

Code example source: com.facebook.presto.hive/hive-apache

Partition indexPart = db.getPartition(indexTbl, partSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, partSpec);
}

// second excerpt: the same get-or-create pattern with a different partition spec
Partition indexPart = db.getPartition(indexTbl, pSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, pSpec);
}
Code example source: org.apache.hadoop.hive/hive-exec

Partition indexPart = db.getPartition(indexTbl, partSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, partSpec);
}

// second excerpt: the same get-or-create pattern with a different partition spec
Partition indexPart = db.getPartition(indexTbl, pSpec, false);
if (indexPart == null) {
  indexPart = db.createPartition(indexTbl, pSpec);
}
Code example source: org.apache.hadoop.hive/hive-exec

for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
  try {
    db.createPartition(table, Warehouse.makeSpecFromName(part
        .getPartitionName()));
    repairOutput.add("Repair: Added partition to metastore "
        + table.getTableName() + ':' + part.getPartitionName());
  } catch (Exception e) {
    // as in the msck examples above: log and continue with the next partition
    LOG.warn("Repair error, could not add partition to metastore: ", e);
  }
}

Code example source: org.apache.hadoop.hive/hive-exec

// the two-argument form is used when ADD PARTITION carries no explicit LOCATION;
// otherwise the partition path is resolved relative to the table's path
if (addPartitionDesc.getLocation() == null) {
  db.createPartition(tbl, addPartitionDesc.getPartSpec());
} else {
  if (tbl.isView()) {
    throw new HiveException("LOCATION clause illegal for view partition");
  }
  db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
      .getPath(), addPartitionDesc.getLocation()));
}
