Usage of the org.apache.hadoop.hive.metastore.api.Table.isSetPartitionKeys() method, with code examples


This article collects Java code examples for org.apache.hadoop.hive.metastore.api.Table.isSetPartitionKeys() and shows how the method is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven and similar sources, so they should serve as useful references. Details of Table.isSetPartitionKeys() are as follows:
Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: isSetPartitionKeys

About Table.isSetPartitionKeys

Returns true if the field partitionKeys is set (has been assigned a value) and false otherwise.
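In practice the check is typically paired with getPartitionKeys() to decide whether a table is partitioned before its partition columns are touched. Below is a minimal sketch of that pattern (the class and helper method names are invented for illustration; only the Table and FieldSchema calls come from the metastore API):

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeyExample {
 // Returns the table's partition columns, or an empty list for an unpartitioned table.
 // Guarding with isSetPartitionKeys() avoids reading a Thrift field that was never assigned.
 static List<FieldSchema> partitionColumns(Table table) {
  if (table.isSetPartitionKeys() && !table.getPartitionKeys().isEmpty()) {
   return table.getPartitionKeys();
  }
  return Collections.emptyList();
 }
}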

Code examples

Code example source: apache/hive

private Partition getPartitionObj(String db, String table, List<String> partitionVals, Table tableObj)
    throws MetaException, NoSuchObjectException {
 if (tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty()) {
  return get_partition(db, table, partitionVals);
 }
 return null;
}

Code example source: apache/hive

// From the Thrift-generated hashCode(): partitionKeys contributes to the hash only when it is set.
list.add(sd);
boolean present_partitionKeys = true && (isSetPartitionKeys());
list.add(present_partitionKeys);
if (present_partitionKeys)
  list.add(partitionKeys);

Code example source: apache/hive

static private void updateStatsForTable(RawStore rawStore, Table before, Table after, String catalogName,
                    String dbName, String tableName) throws Exception {
 ColumnStatistics colStats = null;
 List<String> deletedCols = new ArrayList<>();
 if (before.isSetPartitionKeys()) {
  List<Partition> parts = sharedCache.listCachedPartitions(catalogName, dbName, tableName, -1);
  for (Partition part : parts) {
   colStats = updateStatsForPart(rawStore, before, catalogName, dbName, tableName, part);
  }
 }
 boolean needUpdateAggrStat = false;
 List<ColumnStatisticsObj> statisticsObjs = HiveAlterHandler.alterTableUpdateTableColumnStats(rawStore, before,
     after, null, null, rawStore.getConf(), deletedCols);
 if (colStats != null) {
  sharedCache.updateTableColStatsInCache(catalogName, dbName, tableName, statisticsObjs);
  needUpdateAggrStat = true;
 }
 for (String column : deletedCols) {
  sharedCache.removeTableColStatsFromCache(catalogName, dbName, tableName, column);
  needUpdateAggrStat = true;
 }
}

Code example source: apache/hive

// From the Thrift-generated compareTo(): an unset field sorts before a set one, then the values are compared.
lastComparison = Boolean.valueOf(isSetPartitionKeys()).compareTo(other.isSetPartitionKeys());
if (lastComparison != 0) {
  return lastComparison;
}
if (isSetPartitionKeys()) {
  lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionKeys, other.partitionKeys);
  if (lastComparison != 0) {
    return lastComparison;
  }
}

Code example source: apache/hive

AggrStats aggrStatsAllPartitions = null;
AggrStats aggrStatsAllButDefaultPartition = null;
if (table.isSetPartitionKeys()) {
 Deadline.startTimer("getPartitions");
 partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);

Code example source: apache/hive

if (!table.isSetPartitionKeys() && (tableColStats != null)) {
  if (!tblWrapper.updateTableColStats(tableColStats.getStatsObj())) {
    return false;
  }
}

Code example source: apache/hive

this.sd = new StorageDescriptor(other.sd);
if (other.isSetPartitionKeys()) {
  List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>(other.partitionKeys.size());
  for (FieldSchema other_element : other.partitionKeys) {
    __this__partitionKeys.add(new FieldSchema(other_element));
  }
  this.partitionKeys = __this__partitionKeys;
}

Code example source: apache/hive

// Two fragments of the Thrift TupleScheme writer: the optionals bitset records which optional
// fields are present, and only the fields that are set are then written to the protocol.
optionals.set(7);
if (struct.isSetPartitionKeys()) {
  optionals.set(8);
}
struct.sd.write(oprot);
if (struct.isSetPartitionKeys()) {

Code example source: apache/hive

private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 boolean committed = false;
 rawStore.openTransaction();
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  if (!table.isSetPartitionKeys()) {
   List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
   ColumnStatistics tableColStats =
     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
   Deadline.stopTimer();
   if (tableColStats != null) {
    sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
      StringUtils.normalizeIdentifier(dbName),
      StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
    // Update the table to get consistent stats state.
    sharedCache.alterTableInCache(catName, dbName, tblName, table);
   }
  }
  committed = rawStore.commitTransaction();
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Unable to refresh table column stats for table: " + tblName, e);
 } finally {
  if (!committed) {
   sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName);
   rawStore.rollbackTransaction();
  }
 }
}

Code example source: apache/hive

boolean this_present_partitionKeys = true && this.isSetPartitionKeys();
boolean that_present_partitionKeys = true && that.isSetPartitionKeys();
if (this_present_partitionKeys || that_present_partitionKeys) {
  if (!(this_present_partitionKeys && that_present_partitionKeys))
    return false;
  if (!this.partitionKeys.equals(that.partitionKeys))
    return false;
}

Code example source: apache/hive

return isSetSd();
case PARTITION_KEYS:
 return isSetPartitionKeys();
case PARAMETERS:
 return isSetParameters();

Code example source: apache/hive

throw new NoSuchObjectException(dbName + "." + tblName + " not found");
boolean isPartitioned = tbl.isSetPartitionKeys() && tbl.getPartitionKeysSize() > 0;
String tableInputFormat = tbl.isSetSd() ? tbl.getSd().getInputFormat() : null;
if (!isPartitioned) {

Code example source: org.apache.hadoop.hive/hive-metastore

this.sd = new StorageDescriptor(other.sd);
if (other.isSetPartitionKeys()) {
 List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>();
 for (FieldSchema other_element : other.partitionKeys) {

Code example source: org.spark-project.hive/hive-metastore

this.sd = new StorageDescriptor(other.sd);
if (other.isSetPartitionKeys()) {
 List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>();
 for (FieldSchema other_element : other.partitionKeys) {

Code example source: com.facebook.presto.hive/hive-apache

this.sd = new StorageDescriptor(other.sd);
if (other.isSetPartitionKeys()) {
 List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>();
 for (FieldSchema other_element : other.partitionKeys) {

Code example source: org.apache.hive/hive-standalone-metastore

private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  if (!table.isSetPartitionKeys()) {
   List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
   ColumnStatistics tableColStats =
     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
   Deadline.stopTimer();
   if (tableColStats != null) {
    sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
      StringUtils.normalizeIdentifier(dbName),
      StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
   }
  }
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Unable to refresh table column stats for table: " + tblName, e);
 }
}

Code example source: org.apache.hadoop.hive/hive-metastore

return isSetSd();
case PARTITION_KEYS:
 return isSetPartitionKeys();
case PARAMETERS:
 return isSetParameters();

Code example source: com.facebook.presto.hive/hive-apache

return isSetSd();
case PARTITION_KEYS:
 return isSetPartitionKeys();
case PARAMETERS:
 return isSetParameters();

Code example source: org.spark-project.hive/hive-metastore

return isSetSd();
case PARTITION_KEYS:
 return isSetPartitionKeys();
case PARAMETERS:
 return isSetParameters();

Code example source: org.apache.hive/hive-standalone-metastore

return isSetSd();
case PARTITION_KEYS:
 return isSetPartitionKeys();
case PARAMETERS:
 return isSetParameters();
