Usage of the org.apache.hadoop.hive.metastore.api.Table.getPartitionKeysSize() method, with code examples


This article collects code examples of the Java method org.apache.hadoop.hive.metastore.api.Table.getPartitionKeysSize(), showing how Table.getPartitionKeysSize() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Table.getPartitionKeysSize() method:
Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: getPartitionKeysSize

About Table.getPartitionKeysSize

The original source provides no description. Table is the Thrift-generated metastore representation of a Hive table, and getPartitionKeysSize() returns the number of partition key columns defined on it (the size of the partitionKeys list, or 0 if that list is unset). As the examples below show, it is most often used to test whether a table is partitioned at all.
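
Before the collected examples, here is a minimal self-contained sketch of the typical idiom; PartitionKeyExample and describePartitioning are illustrative names invented for this sketch, not part of the Hive API:

import java.util.stream.Collectors;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeyExample {

  // Returns a human-readable summary of a table's partition columns.
  static String describePartitioning(Table table) {
    // The Thrift-generated getPartitionKeysSize() is null-safe: it returns 0
    // when the partitionKeys list is unset, so no null check is needed here.
    if (table.getPartitionKeysSize() == 0) {
      return table.getTableName() + " is not partitioned";
    }
    // A non-zero size implies the list is set, so it is safe to dereference.
    String cols = table.getPartitionKeys().stream()
        .map(FieldSchema::getName)
        .collect(Collectors.joining(", "));
    return table.getTableName() + " is partitioned by: " + cols;
  }
}

For a table partitioned by (year, month), describePartitioning would return something like "sales is partitioned by: year, month".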

Code examples

Code example source: apache/hive

TempTable(org.apache.hadoop.hive.metastore.api.Table t) {
  assert t != null;
  this.tTable = t;
  pTree = t.getPartitionKeysSize() > 0 ? new PartitionTree(tTable) : null;
}
private void addPartition(Partition p) throws AlreadyExistsException, MetaException {

Code example source: apache/hive

// Validates that the supplied partition spec matches the table's partition
// columns, then serializes the values in partition-key order.
private String getPartitionStr(Table tbl, Map<String, String> partName) throws InvalidPartitionException {
  if (tbl.getPartitionKeysSize() != partName.size()) {
    throw new InvalidPartitionException("Number of partition columns in table: " + tbl.getPartitionKeysSize() +
        " doesn't match with number of supplied partition values: " + partName.size());
  }
  final List<String> storedVals = new ArrayList<>(tbl.getPartitionKeysSize());
  for (FieldSchema partKey : tbl.getPartitionKeys()) {
    String partVal = partName.get(partKey.getName());
    if (null == partVal) {
      throw new InvalidPartitionException("No value found for partition column: " + partKey.getName());
    }
    storedVals.add(partVal);
  }
  return join(storedVals, ',');
}

Code example source: apache/hive

public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
  Map<String, String> partitionKeys = new LinkedHashMap<>();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    partitionKeys.put(table.getPartitionKeys().get(i).getName(),
        partition.getValues().get(i));
  }
  return partitionKeys;
}

Code example source: apache/hive

private String buildPartColStr(Table table) {
  String partColStr = "";
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    if (i != 0) {
      partColStr += ",";
    }
    partColStr += table.getPartitionKeys().get(i).getName();
  }
  return partColStr;
}

Code example source: apache/hive

private static String getCompactionCommand(Table t, Partition p) {
  StringBuilder sb = new StringBuilder("ALTER TABLE ").append(Warehouse.getQualifiedName(t));
  if (t.getPartitionKeysSize() > 0) {
    assert p != null : "must supply partition for partitioned table " +
        Warehouse.getQualifiedName(t);
    sb.append(" PARTITION(");
    for (int i = 0; i < t.getPartitionKeysSize(); i++) {
      sb.append(t.getPartitionKeys().get(i).getName()).append('=').append(
          genPartValueString(t.getPartitionKeys().get(i).getType(), p.getValues().get(i))).
          append(",");
    }
    sb.setCharAt(sb.length() - 1, ')'); // replace trailing ','
  }
  return sb.append(" COMPACT 'major'").toString();
}

Code example source: apache/hive

private void assertPartitioned() throws MetaException {
  if (tTable.getPartitionKeysSize() <= 0) {
    throw new MetaException(Warehouse.getQualifiedName(tTable) + " is not partitioned");
  }
}

Code example source: apache/drill

private int getSerDeOverheadFactor() {
  final int projectedColumnCount;
  if (Utilities.isStarQuery(columns)) {
    Table hiveTable = hiveReadEntry.getTable();
    projectedColumnCount = hiveTable.getSd().getColsSize() + hiveTable.getPartitionKeysSize();
  } else {
    // In cost estimation, # of project columns should be >= 1, even for skipAll query.
    projectedColumnCount = Math.max(columns.size(), 1);
  }
  return projectedColumnCount * HIVE_SERDE_SCAN_OVERHEAD_FACTOR_PER_COLUMN;
}

Code example source: apache/hive

private static void createTempTable(org.apache.hadoop.hive.metastore.api.Table t) {
  if (t.getPartitionKeysSize() <= 0) {
    // do nothing as it's not a partitioned table
    return;
  }
  String qualifiedTableName = Warehouse.
      getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase());
  SessionState ss = SessionState.get();
  if (ss == null) {
    LOG.warn("No current SessionState, skipping temp partitions for " + qualifiedTableName);
    return;
  }
  TempTable tt = new TempTable(t);
  if (ss.getTempPartitions().putIfAbsent(qualifiedTableName, tt) != null) {
    throw new IllegalStateException("TempTable for " + qualifiedTableName + " already exists");
  }
}

Code example source: apache/hive

private List<Path> getLocationsForTruncate(final RawStore ms,
    final String catName,
    final String dbName,
    final String tableName,
    final Table table,
    final List<String> partNames) throws Exception {
  List<Path> locations = new ArrayList<>();
  if (partNames == null) {
    if (0 != table.getPartitionKeysSize()) {
      for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
        locations.add(new Path(partition.getSd().getLocation()));
      }
    } else {
      locations.add(new Path(table.getSd().getLocation()));
    }
  } else {
    for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
      locations.add(new Path(partition.getSd().getLocation()));
    }
  }
  return locations;
}

Code example source: apache/hive

if (!updateStats || newDir || tbl.getPartitionKeysSize() != 0) {
  return;

Code example source: apache/hive

if (table.getPartitionKeysSize() == 0) {
  Map<String, String> params = table.getParameters();
  List<String> colsToUpdate = null;

Code example source: apache/hive

if (!customDynamicLocationUsed) {
  src = new Path(getPartitionRootLocation(jobInfo.getLocation(), jobInfo.getTableInfo().getTable()
      .getPartitionKeysSize()));
} else {
  src = new Path(getCustomPartitionRootLocation(jobInfo, jobContext.getConfiguration()));

Code example source: apache/hive

private static Map<String, String> getPtnDesc(Table t, Partition p) {
  assertEquals(t.getPartitionKeysSize(), p.getValuesSize());
  Map<String, String> retval = new HashMap<String, String>();
  Iterator<String> pval = p.getValuesIterator();
  for (FieldSchema fs : t.getPartitionKeys()) {
    retval.put(fs.getName(), pval.next());
  }
  return retval;
}

Code example source: apache/hive

  return Collections.emptyList();
if (t.getPartitionKeysSize() <= 0) {

Code example source: apache/hive

@Before public void before() throws Throwable {
  tableWorkingPath = temporaryFolder.newFolder().getAbsolutePath();
  segmentsTable = derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
  Map<String, String> params = new HashMap<>();
  params.put("external.table.purge", "TRUE");
  Mockito.when(tableMock.getParameters()).thenReturn(params);
  // Mock an unpartitioned table: getPartitionKeysSize() reports 0 partition columns.
  Mockito.when(tableMock.getPartitionKeysSize()).thenReturn(0);
  StorageDescriptor storageDes = Mockito.mock(StorageDescriptor.class);
  Mockito.when(storageDes.getBucketColsSize()).thenReturn(0);
  Mockito.when(tableMock.getSd()).thenReturn(storageDes);
  Mockito.when(tableMock.getDbName()).thenReturn(DB_NAME);
  Mockito.when(tableMock.getTableName()).thenReturn(TABLE_NAME);
  config = new Configuration();
  config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), "hive-" + UUID.randomUUID().toString());
  config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath);
  config.set(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY),
      new Path(tableWorkingPath, "finalSegmentDir").toString());
  config.set("hive.druid.maxTries", "0");
  druidStorageHandler =
      new DruidStorageHandler(derbyConnectorRule.getConnector(),
          derbyConnectorRule.metadataTablesConfigSupplier().get());
  druidStorageHandler.setConf(config);
}

Code example source: apache/hive

  throw new MetaException("LOCATION may not be specified for Druid");
if (table.getPartitionKeysSize() != 0) {
  throw new MetaException("PARTITIONED BY may not be specified for Druid");

Code example source: apache/incubator-gobblin

Partition nativePartition = HiveMetaStoreUtils.getPartition(partition);
Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(),
    String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(),
        nativePartition.getValues().size()));

Code example source: apache/hive

    String validWriteIds, long writeId) throws Exception {
  if (partNames == null) {
    if (0 != table.getPartitionKeysSize()) {
      for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
        alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,

Code example source: apache/hive

  StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
  LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
} else if (isTxn && tbl.getPartitionKeysSize() == 0) {
  if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) {
    tbl.setIsStatsCompliant(true);

Code example source: apache/hive

    partInfo.getTableName());
if (tbl.getPartitionKeysSize() == 0) {
  throw new HCatException("The table " + partInfo.getTableName()
      + " is not partitioned.");
