org.apache.hadoop.hive.metastore.api.Table.getParameters()方法的使用及代码示例

x33g5p2x  于2022-01-29 转载在 其他  
字(8.9k)|赞(0)|评价(0)|浏览(248)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.getParameters()方法的一些代码示例,展示了Table.getParameters()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getParameters()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:getParameters

Table.getParameters介绍

暂无

代码示例

代码示例来源:origin: apache/hive

  1. /**
  2. * @return The table parameters.
  3. * @see org.apache.hadoop.hive.metastore.api.Table#getParameters()
  4. */
  5. public Map<String, String> getParameters() {
  6. return tTable.getParameters();
  7. }

代码示例来源:origin: apache/hive

  1. public static boolean isNonNativeTable(Table table) {
  2. if (table == null || table.getParameters() == null) {
  3. return false;
  4. }
  5. return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
  6. }

代码示例来源:origin: prestodb/presto

  1. private static boolean isPrestoView(Table table)
  2. {
  3. return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG));
  4. }

代码示例来源:origin: apache/hive

  1. /**
  2. * When a table is marked transactional=true but transactional_properties is not set then
  3. * transactional_properties should take on the default value. Easier to make this explicit in
  4. * table definition than keep checking everywhere if it's set or not.
  5. */
  6. private void normalizeTransactionalPropertyDefault(Table table) {
  7. table.getParameters().put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
  8. DEFAULT_TRANSACTIONAL_PROPERTY);
  9. }
  10. /**

代码示例来源:origin: apache/hive

  1. private boolean noAutoCompactSet(Table t) {
  2. String noAutoCompact =
  3. t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT);
  4. if (noAutoCompact == null) {
  5. noAutoCompact =
  6. t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT.toUpperCase());
  7. }
  8. return noAutoCompact != null && noAutoCompact.equalsIgnoreCase("true");
  9. }
  10. }

代码示例来源:origin: apache/hive

  1. private static boolean is_partition_spec_grouping_enabled(Table table) {
  2. Map<String, String> parameters = table.getParameters();
  3. return parameters.containsKey("hive.hcatalog.partition.spec.grouping.enabled")
  4. && parameters.get("hive.hcatalog.partition.spec.grouping.enabled").equalsIgnoreCase("true");
  5. }

代码示例来源:origin: apache/hive

  1. /**
  2. * Should produce the same result as
  3. * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
  4. */
  5. public static boolean isAcidTable(Table table) {
  6. return TxnUtils.isTransactionalTable(table) &&
  7. TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters()
  8. .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES));
  9. }

代码示例来源:origin: apache/hive

  1. private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) {
  2. // Data needs deletion. Check if trash may be skipped.
  3. // Trash may be skipped iff:
  4. // 1. deleteData == true, obviously.
  5. // 2. tbl is external.
  6. // 3. Either
  7. // 3.1. User has specified PURGE from the commandline, and if not,
  8. // 3.2. User has set the table to auto-purge.
  9. return ((envContext != null) && Boolean.parseBoolean(envContext.getProperties().get("ifPurge")))
  10. || (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
  11. }
  12. private void deleteParentRecursive(Path parent, int depth, boolean mustPurge, boolean needRecycle)

代码示例来源:origin: apache/hive

  1. public static boolean isAvroTableWithExternalSchema(Table table) {
  2. if (table.getSd().getSerdeInfo().getSerializationLib().equals(AVRO_SERDE_CLASSNAME)) {
  3. String schemaUrl = table.getParameters().get(AVRO_SCHEMA_URL_PROPERTY);
  4. if (schemaUrl != null && !schemaUrl.isEmpty()) {
  5. return true;
  6. }
  7. }
  8. return false;
  9. }

代码示例来源:origin: apache/hive

  1. /**
  2. * Should produce the same result as
  3. * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
  4. */
  5. public static boolean isFullAcidTable(org.apache.hadoop.hive.metastore.api.Table table) {
  6. return isTransactionalTable(table) &&
  7. !isInsertOnlyTable(table.getParameters());
  8. }

代码示例来源:origin: prestodb/presto

  1. public static boolean isAvroTableWithSchemaSet(org.apache.hadoop.hive.metastore.api.Table table)
  2. {
  3. if (table.getParameters() == null) {
  4. return false;
  5. }
  6. StorageDescriptor storageDescriptor = table.getSd();
  7. if (storageDescriptor == null) {
  8. throw new PrestoException(HIVE_INVALID_METADATA, "Table does not contain a storage descriptor: " + table);
  9. }
  10. SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
  11. if (serdeInfo == null) {
  12. throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
  13. }
  14. return serdeInfo.getSerializationLib() != null &&
  15. table.getParameters().get(AVRO_SCHEMA_URL_KEY) != null &&
  16. serdeInfo.getSerializationLib().equals(AVRO.getSerDe());
  17. }

代码示例来源:origin: prestodb/presto

  1. @Override
  2. public PartitionStatistics getTableStatistics(String databaseName, String tableName)
  3. {
  4. Table table = getTable(databaseName, tableName)
  5. .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
  6. List<String> dataColumns = table.getSd().getCols().stream()
  7. .map(FieldSchema::getName)
  8. .collect(toImmutableList());
  9. HiveBasicStatistics basicStatistics = getHiveBasicStatistics(table.getParameters());
  10. Map<String, HiveColumnStatistics> columnStatistics = getTableColumnStatistics(databaseName, tableName, dataColumns, basicStatistics.getRowCount());
  11. return new PartitionStatistics(basicStatistics, columnStatistics);
  12. }

代码示例来源:origin: prestodb/presto

  1. @Override
  2. public synchronized void updateTableStatistics(String databaseName, String tableName, Function<PartitionStatistics, PartitionStatistics> update)
  3. {
  4. PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName);
  5. PartitionStatistics updatedStatistics = update.apply(currentStatistics);
  6. Table originalTable = getTable(databaseName, tableName)
  7. .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
  8. Table modifiedTable = originalTable.deepCopy();
  9. HiveBasicStatistics basicStatistics = updatedStatistics.getBasicStatistics();
  10. modifiedTable.setParameters(updateStatisticsParameters(modifiedTable.getParameters(), basicStatistics));
  11. alterTable(databaseName, tableName, modifiedTable);
  12. com.facebook.presto.hive.metastore.Table table = fromMetastoreApiTable(modifiedTable);
  13. OptionalLong rowCount = basicStatistics.getRowCount();
  14. List<ColumnStatisticsObj> metastoreColumnStatistics = updatedStatistics.getColumnStatistics().entrySet().stream()
  15. .map(entry -> createMetastoreColumnStatistics(entry.getKey(), table.getColumn(entry.getKey()).get().getType(), entry.getValue(), rowCount))
  16. .collect(toImmutableList());
  17. if (!metastoreColumnStatistics.isEmpty()) {
  18. setTableColumnStatistics(databaseName, tableName, metastoreColumnStatistics);
  19. }
  20. Set<String> removedColumnStatistics = difference(currentStatistics.getColumnStatistics().keySet(), updatedStatistics.getColumnStatistics().keySet());
  21. removedColumnStatistics.forEach(column -> deleteTableColumnStatistics(databaseName, tableName, column));
  22. }

代码示例来源:origin: apache/hive

  1. public static String createTableObjJson(Table tableObj) throws TException {
  2. //Note: The parameters of the Table object will be removed in the filter if it matches
  3. // any pattern provided through EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS
  4. filterMapkeys(tableObj.getParameters(), paramsFilter);
  5. TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
  6. return serializer.toString(tableObj, "UTF-8");
  7. }

代码示例来源:origin: apache/hive

  1. public static Properties getSchema(
  2. org.apache.hadoop.hive.metastore.api.Partition part,
  3. org.apache.hadoop.hive.metastore.api.Table table) {
  4. return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table
  5. .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
  6. }

代码示例来源:origin: apache/hive

  1. private void verifyStatsUpToDate(String tbl, List<String> cols, IMetaStoreClient msClient,
  2. boolean isUpToDate) throws Exception {
  3. Table table = msClient.getTable(ss.getCurrentDatabase(), tbl);
  4. verifyStatsUpToDate(table.getParameters(), cols, isUpToDate);
  5. }

代码示例来源:origin: apache/hive

  1. private void setTableSkipProperty(
  2. IMetaStoreClient msClient, String tbl, String val) throws Exception {
  3. Table table = msClient.getTable(ss.getCurrentDatabase(), tbl);
  4. table.getParameters().put(StatsUpdaterThread.SKIP_STATS_AUTOUPDATE_PROPERTY, val);
  5. msClient.alter_table(table.getDbName(), table.getTableName(), table);
  6. }

代码示例来源:origin: apache/hive

  1. @Test(expected = InvalidOperationException.class)
  2. public void alterTableBogusCatalog() throws TException {
  3. Table t = testTables[0].deepCopy();
  4. t.getParameters().put("a", "b");
  5. client.alter_table("nosuch", t.getDbName(), t.getTableName(), t);
  6. }

代码示例来源:origin: apache/hive

  1. private void verifyStatsUpToDate(String tbl, List<String> cols, IMetaStoreClient msClient,
  2. String validWriteIds, boolean isUpToDate) throws Exception {
  3. Table table = msClient.getTable(ss.getCurrentCatalog(), ss.getCurrentDatabase(), tbl, validWriteIds);
  4. verifyStatsUpToDate(table.getParameters(), cols, isUpToDate);
  5. }

代码示例来源:origin: apache/hive

  1. private TableMeta createTestTable(String dbName, String tableName, TableType type, String comment)
  2. throws Exception {
  3. Table table = createTable(dbName, tableName, type);
  4. table.getParameters().put("comment", comment);
  5. client.createTable(table);
  6. TableMeta tableMeta = new TableMeta(dbName, tableName, type.name());
  7. tableMeta.setComments(comment);
  8. tableMeta.setCatName("hive");
  9. return tableMeta;
  10. }

相关文章

Table类方法