Usage and code examples of the org.apache.hadoop.hive.metastore.api.Table.getParameters() method


This article collects code examples of the Java method org.apache.hadoop.hive.metastore.api.Table.getParameters() and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and serve as practical references. Details of the Table.getParameters() method are as follows:
Package path: org.apache.hadoop.hive.metastore.api.Table
Class name: Table
Method name: getParameters

Table.getParameters introduction

Returns the table's parameters as a Map<String, String> of key/value metadata stored with the table in the Hive metastore (for example transactional, auto.purge, or statistics-related properties). The map may be null or empty if no parameters have been set.
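
Before the collected examples, here is a minimal, hypothetical sketch of the typical read path: connect to the metastore, fetch a Table, and inspect its parameter map. The database name "default", the table name "my_table", and the "transactional" key below are placeholders used only for illustration and do not come from the examples that follow.

import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class GetParametersExample {
 public static void main(String[] args) throws Exception {
  // Connect to the metastore described by the local HiveConf (hive-site.xml).
  IMetaStoreClient msClient = new HiveMetaStoreClient(new HiveConf());
  try {
   // "default" and "my_table" are placeholder names for this sketch.
   Table table = msClient.getTable("default", "my_table");
   // getParameters() returns the table-level key/value metadata.
   Map<String, String> params = table.getParameters();
   boolean transactional = params != null
     && "true".equalsIgnoreCase(params.get("transactional"));
   System.out.println("transactional = " + transactional);
  } finally {
   msClient.close();
  }
 }
}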

Code examples

Code example source: origin: apache/hive

/**
 * @return The table parameters.
 * @see org.apache.hadoop.hive.metastore.api.Table#getParameters()
 */
public Map<String, String> getParameters() {
 return tTable.getParameters();
}

Code example source: origin: apache/hive

public static boolean isNonNativeTable(Table table) {
 if (table == null || table.getParameters() == null) {
  return false;
 }
 return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
}

Code example source: origin: prestodb/presto

private static boolean isPrestoView(Table table)
{
  return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG));
}

Code example source: origin: apache/hive

/**
 * When a table is marked transactional=true but transactional_properties is not set then
 * transactional_properties should take on the default value.  Easier to make this explicit in
 * table definition than keep checking everywhere if it's set or not.
 */
private void normalizeTransactionalPropertyDefault(Table table) {
 table.getParameters().put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
   DEFAULT_TRANSACTIONAL_PROPERTY);
}

Code example source: origin: apache/hive

private boolean noAutoCompactSet(Table t) {
 String noAutoCompact =
   t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT);
 if (noAutoCompact == null) {
  noAutoCompact =
    t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT.toUpperCase());
 }
 return noAutoCompact != null && noAutoCompact.equalsIgnoreCase("true");
}

Code example source: origin: apache/hive

private static boolean is_partition_spec_grouping_enabled(Table table) {
 Map<String, String> parameters = table.getParameters();
 return parameters.containsKey("hive.hcatalog.partition.spec.grouping.enabled")
   && parameters.get("hive.hcatalog.partition.spec.grouping.enabled").equalsIgnoreCase("true");
}

Code example source: origin: apache/hive

/**
 * Should produce the same result as
 * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
 */
public static boolean isAcidTable(Table table) {
 return TxnUtils.isTransactionalTable(table) &&
  TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters()
  .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES));
}

Code example source: origin: apache/hive

private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) {
 // Data needs deletion. Check if trash may be skipped.
 // Trash may be skipped iff:
 //  1. deleteData == true, obviously.
 //  2. tbl is external.
 //  3. Either
 //    3.1. User has specified PURGE from the commandline, and if not,
 //    3.2. User has set the table to auto-purge.
 return ((envContext != null) && Boolean.parseBoolean(envContext.getProperties().get("ifPurge")))
  || (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
}

Code example source: origin: apache/hive

public static boolean isAvroTableWithExternalSchema(Table table) {
 if (table.getSd().getSerdeInfo().getSerializationLib().equals(AVRO_SERDE_CLASSNAME)) {
  String schemaUrl = table.getParameters().get(AVRO_SCHEMA_URL_PROPERTY);
  if (schemaUrl != null && !schemaUrl.isEmpty()) {
   return true;
  }
 }
 return false;
}

Code example source: origin: apache/hive

/**
 * Should produce the same result as
 * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#isAcidTable(org.apache.hadoop.hive.metastore.api.Table)}
 */
public static boolean isFullAcidTable(org.apache.hadoop.hive.metastore.api.Table table) {
 return isTransactionalTable(table) &&
   !isInsertOnlyTable(table.getParameters());
}

Code example source: origin: prestodb/presto

public static boolean isAvroTableWithSchemaSet(org.apache.hadoop.hive.metastore.api.Table table)
{
  if (table.getParameters() == null) {
    return false;
  }
  StorageDescriptor storageDescriptor = table.getSd();
  if (storageDescriptor == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table does not contain a storage descriptor: " + table);
  }
  SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
  if (serdeInfo == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
  }
  return serdeInfo.getSerializationLib() != null &&
      table.getParameters().get(AVRO_SCHEMA_URL_KEY) != null &&
      serdeInfo.getSerializationLib().equals(AVRO.getSerDe());
}

Code example source: origin: prestodb/presto

@Override
public PartitionStatistics getTableStatistics(String databaseName, String tableName)
{
  Table table = getTable(databaseName, tableName)
      .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
  List<String> dataColumns = table.getSd().getCols().stream()
      .map(FieldSchema::getName)
      .collect(toImmutableList());
  HiveBasicStatistics basicStatistics = getHiveBasicStatistics(table.getParameters());
  Map<String, HiveColumnStatistics> columnStatistics = getTableColumnStatistics(databaseName, tableName, dataColumns, basicStatistics.getRowCount());
  return new PartitionStatistics(basicStatistics, columnStatistics);
}

Code example source: origin: prestodb/presto

@Override
public synchronized void updateTableStatistics(String databaseName, String tableName, Function<PartitionStatistics, PartitionStatistics> update)
{
  PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName);
  PartitionStatistics updatedStatistics = update.apply(currentStatistics);
  Table originalTable = getTable(databaseName, tableName)
      .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
  Table modifiedTable = originalTable.deepCopy();
  HiveBasicStatistics basicStatistics = updatedStatistics.getBasicStatistics();
  modifiedTable.setParameters(updateStatisticsParameters(modifiedTable.getParameters(), basicStatistics));
  alterTable(databaseName, tableName, modifiedTable);
  com.facebook.presto.hive.metastore.Table table = fromMetastoreApiTable(modifiedTable);
  OptionalLong rowCount = basicStatistics.getRowCount();
  List<ColumnStatisticsObj> metastoreColumnStatistics = updatedStatistics.getColumnStatistics().entrySet().stream()
      .map(entry -> createMetastoreColumnStatistics(entry.getKey(), table.getColumn(entry.getKey()).get().getType(), entry.getValue(), rowCount))
      .collect(toImmutableList());
  if (!metastoreColumnStatistics.isEmpty()) {
    setTableColumnStatistics(databaseName, tableName, metastoreColumnStatistics);
  }
  Set<String> removedColumnStatistics = difference(currentStatistics.getColumnStatistics().keySet(), updatedStatistics.getColumnStatistics().keySet());
  removedColumnStatistics.forEach(column -> deleteTableColumnStatistics(databaseName, tableName, column));
}

Code example source: origin: apache/hive

public static String createTableObjJson(Table tableObj) throws TException {
 //Note: The parameters of the Table object will be removed in the filter if it matches
 // any pattern provided through EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS
 filterMapkeys(tableObj.getParameters(), paramsFilter);
 TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
 return serializer.toString(tableObj, "UTF-8");
}

Code example source: origin: apache/hive

public static Properties getSchema(
  org.apache.hadoop.hive.metastore.api.Partition part,
  org.apache.hadoop.hive.metastore.api.Table table) {
 return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table
   .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys());
}

Code example source: origin: apache/hive

private void verifyStatsUpToDate(String tbl, List<String> cols, IMetaStoreClient msClient,
  boolean isUpToDate) throws Exception {
 Table table = msClient.getTable(ss.getCurrentDatabase(), tbl);
 verifyStatsUpToDate(table.getParameters(), cols, isUpToDate);
}

Code example source: origin: apache/hive

private void setTableSkipProperty(
  IMetaStoreClient msClient, String tbl, String val) throws Exception {
 Table table = msClient.getTable(ss.getCurrentDatabase(), tbl);
 table.getParameters().put(StatsUpdaterThread.SKIP_STATS_AUTOUPDATE_PROPERTY, val);
 msClient.alter_table(table.getDbName(), table.getTableName(), table);
}

Code example source: origin: apache/hive

@Test(expected = InvalidOperationException.class)
public void alterTableBogusCatalog() throws TException {
 Table t = testTables[0].deepCopy();
 t.getParameters().put("a", "b");
 client.alter_table("nosuch", t.getDbName(), t.getTableName(), t);
}

Code example source: origin: apache/hive

private void verifyStatsUpToDate(String tbl, List<String> cols, IMetaStoreClient msClient,
  String validWriteIds, boolean isUpToDate) throws Exception {
 Table table = msClient.getTable(ss.getCurrentCatalog(), ss.getCurrentDatabase(), tbl, validWriteIds);
 verifyStatsUpToDate(table.getParameters(), cols, isUpToDate);
}

Code example source: origin: apache/hive

private TableMeta createTestTable(String dbName, String tableName, TableType type, String comment)
 throws Exception {
 Table table  = createTable(dbName, tableName, type);
 table.getParameters().put("comment", comment);
 client.createTable(table);
 TableMeta tableMeta = new TableMeta(dbName, tableName, type.name());
 tableMeta.setComments(comment);
 tableMeta.setCatName("hive");
 return tableMeta;
}
