org.apache.hadoop.hive.metastore.api.Table.putToParameters(): usage and code examples


This article collects Java code examples of the org.apache.hadoop.hive.metastore.api.Table.putToParameters() method and shows how it is used in practice. The examples are extracted from selected open-source projects on GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of Table.putToParameters() are as follows:

Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: putToParameters

About Table.putToParameters

The upstream Javadoc for this method is empty. Table is the Thrift-generated metastore representation of a Hive table, and putToParameters(String key, String val) adds a single key/value entry to the table's parameters map (its table properties, i.e. TBLPROPERTIES), creating the map if it does not exist yet.
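
Before the project examples below, here is a minimal, self-contained sketch of typical usage. The database name, table name, and property values are made up for illustration:

    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.Table;

    public class PutToParametersSketch {
      public static void main(String[] args) {
        Table table = new Table();
        table.setDbName("demo_db");       // hypothetical database name
        table.setTableName("demo_table"); // hypothetical table name

        // putToParameters lazily creates the underlying parameters map on
        // first use, so no explicit setParameters(...) call is needed.
        table.putToParameters("comment", "created by example code");
        table.putToParameters("EXTERNAL", "TRUE");

        Map<String, String> params = table.getParameters();
        System.out.println(params.get("EXTERNAL")); // prints TRUE
      }
    }

Note that putToParameters only mutates the in-memory Thrift object; the change reaches the metastore only once the table is written back, for example via IMetaStoreClient.alter_table() as in the uber/hudi example below.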

Code examples

Example source: apache/hive

    @Override public void preCreateTable(Table table) throws MetaException {
      if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
        throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE);
      }
      Arrays.stream(KafkaTableProperties.values())
          .filter(KafkaTableProperties::isMandatory)
          .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()),
              "Set Table property " + key.getName()));
      // Put in all the default values at pre-create time.
      Arrays.stream(KafkaTableProperties.values()).forEach((key) -> {
        if (table.getParameters().get(key.getName()) == null) {
          table.putToParameters(key.getName(), key.getDefaultValue());
        }
      });
    }

Example source: apache/hive

    private Table updatePropertiesInTable(Table table, ReplicationSpec additionalPropertiesProvider) {
      // Remove all the entries from the parameters which are added by repl tasks internally.
      Map<String, String> parameters = table.getParameters();
      if (parameters != null) {
        parameters.entrySet()
            .removeIf(e -> e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY));
      }
      if (additionalPropertiesProvider.isInReplicationScope()) {
        // Current replication state must be set on the Table object only for bootstrap dump.
        // Event replication state will be null in case of bootstrap dump.
        if (additionalPropertiesProvider.getReplSpecType()
            != ReplicationSpec.Type.INCREMENTAL_DUMP) {
          table.putToParameters(
              ReplicationSpec.KEY.CURR_STATE_ID.toString(),
              additionalPropertiesProvider.getCurrentReplicationState());
        }
      } else {
        // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
        // write(out, ",\"" + scopeKey.toString() + "\":\"" + replicationSpec.get(scopeKey) + "\"");
        // TODO: if we want to be explicit about this dump not being a replication dump, we can
        // uncomment this else section, but currently unneeded. Will require a lot of golden file
        // regen if we do so.
      }
      return table;
    }

Example source: apache/incubator-gobblin

    private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
      try {
        Table targetTable = originTable.copy();
        targetTable.setDbName(this.targetDatabase);
        targetTable.setDataLocation(targetLocation);
        /*
         * Need to set the table owner as the flow executor
         */
        targetTable.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
        targetTable.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
        targetTable.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
            Long.toString(this.startTime));
        targetTable.getTTable().unsetCreateTime();
        HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(targetTable, this);
        return targetTable;
      } catch (HiveException he) {
        throw new IOException(he);
      }
    }

Example source: apache/hive

  1. newTable.putToParameters("EXTERNAL", "TRUE");
  2. newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  3. } else {
  4. newTable.putToParameters("comment", comment);
  5. newTable.putToParameters("bucketing_version", "2");

Example source: apache/hive

    @Test public void configureJobPropertiesWithDefaultValues() throws MetaException {
      KafkaStorageHandler kafkaStorageHandler = new KafkaStorageHandler();
      TableDesc tableDesc = Mockito.mock(TableDesc.class);
      Properties properties = new Properties();
      Table preCreateTable = new Table();
      preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
      preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
      preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
      kafkaStorageHandler.preCreateTable(preCreateTable);
      preCreateTable.getParameters().forEach(properties::setProperty);
      Mockito.when(tableDesc.getProperties()).thenReturn(properties);
      Map<String, String> jobProperties = new HashMap<>();
      kafkaStorageHandler.configureInputJobProperties(tableDesc, jobProperties);
      kafkaStorageHandler.configureOutputJobProperties(tableDesc, jobProperties);
      Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()), TEST_TOPIC);
      Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()), LOCALHOST_9291);
      Arrays.stream(KafkaTableProperties.values())
          .filter(key -> !key.isMandatory())
          .forEach((key) -> Assert.assertEquals("Wrong match for key " + key.getName(),
              key.getDefaultValue(),
              jobProperties.get(key.getName())));
    }

Example source: apache/hive

    preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
    preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
    kafkaStorageHandler.preCreateTable(preCreateTable);
    preCreateTable.getParameters().forEach(properties::setProperty);

Example source: apache/drill

    if (tblProps != null) {
      for (Entry<String, String> prop : tblProps.entrySet()) {
        tt.putToParameters(prop.getKey(), prop.getValue());
      }
    }

Example source: apache/hive

    newTable.putToParameters(hive_metastoreConstants.DDL_TIME,
        Long.toString(System.currentTimeMillis() / 1000));

Example source: apache/hive

    preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
    preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
    preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    kafkaStorageHandler.preCreateTable(preCreateTable);

Example source: apache/hive

    if (tbl.getParameters() == null ||
        tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
      tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
    }

Example source: apache/hive (the same helper appears in several metastore test classes)

    private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
        List<String> partCols, boolean setPartitionLevelPrivilages)
        throws TException {
      TableBuilder builder = new TableBuilder()
          .setDbName(dbName)
          .setTableName(tableName)
          .addCol("id", "int")
          .addCol("name", "string");
      partCols.forEach(col -> builder.addPartCol(col, "string"));
      Table table = builder.build(metaStore.getConf());
      if (setPartitionLevelPrivilages) {
        table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
      }
      client.createTable(table);
      return table;
    }

Example source: apache/drill

    ttable.putToParameters(
        ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
    if ((ttable.getParameters().containsKey("EXTERNAL")) &&
        (ttable.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))) {
      ttable.putToParameters("EXTERNAL", "FALSE");
    }

Example source: apache/hive

  1. newTable.putToParameters("EXTERNAL", "TRUE");
  2. newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  3. } else {

Example source: com.hotels/circus-train-avro (also published as HotelsDotCom/circus-train)

    Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
      if (avroSchemaDestination == null) {
        return table;
      }
      avroSchemaDestination = addTrailingSlash(avroSchemaDestination);
      avroSchemaDestination += eventId;
      String avroSchemaSource = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
      copy(avroSchemaSource, avroSchemaDestination);
      table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
          avroSchemaDestination + "/" + getAvroSchemaFileName(avroSchemaSource));
      LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
      return table;
    }

Example source: uber/hudi

    void updateLastCommitTimeSynced() {
      // Sync the timeline's last commit instant into the table's TBLPROPERTIES.
      String lastCommitSynced = activeTimeline.lastInstant().get().getTimestamp();
      try {
        Table table = client.getTable(syncConfig.databaseName, syncConfig.tableName);
        table.putToParameters(HOODIE_LAST_COMMIT_TIME_SYNC, lastCommitSynced);
        client.alter_table(syncConfig.databaseName, syncConfig.tableName, table, true);
      } catch (Exception e) {
        throw new HoodieHiveSyncException(
            "Failed to update last commit time synced to " + lastCommitSynced, e);
      }
    }

Example source: com.hotels/circus-train-core (also published as HotelsDotCom/circus-train)

    private void setReplicaTableType(Table source, Table replica) {
      if (TableType.VIRTUAL_VIEW.name().equals(source.getTableType())) {
        replica.setTableType(TableType.VIRTUAL_VIEW.name());
        return;
      }
      // We set the table to external no matter what. We don't want to delete data accidentally
      // when dropping a mirrored table.
      replica.setTableType(TableType.EXTERNAL_TABLE.name());
      replica.putToParameters(EXTERNAL, "TRUE");
    }
