本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.putToParameters()
方法的一些代码示例,展示了Table.putToParameters()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.putToParameters()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:putToParameters
暂无
代码示例来源:origin: apache/hive
@Override public void preCreateTable(Table table) throws MetaException {
  // Kafka-backed tables are only supported as EXTERNAL tables.
  if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
    throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE);
  }
  // Every mandatory Kafka property must have been supplied by the user.
  for (KafkaTableProperties key : KafkaTableProperties.values()) {
    if (key.isMandatory()) {
      Preconditions.checkNotNull(table.getParameters().get(key.getName()),
          "Set Table property " + key.getName());
    }
  }
  // Fill in the default value for every property the user left unset.
  for (KafkaTableProperties key : KafkaTableProperties.values()) {
    if (table.getParameters().get(key.getName()) == null) {
      table.putToParameters(key.getName(), key.getDefaultValue());
    }
  }
}
代码示例来源:origin: apache/hive
/**
 * Strips repl-internal parameters from the table and, when dumping within a
 * replication scope, stamps the current replication state onto it.
 *
 * @param table the table whose parameters are adjusted in place (also returned)
 * @param additionalPropertiesProvider replication spec describing scope/type of the dump
 * @return the same {@code table} instance, mutated
 */
private Table updatePropertiesInTable(Table table, ReplicationSpec additionalPropertiesProvider) {
  // Remove the entry that repl tasks add internally; Map.remove is the direct
  // equivalent of the previous entrySet().removeIf(key-equals) and avoids an
  // NPE should the map ever contain a null key.
  Map<String, String> parameters = table.getParameters();
  if (parameters != null) {
    parameters.remove(ReplUtils.REPL_CHECKPOINT_KEY);
  }
  if (additionalPropertiesProvider.isInReplicationScope()) {
    // Current replication state must be set on the Table object only for bootstrap dump.
    // Event replication State will be null in case of bootstrap dump.
    if (additionalPropertiesProvider.getReplSpecType()
        != ReplicationSpec.Type.INCREMENTAL_DUMP) {
      table.putToParameters(
          ReplicationSpec.KEY.CURR_STATE_ID.toString(),
          additionalPropertiesProvider.getCurrentReplicationState());
    }
  } else {
    // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
    // write(out, ",\""+ scopeKey.toString() +"\":\"" + replicationSpec.get(scopeKey) + "\"");
    // TODO: if we want to be explicit about this dump not being a replication dump, we can
    // uncomment this else section, but currently unneeded. Will require a lot of golden file
    // regen if we do so.
  }
  return table;
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Produces a copy of {@code originTable} re-homed at {@code targetLocation}
 * in the configured target database, stamped with distcp registration metadata.
 *
 * @throws IOException wrapping any {@link HiveException} raised during the copy
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table replica = originTable.copy();
    replica.setDbName(this.targetDatabase);
    replica.setDataLocation(targetLocation);
    // The flow executor becomes the owner of the registered copy.
    replica.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    // Record who registered the table and when, for downstream tooling.
    replica.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    replica.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    // Clear the origin's create time; the copy gets its own.
    replica.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(replica, this);
    return replica;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
代码示例来源:origin: apache/hive
newTable.putToParameters("EXTERNAL", "TRUE");
newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
} else {
newTable.putToParameters("comment", comment);
newTable.putToParameters("bucketing_version", "2");
代码示例来源:origin: apache/hive
/**
 * Verifies that after {@code preCreateTable} fills in defaults, both the input
 * and output job-property configuration expose the mandatory values that were
 * set explicitly plus the documented defaults for everything else.
 */
@Test public void configureJobPropertiesWithDefaultValues() throws MetaException {
  KafkaStorageHandler kafkaStorageHandler = new KafkaStorageHandler();
  TableDesc tableDesc = Mockito.mock(TableDesc.class);
  Properties properties = new Properties();
  Table preCreateTable = new Table();
  preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
  preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
  preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
  kafkaStorageHandler.preCreateTable(preCreateTable);
  preCreateTable.getParameters().forEach(properties::setProperty);
  Mockito.when(tableDesc.getProperties()).thenReturn(properties);
  Map<String, String> jobProperties = new HashMap<>();
  kafkaStorageHandler.configureInputJobProperties(tableDesc, jobProperties);
  kafkaStorageHandler.configureOutputJobProperties(tableDesc, jobProperties);
  // JUnit's assertEquals takes (expected, actual); the previous order was
  // reversed, which produced misleading "expected X but was Y" messages.
  Assert.assertEquals(TEST_TOPIC, jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()));
  Assert.assertEquals(LOCALHOST_9291, jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()));
  Arrays.stream(KafkaTableProperties.values())
      .filter(key -> !key.isMandatory())
      .forEach((key) -> Assert.assertEquals("Wrong match for key " + key.getName(),
          key.getDefaultValue(),
          jobProperties.get(key.getName())));
}
代码示例来源:origin: apache/hive
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
kafkaStorageHandler.preCreateTable(preCreateTable);
preCreateTable.getParameters().forEach(properties::setProperty);
代码示例来源:origin: apache/drill
if (tblProps != null) {
for (Entry<String, String> prop : tblProps.entrySet()) {
tt.putToParameters(prop.getKey(), prop.getValue());
代码示例来源:origin: apache/hive
newTable.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
.currentTimeMillis() / 1000));
代码示例来源:origin: apache/hive
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC);
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString());
kafkaStorageHandler.preCreateTable(preCreateTable);
代码示例来源:origin: apache/hive
if (tbl.getParameters() == null ||
tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
代码示例来源:origin: apache/hive
/**
 * Creates a simple two-column test table (optionally partitioned) in the
 * metastore and returns it.
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages)
    throws TException {
  TableBuilder builder = new TableBuilder().setDbName(dbName).setTableName(tableName);
  builder.addCol("id", "int");
  builder.addCol("name", "string");
  // Each requested partition column is typed as string.
  for (String col : partCols) {
    builder.addPartCol(col, "string");
  }
  Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    // Marker parameter consulted by partition-level authorization tests.
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
代码示例来源:origin: apache/hive
/**
 * Creates a simple two-column test table (optionally partitioned) in the
 * metastore and returns it.
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages)
    throws TException {
  TableBuilder builder = new TableBuilder().setDbName(dbName).setTableName(tableName);
  builder.addCol("id", "int");
  builder.addCol("name", "string");
  // Every requested partition column is typed as string.
  for (String partCol : partCols) {
    builder.addPartCol(partCol, "string");
  }
  Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    // Marker parameter consulted by partition-level authorization tests.
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
代码示例来源:origin: apache/hive
/**
 * Creates a simple two-column test table (optionally partitioned) in the
 * metastore and returns it.
 */
private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivilages)
    throws Exception {
  TableBuilder builder = new TableBuilder().setDbName(dbName).setTableName(tableName);
  builder.addCol("id", "int");
  builder.addCol("name", "string");
  // Every requested partition column is typed as string.
  for (String partCol : partCols) {
    builder.addPartCol(partCol, "string");
  }
  Table table = builder.build(metaStore.getConf());
  if (setPartitionLevelPrivilages) {
    // Marker parameter consulted by partition-level authorization tests.
    table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
  }
  client.createTable(table);
  return table;
}
代码示例来源:origin: apache/drill
ttable.putToParameters(
ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
if ((ttable.getParameters().containsKey("EXTERNAL")) &&
(ttable.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))){
ttable.putToParameters("EXTERNAL","FALSE");
代码示例来源:origin: apache/hive
newTable.putToParameters("EXTERNAL", "TRUE");
newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
} else {
代码示例来源:origin: com.hotels/circus-train-avro
/**
 * Copies the table's Avro schema file under an event-scoped destination and
 * repoints the table's schema-URL parameter at the copy. No-op when no
 * destination is configured.
 */
Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    // Avro schema relocation is not configured for this replication.
    return table;
  }
  // Scope the destination by event id so every run gets its own schema copy.
  String destination = addTrailingSlash(avroSchemaDestination) + eventId;
  String source = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(source, destination);
  // Point the replica table at the relocated schema file.
  table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      destination + "/" + getAvroSchemaFileName(source));
  LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
  return table;
}
代码示例来源:origin: HotelsDotCom/circus-train
/**
 * Copies the table's Avro schema file under an event-scoped destination and
 * repoints the table's schema-URL parameter at the copy. No-op when no
 * destination is configured.
 */
Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    // Avro schema relocation is not configured for this replication.
    return table;
  }
  // Scope the destination by event id so every run gets its own schema copy.
  String destination = addTrailingSlash(avroSchemaDestination) + eventId;
  String schemaSource = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(schemaSource, destination);
  // Point the replica table at the relocated schema file.
  table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      destination + "/" + getAvroSchemaFileName(schemaSource));
  LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
  return table;
}
代码示例来源:origin: uber/hudi
// Records the timestamp of the newest instant on the active timeline into the
// Hive table's parameters (TBLPROPERTIES) so the next sync knows where to resume.
// NOTE(review): lastInstant() returns an Optional and .get() is unchecked — on an
// empty timeline this throws NoSuchElementException OUTSIDE the try/catch below
// and is not wrapped as HoodieHiveSyncException; confirm callers only invoke this
// after at least one commit exists.
void updateLastCommitTimeSynced() {
  // Set the last commit time from the TBLproperties
  String lastCommitSynced = activeTimeline.lastInstant().get().getTimestamp();
  try {
    Table table = client.getTable(syncConfig.databaseName, syncConfig.tableName);
    table.putToParameters(HOODIE_LAST_COMMIT_TIME_SYNC, lastCommitSynced);
    // Persist the mutated parameters back to the metastore.
    client.alter_table(syncConfig.databaseName, syncConfig.tableName, table, true);
  } catch (Exception e) {
    // Wrap any metastore failure with the commit time we were trying to record.
    throw new HoodieHiveSyncException(
        "Failed to get update last commit time synced to " + lastCommitSynced, e);
  }
}
代码示例来源:origin: com.hotels/circus-train-core
/**
 * Sets the replica's table type: views stay views, everything else is forced
 * to EXTERNAL so dropping the mirrored table can never delete data by accident.
 */
private void setReplicaTableType(Table source, Table replica) {
  boolean sourceIsView = TableType.VIRTUAL_VIEW.name().equals(source.getTableType());
  if (sourceIsView) {
    replica.setTableType(TableType.VIRTUAL_VIEW.name());
  } else {
    // We set the table to external no matter what. We don't want to delete data
    // accidentally when dropping a mirrored table.
    replica.setTableType(TableType.EXTERNAL_TABLE.name());
    replica.putToParameters(EXTERNAL, "TRUE");
  }
}
代码示例来源:origin: HotelsDotCom/circus-train
/**
 * Sets the replica's table type: views stay views, everything else is forced
 * to EXTERNAL so dropping the mirrored table can never delete data by accident.
 */
private void setReplicaTableType(Table source, Table replica) {
  boolean mirroringView = TableType.VIRTUAL_VIEW.name().equals(source.getTableType());
  if (mirroringView) {
    replica.setTableType(TableType.VIRTUAL_VIEW.name());
  } else {
    // We set the table to external no matter what. We don't want to delete data
    // accidentally when dropping a mirrored table.
    replica.setTableType(TableType.EXTERNAL_TABLE.name());
    replica.putToParameters(EXTERNAL, "TRUE");
  }
}
内容来源于网络,如有侵权,请联系作者删除!