本文整理了Java中org.apache.hadoop.hive.metastore.api.Table.setParameters()
方法的一些代码示例,展示了Table.setParameters()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.setParameters()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Table
类名称:Table
方法名:setParameters
暂无
代码示例来源:origin: apache/hive
/**
 * Sets the table-level parameters by delegating to the wrapped Thrift
 * {@code tTable} object.
 *
 * @param params table properties as key/value pairs; replaces any existing map
 */
public void setParameters(Map<String, String> params) {
tTable.setParameters(params);
}
代码示例来源:origin: apache/drill
/**
 * Sets the table-level parameters by delegating to the wrapped Thrift
 * {@code tTable} object.
 *
 * @param params table properties as key/value pairs; replaces any existing map
 */
public void setParameters(Map<String, String> params) {
tTable.setParameters(params);
}
代码示例来源:origin: prestodb/presto
/**
 * Applies {@code update} to the table's current statistics and persists the
 * result: basic statistics are stored in the table's parameter map via
 * {@code alterTable}, updated column statistics are written through the
 * column-statistics API, and statistics for columns dropped by the update
 * are deleted.
 *
 * @param databaseName schema of the target table
 * @param tableName    name of the target table
 * @param update       transformation from current to desired statistics
 */
@Override
public synchronized void updateTableStatistics(String databaseName, String tableName, Function<PartitionStatistics, PartitionStatistics> update)
{
PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName);
PartitionStatistics updatedStatistics = update.apply(currentStatistics);
Table originalTable = getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
// Work on a copy so the cached/original Thrift object is not mutated.
Table modifiedTable = originalTable.deepCopy();
HiveBasicStatistics basicStatistics = updatedStatistics.getBasicStatistics();
// Basic statistics (row count etc.) live in the table parameters.
modifiedTable.setParameters(updateStatisticsParameters(modifiedTable.getParameters(), basicStatistics));
alterTable(databaseName, tableName, modifiedTable);
com.facebook.presto.hive.metastore.Table table = fromMetastoreApiTable(modifiedTable);
OptionalLong rowCount = basicStatistics.getRowCount();
// Convert the updated per-column statistics into metastore objects,
// resolving each column's type from the converted table.
List<ColumnStatisticsObj> metastoreColumnStatistics = updatedStatistics.getColumnStatistics().entrySet().stream()
.map(entry -> createMetastoreColumnStatistics(entry.getKey(), table.getColumn(entry.getKey()).get().getType(), entry.getValue(), rowCount))
.collect(toImmutableList());
if (!metastoreColumnStatistics.isEmpty()) {
setTableColumnStatistics(databaseName, tableName, metastoreColumnStatistics);
}
// Delete statistics for columns present before but absent after the update.
Set<String> removedColumnStatistics = difference(currentStatistics.getColumnStatistics().keySet(), updatedStatistics.getColumnStatistics().keySet());
removedColumnStatistics.forEach(column -> deleteTableColumnStatistics(databaseName, tableName, column));
}
代码示例来源:origin: prestodb/presto
/**
 * Converts Presto's internal {@code Table} model into a Thrift metastore API
 * {@code Table}, attaching the supplied principal privileges.
 *
 * @param table      internal table model to convert
 * @param privileges owner privileges copied onto the result
 * @return a fully populated metastore API table
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table();
    apiTable.setDbName(table.getDatabaseName());
    apiTable.setTableName(table.getTableName());
    apiTable.setOwner(table.getOwner());
    apiTable.setTableType(table.getTableType());
    apiTable.setParameters(table.getParameters());
    apiTable.setPartitionKeys(table.getPartitionColumns().stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    apiTable.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    apiTable.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    // View texts are optional; an absent value is stored as null.
    apiTable.setViewOriginalText(table.getViewOriginalText().orElse(null));
    apiTable.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return apiTable;
}
代码示例来源:origin: apache/incubator-gobblin
table.setDbName(hiveTable.getDbName());
table.setTableName(hiveTable.getTableName());
table.setParameters(getParameters(props));
if (hiveTable.getCreateTime().isPresent()) {
table.setCreateTime(Ints.checkedCast(hiveTable.getCreateTime().get()));
代码示例来源:origin: apache/hive
/**
 * Creates a builder that produces ORC-backed, transactional managed tables
 * in the given database.
 *
 * @param database database the built table will belong to
 */
public TableBuilder(Database database) {
  this.database = database;
  partitions = new ArrayList<>();
  columnNames = new ArrayList<>();
  columnTypes = new ArrayList<>();
  partitionKeys = Collections.emptyList();

  table = new Table();
  table.setDbName(database.getName());
  table.setTableType(TableType.MANAGED_TABLE.toString());

  // Tables produced by this builder are ACID ("transactional") by default.
  Map<String, String> params = new HashMap<>();
  params.put("transactional", Boolean.TRUE.toString());
  table.setParameters(params);

  sd = new StorageDescriptor();
  sd.setInputFormat(OrcInputFormat.class.getName());
  sd.setOutputFormat(OrcOutputFormat.class.getName());
  sd.setNumBuckets(1);
  table.setSd(sd);

  serDeInfo = new SerDeInfo();
  serDeInfo.setParameters(new HashMap<>());
  serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  serDeInfo.setSerializationLib(OrcSerde.class.getName());
  sd.setSerdeInfo(serDeInfo);
}
代码示例来源:origin: apache/hive
newTable.setTableName(tableName);
if (tblProps != null) {
newTable.setParameters(tblProps);
代码示例来源:origin: apache/hive
TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY);
table.setParameters(parameters);
if (isTemporary) table.setTemporary(true);
代码示例来源:origin: apache/hive
tbl.setParameters(
adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds));
代码示例来源:origin: apache/hive
tableParams.put("hcat.testarg", "testArgValue");
tbl.setParameters(tableParams);
代码示例来源:origin: apache/hive
/**
 * Assembles a metastore {@code Table} from the builder's current state.
 * Optional pieces (columns, location, partition keys) are applied only when
 * they were supplied.
 *
 * @return the constructed table
 */
Table build() {
  StorageDescriptor descriptor = new StorageDescriptor();
  // A null column list means "no columns", not "unset".
  descriptor.setCols(columns == null ? Collections.emptyList() : columns);

  SerDeInfo info = new SerDeInfo();
  info.setSerializationLib(serde);
  info.setName(tableName);
  descriptor.setSerdeInfo(info);
  descriptor.setInputFormat(inputFormat);
  descriptor.setOutputFormat(outputFormat);
  if (location != null) {
    descriptor.setLocation(location);
  }

  Table result = new Table();
  result.setDbName(dbName);
  result.setTableName(tableName);
  result.setSd(descriptor);
  result.setParameters(parameters);
  result.setOwner(owner);
  if (partitionKeys != null) {
    result.setPartitionKeys(partitionKeys);
  }
  result.setTableType(tableType.toString());
  return result;
}
}
代码示例来源:origin: apache/hive
/**
 * (Re)creates a partitioned, RCFile-backed managed table for the test,
 * dropping any pre-existing table of the same name first.
 *
 * @param dbName    database to create the table in; null selects the default database
 * @param tableName name of the table to create
 * @throws Exception if the metastore create call fails
 */
private void createTable(String dbName, String tableName) throws Exception {
  String db = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
  // Best-effort cleanup; a NoSuchObjectException here just means there was
  // nothing to drop.
  try {
    msc.dropTable(db, tableName);
  } catch (Exception ignored) {
  }

  Table table = new Table();
  table.setDbName(db);
  table.setTableName(tableName);
  table.setTableType("MANAGED_TABLE");

  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setCols(getTableColumns());
  table.setPartitionKeys(getPartitionKeys());
  table.setSd(descriptor);
  descriptor.setBucketCols(new ArrayList<>(2));

  SerDeInfo serde = new SerDeInfo();
  serde.setName(table.getTableName());
  serde.setParameters(new HashMap<>());
  serde.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  serde.setSerializationLib(ColumnarSerDe.class.getName());
  descriptor.setSerdeInfo(serde);

  descriptor.setInputFormat(RCFileInputFormat.class.getName());
  descriptor.setOutputFormat(RCFileOutputFormat.class.getName());

  table.setParameters(new HashMap<>());
  msc.createTable(table);
}
代码示例来源:origin: apache/hive
/**
 * Verifies that altering a table's column list (modify, drop, add) is
 * persisted while the original table directory is kept, and that the same
 * holds when a partition column's type and comment are changed.
 */
@Test
public void testAlterTableChangeCols() throws Exception {
  Table original = partitionedTable;
  Table updated = original.deepCopy();

  List<FieldSchema> columns = updated.getSd().getCols();
  columns.get(0).setName("modified_col");                // rename an existing column
  columns.remove(1);                                     // drop a column
  columns.add(new FieldSchema("new_col", "int", null));  // append a new column

  client.alter_table(original.getDbName(), original.getTableName(), updated);
  Table altered = client.getTable(updated.getDbName(), updated.getTableName());
  Assert.assertTrue("Original table directory should be kept",
      metaStore.isPathExists(new Path(original.getSd().getLocation())));
  // Parameters may be touched server-side, so align them before comparing.
  altered.setParameters(updated.getParameters());
  Assert.assertEquals("The table data should be the same", updated, altered);

  // Now change a partition column's type and comment and alter again.
  updated.getPartitionKeys().get(0).setType("string");
  updated.getPartitionKeys().get(0).setComment("changed comment");
  client.alter_table(original.getDbName(), original.getTableName(), updated);
  altered = client.getTable(updated.getDbName(), updated.getTableName());
  // Parameters may be touched server-side, so align them before comparing.
  altered.setParameters(updated.getParameters());
  Assert.assertEquals("The table data should be the same", updated, altered);
}
代码示例来源:origin: apache/hive
StorageDescriptor sd = new StorageDescriptor(oldtbl.getSd());
newTable.setSd(sd);
newTable.setParameters(oldtbl.getParameters());
if (location == null) {
newTable.getSd().setLocation(oldtbl.getSd().getLocation());
代码示例来源:origin: apache/storm
tbl.setParameters(tableParams);
client.createTable(tbl);
try {
代码示例来源:origin: apache/hive
/**
 * Checks that renaming an external table via alter_table removes the old
 * name, keeps the original data directory and location, and preserves the
 * client-supplied parameters.
 */
@Test
public void testAlterTableExternalTable() throws Exception {
  Table original = externalTable;
  String sourceName = original.getTableName();
  String sourceDb = original.getDbName();

  Table renamed = original.deepCopy();
  renamed.setTableName("new_external_table_for_test");
  client.alter_table(sourceDb, sourceName, renamed);

  List<String> tableNames = client.getTables(sourceDb, sourceName);
  Assert.assertEquals("Original table should be removed", 0, tableNames.size());
  Assert.assertTrue("Original table directory should be kept",
      metaStore.isPathExists(new Path(original.getSd().getLocation())));

  Table altered = client.getTable(renamed.getDbName(), renamed.getTableName());
  Assert.assertEquals("New location should be the same", original.getSd().getLocation(),
      altered.getSd().getLocation());
  Path dataFile = new Path(altered.getSd().getLocation() + "/dataFile");
  Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile));

  // Extra parameters are added server-side, so only verify the ones we set.
  for (String key : renamed.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", renamed.getParameters().get(key),
        altered.getParameters().get(key));
  }
  // Parameters were checked above; align them before the full comparison.
  renamed.setParameters(altered.getParameters());
  Assert.assertEquals("The table data should be the same", renamed, altered);
}
代码示例来源:origin: apache/hive
/**
 * Checks that changing an external table's location via alter_table takes
 * effect, keeps the original directory (with no data at the new location),
 * and preserves the client-supplied parameters.
 */
@Test
public void testAlterTableExternalTableChangeLocation() throws Exception {
  Table original = externalTable;

  // Point the copy at a modified location and alter the table.
  Table relocated = original.deepCopy();
  relocated.getSd().setLocation(relocated.getSd().getLocation() + "_modified");
  client.alter_table(original.getDbName(), original.getTableName(), relocated);

  Table altered = client.getTable(relocated.getDbName(), relocated.getTableName());
  Assert.assertTrue("Original table directory should be kept",
      metaStore.isPathExists(new Path(original.getSd().getLocation())));
  Assert.assertEquals("New location should be the new one", relocated.getSd().getLocation(),
      altered.getSd().getLocation());
  Path dataFile = new Path(altered.getSd().getLocation() + "/dataFile");
  Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile));

  // Extra parameters are added server-side, so only verify the ones we set.
  for (String key : relocated.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", relocated.getParameters().get(key),
        altered.getParameters().get(key));
  }
  // Parameters were checked above; align them before the full comparison.
  relocated.setParameters(altered.getParameters());
  // The location was deliberately changed; mirror it before comparing.
  relocated.getSd().setLocation(altered.getSd().getLocation());
  Assert.assertEquals("The table data should be the same", relocated, altered);
}
代码示例来源:origin: apache/hive
table.setParameters(newParams);
sharedCache.alterTableInCache(catName, dbName, tblName, table);
sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj());
代码示例来源:origin: apache/hive
/**
 * Alters an existing table to a fully-populated replacement and verifies
 * the client-supplied data survives the round trip, accounting for fields
 * the server assigns (parameters, create time, creation metadata, write id,
 * table id).
 */
@Test
public void testAlterTable() throws Exception {
  Table original = testTables[2];
  String name = original.getTableName();
  String db = original.getDbName();

  Table updated = getTableWithAllParametersSet();
  updated.setTableName(name);
  updated.setDbName(db);
  // Partition keys cannot be altered, but getTableWithAllParametersSet adds
  // one, so restore the original keys for this test.
  updated.setPartitionKeys(original.getPartitionKeys());

  client.alter_table(db, name, updated);
  Table altered = client.getTable(db, name);

  // Extra parameters are added server-side, so only verify the ones we set.
  for (String key : updated.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", updated.getParameters().get(key),
        altered.getParameters().get(key));
  }
  // Parameters were checked above; align them before the full comparison.
  updated.setParameters(altered.getParameters());
  // These fields are assigned server-side, so mirror them before comparing.
  updated.setCreateTime(altered.getCreateTime());
  updated.setCreationMetadata(altered.getCreationMetadata());
  updated.setWriteId(altered.getWriteId());
  Assert.assertTrue(altered.isSetId());
  altered.unsetId();
  Assert.assertEquals("The table data should be the same", updated, altered);
}
代码示例来源:origin: apache/hive
table.setParameters(createdTable.getParameters());
table.setCreationMetadata(createdTable.getCreationMetadata());
table.setWriteId(createdTable.getWriteId());
内容来源于网络,如有侵权,请联系作者删除!