This article collects code examples of the Java method org.apache.hadoop.hive.metastore.api.Table.setPartitionKeys(), showing how Table.setPartitionKeys() is used in practice. The examples are extracted from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Table.setPartitionKeys() method:
Package path: org.apache.hadoop.hive.metastore.api.Table
Class name: Table
Method name: setPartitionKeys
Description: none provided
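Since no official description is available, here is a minimal usage sketch (not taken from the projects below): it builds a bare Thrift Table and declares its partition columns with setPartitionKeys(). The database, table, and column names are hypothetical, chosen only for illustration.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetPartitionKeysExample {
  public static void main(String[] args) {
    // Build a bare Thrift table object (all names here are hypothetical).
    Table table = new Table();
    table.setDbName("example_db");
    table.setTableName("example_table");
    // Partition columns are plain FieldSchema entries: name, type, comment.
    // They are kept separate from the data columns stored in the table's
    // StorageDescriptor, as several of the examples below also show.
    List<FieldSchema> partitionKeys = Arrays.asList(
        new FieldSchema("dt", "string", "partition date"),
        new FieldSchema("country", "string", "partition country"));
    table.setPartitionKeys(partitionKeys);
  }
}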
Code example source: apache/hive
public void setPartCols(List<FieldSchema> partCols) {
  tTable.setPartitionKeys(partCols);
}
Code example source: apache/drill
public void setPartCols(List<FieldSchema> partCols) {
  tTable.setPartitionKeys(partCols);
}
Code example source: apache/hive
public static List<FieldSchema> getPartCols(Table table) {
  List<FieldSchema> partKeys = table.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<>();
    table.setPartitionKeys(partKeys);
  }
  return partKeys;
}
Code example source: apache/hive
public List<FieldSchema> getPartCols() {
  List<FieldSchema> partKeys = tTable.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<FieldSchema>();
    tTable.setPartitionKeys(partKeys);
  }
  return partKeys;
}
Code example source: apache/drill
public List<FieldSchema> getPartCols() {
  List<FieldSchema> partKeys = tTable.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<FieldSchema>();
    tTable.setPartitionKeys(partKeys);
  }
  return partKeys;
}
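In the getters above, setPartitionKeys() is used defensively: when the underlying Thrift object has no partition-key list yet, a fresh empty list is attached so that callers never receive null.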
Code example source: prestodb/presto
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
  org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
  result.setDbName(table.getDatabaseName());
  result.setTableName(table.getTableName());
  result.setOwner(table.getOwner());
  result.setTableType(table.getTableType());
  result.setParameters(table.getParameters());
  result.setPartitionKeys(table.getPartitionColumns().stream()
      .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
      .collect(toList()));
  result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
  result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
  result.setViewOriginalText(table.getViewOriginalText().orElse(null));
  result.setViewExpandedText(table.getViewExpandedText().orElse(null));
  return result;
}
Code example source: apache/hive
@Test
public void testGetTableSchemaWithPtnColsApi() throws IOException {
  // Check the schema of a table with one field and no partition keys.
  StorageDescriptor sd = new StorageDescriptor(
      Lists.newArrayList(new FieldSchema("username", serdeConstants.STRING_TYPE_NAME, null)),
      "location", "org.apache.hadoop.mapred.TextInputFormat",
      "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
      new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
  org.apache.hadoop.hive.metastore.api.Table apiTable =
      new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
          0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
          "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
  Table table = new Table(apiTable);
  List<HCatFieldSchema> expectedHCatSchema =
      Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
  Assert.assertEquals(new HCatSchema(expectedHCatSchema),
      HCatUtil.getTableSchemaWithPtnCols(table));
  // Add a partition key and ensure it is reflected in the schema.
  List<FieldSchema> partitionKeys =
      Lists.newArrayList(new FieldSchema("dt", serdeConstants.STRING_TYPE_NAME, null));
  table.getTTable().setPartitionKeys(partitionKeys);
  expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
  Assert.assertEquals(new HCatSchema(expectedHCatSchema),
      HCatUtil.getTableSchemaWithPtnCols(table));
}
Code example source: apache/incubator-gobblin
@Test
public void dropReplacedPartitionsTest() throws Exception {
  Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
  table.setTableType("VIRTUAL_VIEW");
  table.setPartitionKeys(ImmutableList.of(
      new FieldSchema("year", "string", ""), new FieldSchema("month", "string", "")));
  Partition part = new Partition();
  part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));
  SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
  SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);
  QueryBasedHiveConversionEntity conversionEntity =
      new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
  List<ImmutableMap<String, String>> expected = ImmutableList.of(
      ImmutableMap.of("year", "2015", "month", "12"),
      ImmutableMap.of("year", "2016", "month", "01"));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
  // Make sure that a partition itself is not dropped.
  Partition replacedSelf = new Partition();
  replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
  replacedSelf.setValues(ImmutableList.of("2016", "02"));
  conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable,
      Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
}
Code example source: apache/hive
private static void createTable(String tableName, String tablePerm) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(DATABASE);
  tbl.setTableName(tableName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(ColumnHolder.colMapping.get(tableName));
  tbl.setSd(sd);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
  sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(
      org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
  tbl.setPartitionKeys(ColumnHolder.partitionCols);
  hmsc.createTable(tbl);
  Path path = new Path(warehousedir, tableName);
  FileSystem fs = path.getFileSystem(hiveConf);
  fs.setPermission(path, new FsPermission(tablePerm));
}
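Note the split this example (and several below) makes explicit: regular data columns are attached to the StorageDescriptor via sd.setCols(), while partition columns are declared directly on the Table via setPartitionKeys().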
Code example source: apache/hive
partitionFields.add(new FieldSchema(partitionKey, serdeConstants.STRING_TYPE_NAME, ""));
table.setPartitionKeys(partitionFields);
Code example source: apache/hive
@Test
public void testAddPartition() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  List<Partition> addedPtns = new ArrayList<Partition>();
  addedPtns.add(createPtn(t, Arrays.asList("120", "abc")));
  addedPtns.add(createPtn(t, Arrays.asList("201", "xyz")));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ADD_PARTITION_EVENT,
      msgFactory.buildAddPartitionMessage(t, addedPtns.iterator()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAddPartitionReplicationTask(rtask, t, addedPtns);
}
Code example source: apache/hive
Table build() {
  StorageDescriptor sd = new StorageDescriptor();
  if (columns == null) {
    sd.setCols(Collections.emptyList());
  } else {
    sd.setCols(columns);
  }
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setSerializationLib(serde);
  serdeInfo.setName(tableName);
  sd.setSerdeInfo(serdeInfo);
  sd.setInputFormat(inputFormat);
  sd.setOutputFormat(outputFormat);
  if (location != null) {
    sd.setLocation(location);
  }
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setSd(sd);
  table.setParameters(parameters);
  table.setOwner(owner);
  if (partitionKeys != null) {
    table.setPartitionKeys(partitionKeys);
  }
  table.setTableType(tableType.toString());
  return table;
}
Code example source: apache/hive
private void createTable(String dbName, String tableName) throws Exception {
  String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
  try {
    msc.dropTable(databaseName, tableName);
  } catch (Exception e) {
    // can fail with NoSuchObjectException
  }
  Table tbl = new Table();
  tbl.setDbName(databaseName);
  tbl.setTableName(tableName);
  tbl.setTableType("MANAGED_TABLE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(getTableColumns());
  tbl.setPartitionKeys(getPartitionKeys());
  tbl.setSd(sd);
  sd.setBucketCols(new ArrayList<String>(2));
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  Map<String, String> tableParams = new HashMap<String, String>();
  tbl.setParameters(tableParams);
  msc.createTable(tbl);
}
Code example source: apache/hive
private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
  try {
    db.dropTable(catName, dbName, tableName);
    Table table = new Table();
    table.setCatName(catName);
    table.setDbName(dbName);
    table.setTableName(tableName);
    FieldSchema col1 = new FieldSchema("key", "string", "");
    FieldSchema col2 = new FieldSchema("value", "int", "");
    FieldSchema col3 = new FieldSchema("city", "string", "");
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.setInputFormat(TextInputFormat.class.getCanonicalName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
    sd.setCols(Arrays.asList(col1, col2));
    table.setPartitionKeys(Arrays.asList(col3));
    table.setSd(sd);
    db.createTable(table);
    return db.getTable(catName, dbName, tableName);
  } catch (Exception exception) {
    fail("Unable to drop and create table "
        + StatsUtils.getFullyQualifiedTableName(dbName, tableName)
        + " because " + StringUtils.stringifyException(exception));
    throw exception;
  }
}
Code example source: apache/hive
@Test
public void testDropPartition() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_DROP_PARTITION_EVENT, msgFactory.buildDropPartitionMessage(
          t, Collections.singletonList(p).iterator()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyDropPartitionReplicationTask(rtask, t, p);
}
Code example source: apache/storm
sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
if (partNames != null && partNames.length != 0) {
  tbl.setPartitionKeys(getPartitionKeys(partNames));
}
Code example source: apache/hive
@Test
public void testInsert() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  List<String> files = Arrays.asList("/tmp/test123");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_INSERT_EVENT, msgFactory.buildInsertMessage(
          t.getDbName(), t.getTableName(), getPtnDesc(t, p), files).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyInsertReplicationTask(rtask, t, p);
}
Code example source: apache/hive
@Test
public void testAlterTable() throws Exception {
  Table originalTable = testTables[2];
  String originalTableName = originalTable.getTableName();
  String originalDatabase = originalTable.getDbName();
  Table newTable = getTableWithAllParametersSet();
  newTable.setTableName(originalTableName);
  newTable.setDbName(originalDatabase);
  // Partition keys cannot be altered, but getTableWithAllParametersSet adds one,
  // so remove it for this test.
  newTable.setPartitionKeys(originalTable.getPartitionKeys());
  client.alter_table(originalDatabase, originalTableName, newTable);
  Table alteredTable = client.getTable(originalDatabase, originalTableName);
  // Extra parameters will be added on the server side, so check only that the
  // required ones are present.
  for (String key : newTable.getParameters().keySet()) {
    Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
        alteredTable.getParameters().get(key));
  }
  // The parameters are checked manually above, so do not compare them again.
  newTable.setParameters(alteredTable.getParameters());
  // Some of the data is set on the server side, so reset those fields.
  newTable.setCreateTime(alteredTable.getCreateTime());
  newTable.setCreationMetadata(alteredTable.getCreationMetadata());
  newTable.setWriteId(alteredTable.getWriteId());
  Assert.assertTrue(alteredTable.isSetId());
  alteredTable.unsetId();
  Assert.assertEquals("The table data should be the same", newTable, alteredTable);
}
Code example source: apache/hive
@Test
public void testAlterPartition() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_PARTITION_EVENT,
      msgFactory.buildAlterPartitionMessage(t, p, p, p.getWriteId()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAlterPartitionReplicationTask(rtask, t, p);
}