This article collects code examples for the Java method org.apache.hadoop.hive.metastore.api.Table.setDbName(), showing how Table.setDbName() is used in practice. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow and Maven, and should serve as useful references. Details of the Table.setDbName() method:

Package path: org.apache.hadoop.hive.metastore.api.Table
Class name: Table
Method name: setDbName
Method description: none available
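Before the sourced snippets below, here is a minimal, self-contained sketch (not drawn from any of the listed projects; the database and table names are placeholders) showing setDbName() used together with setTableName() when building a Table object:

import org.apache.hadoop.hive.metastore.api.Table;

public class SetDbNameExample {
  public static void main(String[] args) {
    Table table = new Table();
    // setDbName assigns the owning database of the table; "example_db" is a placeholder name.
    table.setDbName("example_db");
    table.setTableName("example_table");
    System.out.println(table.getDbName() + "." + table.getTableName());
  }
}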
Code example source: apache/hive

public void setDbName(String databaseName) {
  tTable.setDbName(databaseName);
}
Code example source: apache/drill

public void setDbName(String databaseName) {
  tTable.setDbName(databaseName);
}
Code example source: prestodb/presto

@Override
public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
{
    Optional<org.apache.hadoop.hive.metastore.api.Table> source = delegate.getTable(databaseName, tableName);
    if (!source.isPresent()) {
        throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
    }
    org.apache.hadoop.hive.metastore.api.Table table = source.get();
    table.setDbName(newDatabaseName);
    table.setTableName(newTableName);
    alterTable(databaseName, tableName, table);
}
Code example source: apache/hive

private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
    org.apache.hadoop.hive.metastore.api.Table tbl) {
  org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
  newCopy.setDbName(newCopy.getDbName().toLowerCase());
  newCopy.setTableName(newCopy.getTableName().toLowerCase());
  return newCopy;
}
Code example source: apache/drill

private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
    org.apache.hadoop.hive.metastore.api.Table tbl) {
  org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
  newCopy.setDbName(newCopy.getDbName().toLowerCase());
  newCopy.setTableName(newCopy.getTableName().toLowerCase());
  return newCopy;
}
Code example source: apache/hive

private void addTable(String databaseName, String tableName, Set<Table> tables) {
  checkNotNullOrEmpty(databaseName);
  checkNotNullOrEmpty(tableName);
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  tables.add(table);
}
Code example source: apache/hive

private static List<Table> createTable() {
  Table table = new Table();
  table.setDbName("DB");
  table.setTableName("TABLE");
  return Arrays.asList(table);
}
Code example source: apache/hive

@Test(expected = InvalidObjectException.class)
public void testCreateTableNoSuchDatabase() throws Exception {
  Table table = testTables[0];
  table.setDbName("no_such_database");
  client.createTable(table);
}
Code example source: apache/hive

@Test(expected = MetaException.class)
public void testCreateTableNullDatabase() throws Exception {
  Table table = testTables[0];
  table.setDbName(null);
  client.createTable(table);
}
Code example source: apache/hive

private static Table createTable(String databaseName, String tableName) {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  return table;
}
Code example source: prestodb/presto

public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
    result.setDbName(table.getDatabaseName());
    result.setTableName(table.getTableName());
    result.setOwner(table.getOwner());
    result.setTableType(table.getTableType());
    result.setParameters(table.getParameters());
    result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
    result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    result.setViewOriginalText(table.getViewOriginalText().orElse(null));
    result.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return result;
}
Code example source: apache/hive

@Test(expected = MetaException.class)
public void testAlterTableNullDatabaseInNew() throws Exception {
  Table originalTable = testTables[0];
  Table newTable = originalTable.deepCopy();
  newTable.setDbName(null);
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
}
Code example source: apache/incubator-gobblin

public static Table getTestTable(String dbName, String tableName) {
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("/tmp/test");
  table.setSd(sd);
  return table;
}
Code example source: apache/hive

@Test
public void testCreateTableDefaultValuesView() throws Exception {
  Table table = new Table();
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>();
  table.setDbName(DEFAULT_DATABASE);
  table.setTableName("test_table_2");
  table.setTableType("VIRTUAL_VIEW");
  cols.add(new FieldSchema("column_name", "int", null));
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  client.createTable(table);
  Table createdTable = client.getTable(table.getDbName(), table.getTableName());
  // No location should be created for views
  Assert.assertNull("Storage descriptor location should be null",
      createdTable.getSd().getLocation());
}
Code example source: apache/hive

@Test
public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
  Table table = new Table();
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>();
  table.setDbName(OTHER_DATABASE);
  table.setTableName("test_table_2");
  cols.add(new FieldSchema("column_name", "int", null));
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  client.createTable(table);
  Table createdTable = client.getTable(table.getDbName(), table.getTableName());
  Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
          + "/" + table.getDbName() + ".db/" + table.getTableName(),
      createdTable.getSd().getLocation());
}
Code example source: apache/hive

@Test
public void testCreateTable() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyCreateTableReplicationTask(rtask);
}
Code example source: apache/hive

@Test
public void testDropTable() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_DROP_TABLE_EVENT, msgFactory.buildDropTableMessage(t).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyDropTableReplicationTask(rtask);
}
Code example source: apache/hive

@Test
public static void testCreate() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  NotificationEvent event = new NotificationEvent(0, (int) System.currentTimeMillis(),
      HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  ReplicationTask.resetFactory(null);
  ReplicationTask rtask = ReplicationTask.create(HCatClient.create(new HiveConf()), new HCatNotificationEvent(event));
  assertTrue("Provided factory instantiation should yield CreateTableReplicationTask",
      rtask instanceof CreateTableReplicationTask);
  ReplicationTask.resetFactory(NoopFactory.class);
  rtask = ReplicationTask.create(HCatClient.create(new HiveConf()), new HCatNotificationEvent(event));
  assertTrue("Provided factory instantiation should yield NoopReplicationTask",
      rtask instanceof NoopReplicationTask);
  ReplicationTask.resetFactory(null);
}
Code example source: apache/hive

@Test
public void testAlterTable() throws IOException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_TABLE_EVENT,
      msgFactory.buildAlterTableMessage(t, t, t.getWriteId()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAlterTableReplicationTask(rtask);
}
Code example source: apache/hive

@Test
public void testAlterPartition() throws HCatException {
  Table t = new Table();
  t.setDbName("testdb");
  t.setTableName("testtable");
  List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  t.setPartitionKeys(pkeys);
  Partition p = createPtn(t, Arrays.asList("102", "lmn"));
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_PARTITION_EVENT,
      msgFactory.buildAlterPartitionMessage(t, p, p, p.getWriteId()).toString());
  event.setDbName(t.getDbName());
  event.setTableName(t.getTableName());
  HCatNotificationEvent hev = new HCatNotificationEvent(event);
  ReplicationTask rtask = ReplicationTask.create(client, hev);
  assertEquals(hev.toString(), rtask.getEvent().toString());
  verifyAlterPartitionReplicationTask(rtask, t, p);
}