Usage and code examples for the org.apache.hadoop.hive.metastore.api.Table.setDbName() method


This article collects Java code examples for the org.apache.hadoop.hive.metastore.api.Table.setDbName() method and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on GitHub, Stack Overflow, Maven, and similar platforms, and are intended as practical references. Details of Table.setDbName() are as follows:

Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: setDbName

Table.setDbName overview

Sets the database name of the table. Table is the Thrift-generated metastore API object, and setDbName(String dbName) assigns the database that the table belongs to; it is typically called together with setTableName() when building, copying, or renaming a table, as the examples below show.
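
The following is a minimal, self-contained sketch (not taken from any of the quoted projects; the class and helper names are hypothetical) showing the typical pairing of setDbName() with setTableName() when constructing a Table object.

  import org.apache.hadoop.hive.metastore.api.Table;

  public class SetDbNameSketch {
    // Hypothetical helper: build a bare Table reference identified by database and table name.
    public static Table newTableRef(String dbName, String tableName) {
      Table table = new Table();
      table.setDbName(dbName);       // database the table belongs to
      table.setTableName(tableName); // table name within that database
      // A real IMetaStoreClient.createTable() call would also need a StorageDescriptor,
      // columns, and a SerDeInfo, as the fuller examples below show.
      return table;
    }
  }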

Code examples

Code example source: apache/hive

  public void setDbName(String databaseName) {
    tTable.setDbName(databaseName);
  }

Code example source: apache/drill

  public void setDbName(String databaseName) {
    tTable.setDbName(databaseName);
  }

Code example source: prestodb/presto

  @Override
  public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
  {
    Optional<org.apache.hadoop.hive.metastore.api.Table> source = delegate.getTable(databaseName, tableName);
    if (!source.isPresent()) {
      throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
    }
    org.apache.hadoop.hive.metastore.api.Table table = source.get();
    table.setDbName(newDatabaseName);
    table.setTableName(newTableName);
    alterTable(databaseName, tableName, table);
  }

Code example source: apache/hive

  private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
      org.apache.hadoop.hive.metastore.api.Table tbl) {
    org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
    newCopy.setDbName(newCopy.getDbName().toLowerCase());
    newCopy.setTableName(newCopy.getTableName().toLowerCase());
    return newCopy;
  }

Code example source: apache/drill

  private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
      org.apache.hadoop.hive.metastore.api.Table tbl) {
    org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
    newCopy.setDbName(newCopy.getDbName().toLowerCase());
    newCopy.setTableName(newCopy.getTableName().toLowerCase());
    return newCopy;
  }

Code example source: apache/hive

  private void addTable(String databaseName, String tableName, Set<Table> tables) {
    checkNotNullOrEmpty(databaseName);
    checkNotNullOrEmpty(tableName);
    Table table = new Table();
    table.setDbName(databaseName);
    table.setTableName(tableName);
    tables.add(table);
  }

Code example source: apache/hive

  private static List<Table> createTable() {
    Table table = new Table();
    table.setDbName("DB");
    table.setTableName("TABLE");
    return Arrays.asList(table);
  }

Code example source: apache/hive

  @Test(expected = InvalidObjectException.class)
  public void testCreateTableNoSuchDatabase() throws Exception {
    Table table = testTables[0];
    table.setDbName("no_such_database");
    client.createTable(table);
  }

Code example source: apache/hive

  @Test(expected = MetaException.class)
  public void testCreateTableNullDatabase() throws Exception {
    Table table = testTables[0];
    table.setDbName(null);
    client.createTable(table);
  }

Code example source: apache/hive

  private static Table createTable(String databaseName, String tableName) {
    Table table = new Table();
    table.setDbName(databaseName);
    table.setTableName(tableName);
    return table;
  }

Code example source: prestodb/presto

  public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
  {
    org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
    result.setDbName(table.getDatabaseName());
    result.setTableName(table.getTableName());
    result.setOwner(table.getOwner());
    result.setTableType(table.getTableType());
    result.setParameters(table.getParameters());
    result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
    result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    result.setViewOriginalText(table.getViewOriginalText().orElse(null));
    result.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return result;
  }

Code example source: apache/hive

  @Test(expected = MetaException.class)
  public void testAlterTableNullDatabaseInNew() throws Exception {
    Table originalTable = testTables[0];
    Table newTable = originalTable.deepCopy();
    newTable.setDbName(null);
    client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
  }

Code example source: apache/incubator-gobblin

  public static Table getTestTable(String dbName, String tableName) {
    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);
    table.setTableType(TableType.EXTERNAL_TABLE.name());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation("/tmp/test");
    table.setSd(sd);
    return table;
  }

Code example source: apache/hive

  @Test
  public void testCreateTableDefaultValuesView() throws Exception {
    Table table = new Table();
    StorageDescriptor sd = new StorageDescriptor();
    List<FieldSchema> cols = new ArrayList<>();
    table.setDbName(DEFAULT_DATABASE);
    table.setTableName("test_table_2");
    table.setTableType("VIRTUAL_VIEW");
    cols.add(new FieldSchema("column_name", "int", null));
    sd.setCols(cols);
    sd.setSerdeInfo(new SerDeInfo());
    table.setSd(sd);
    client.createTable(table);
    Table createdTable = client.getTable(table.getDbName(), table.getTableName());
    // No location should be created for views
    Assert.assertNull("Storage descriptor location should be null",
        createdTable.getSd().getLocation());
  }

Code example source: apache/hive

  @Test
  public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
    Table table = new Table();
    StorageDescriptor sd = new StorageDescriptor();
    List<FieldSchema> cols = new ArrayList<>();
    table.setDbName(OTHER_DATABASE);
    table.setTableName("test_table_2");
    cols.add(new FieldSchema("column_name", "int", null));
    sd.setCols(cols);
    sd.setSerdeInfo(new SerDeInfo());
    table.setSd(sd);
    client.createTable(table);
    Table createdTable = client.getTable(table.getDbName(), table.getTableName());
    Assert.assertEquals("Storage descriptor location",
        metaStore.getWarehouseRoot() + "/" + table.getDbName() + ".db/" + table.getTableName(),
        createdTable.getSd().getLocation());
  }

Code example source: apache/hive

  @Test
  public void testCreateTable() throws IOException {
    Table t = new Table();
    t.setDbName("testdb");
    t.setTableName("testtable");
    NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
        HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString());
    event.setDbName(t.getDbName());
    event.setTableName(t.getTableName());
    HCatNotificationEvent hev = new HCatNotificationEvent(event);
    ReplicationTask rtask = ReplicationTask.create(client, hev);
    assertEquals(hev.toString(), rtask.getEvent().toString());
    verifyCreateTableReplicationTask(rtask);
  }

Code example source: apache/hive

  @Test
  public void testDropTable() throws IOException {
    Table t = new Table();
    t.setDbName("testdb");
    t.setTableName("testtable");
    NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
        HCatConstants.HCAT_DROP_TABLE_EVENT, msgFactory.buildDropTableMessage(t).toString());
    event.setDbName(t.getDbName());
    event.setTableName(t.getTableName());
    HCatNotificationEvent hev = new HCatNotificationEvent(event);
    ReplicationTask rtask = ReplicationTask.create(client, hev);
    assertEquals(hev.toString(), rtask.getEvent().toString());
    verifyDropTableReplicationTask(rtask);
  }

Code example source: apache/hive

  @Test
  public static void testCreate() throws HCatException {
    Table t = new Table();
    t.setDbName("testdb");
    t.setTableName("testtable");
    NotificationEvent event = new NotificationEvent(0, (int) System.currentTimeMillis(),
        HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString());
    event.setDbName(t.getDbName());
    event.setTableName(t.getTableName());
    ReplicationTask.resetFactory(null);
    ReplicationTask rtask = ReplicationTask.create(HCatClient.create(new HiveConf()), new HCatNotificationEvent(event));
    assertTrue("Provided factory instantiation should yield CreateTableReplicationTask",
        rtask instanceof CreateTableReplicationTask);
    ReplicationTask.resetFactory(NoopFactory.class);
    rtask = ReplicationTask.create(HCatClient.create(new HiveConf()), new HCatNotificationEvent(event));
    assertTrue("Provided factory instantiation should yield NoopReplicationTask",
        rtask instanceof NoopReplicationTask);
    ReplicationTask.resetFactory(null);
  }

Code example source: apache/hive

  @Test
  public void testAlterTable() throws IOException {
    Table t = new Table();
    t.setDbName("testdb");
    t.setTableName("testtable");
    NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
        HCatConstants.HCAT_ALTER_TABLE_EVENT,
        msgFactory.buildAlterTableMessage(t, t, t.getWriteId()).toString());
    event.setDbName(t.getDbName());
    event.setTableName(t.getTableName());
    HCatNotificationEvent hev = new HCatNotificationEvent(event);
    ReplicationTask rtask = ReplicationTask.create(client, hev);
    assertEquals(hev.toString(), rtask.getEvent().toString());
    verifyAlterTableReplicationTask(rtask);
  }

Code example source: apache/hive

  @Test
  public void testAlterPartition() throws HCatException {
    Table t = new Table();
    t.setDbName("testdb");
    t.setTableName("testtable");
    List<FieldSchema> pkeys = HCatSchemaUtils.getFieldSchemas(
        HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
    t.setPartitionKeys(pkeys);
    Partition p = createPtn(t, Arrays.asList("102", "lmn"));
    NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
        HCatConstants.HCAT_ALTER_PARTITION_EVENT,
        msgFactory.buildAlterPartitionMessage(t, p, p, p.getWriteId()).toString());
    event.setDbName(t.getDbName());
    event.setTableName(t.getTableName());
    HCatNotificationEvent hev = new HCatNotificationEvent(event);
    ReplicationTask rtask = ReplicationTask.create(client, hev);
    assertEquals(hev.toString(), rtask.getEvent().toString());
    verifyAlterPartitionReplicationTask(rtask, t, p);
  }
