Usage of the org.apache.hadoop.hive.metastore.api.Table.setSd() method, with code examples

This article collects Java code examples of the org.apache.hadoop.hive.metastore.api.Table.setSd() method and shows how it is used in practice. The snippets come from selected open-source projects on GitHub/Stackoverflow/Maven and should serve as useful references. Details of Table.setSd() are as follows:

Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: setSd

About Table.setSd

setSd() assigns a StorageDescriptor to the Thrift-generated Table object. The StorageDescriptor describes the table's physical layout: its columns, location, input/output formats, SerDe configuration, bucketing, and sort columns. A table submitted to the metastore without one is rejected, as several of the tests below demonstrate.
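
Before diving into the collected snippets, here is a minimal, self-contained sketch of the typical pattern: build a Table, configure a StorageDescriptor, and attach it with setSd(). It uses only standard Hive metastore API classes; the database, table, and column names are illustrative placeholders.

    import java.util.Arrays;
    import java.util.HashMap;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.SerDeInfo;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class SetSdExample {
      public static Table buildTable() {
        Table tbl = new Table();
        tbl.setDbName("default");          // placeholder database name
        tbl.setTableName("example_table"); // placeholder table name

        // The StorageDescriptor carries the table's physical layout.
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(Arrays.asList(new FieldSchema("id", "int", "example column")));
        SerDeInfo serDeInfo = new SerDeInfo();
        serDeInfo.setParameters(new HashMap<String, String>());
        sd.setSerdeInfo(serDeInfo);

        // Attach the storage descriptor; a table sent to the metastore
        // without one (or with setSd(null)) is rejected with a MetaException.
        tbl.setSd(sd);
        return tbl;
      }
    }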

Code examples

Code example source: apache/drill

    @Override
    public void analyzeIndexDefinition(Table baseTable, Index index,
        Table indexTable) throws HiveException {
      StorageDescriptor storageDesc = index.getSd();
      if (this.usesIndexTable() && indexTable != null) {
        StorageDescriptor indexTableSd = storageDesc.deepCopy();
        List<FieldSchema> indexTblCols = indexTableSd.getCols();
        FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
        indexTblCols.add(bucketFileName);
        FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
        indexTblCols.add(offSets);
        indexTable.setSd(indexTableSd);
      }
    }

Code example source: apache/drill

    @Override
    public void analyzeIndexDefinition(Table baseTable, Index index,
        Table indexTable) throws HiveException {
      StorageDescriptor storageDesc = index.getSd();
      if (this.usesIndexTable() && indexTable != null) {
        StorageDescriptor indexTableSd = storageDesc.deepCopy();
        List<FieldSchema> indexTblCols = indexTableSd.getCols();
        FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
        indexTblCols.add(bucketFileName);
        FieldSchema offSets = new FieldSchema("_offset", "bigint", "");
        indexTblCols.add(offSets);
        FieldSchema bitmaps = new FieldSchema("_bitmaps", "array<bigint>", "");
        indexTblCols.add(bitmaps);
        indexTable.setSd(indexTableSd);
      }
    }

Code example source: apache/hive

    @Test(expected = MetaException.class)
    public void testCreateTableNullStorageDescriptor() throws Exception {
      Table table = testTables[0];
      table.setSd(null);
      client.createTable(table);
    }

Code example source: prestodb/presto

    public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
    {
        org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
        result.setDbName(table.getDatabaseName());
        result.setTableName(table.getTableName());
        result.setOwner(table.getOwner());
        result.setTableType(table.getTableType());
        result.setParameters(table.getParameters());
        result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
        result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
        result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
        result.setViewOriginalText(table.getViewOriginalText().orElse(null));
        result.setViewExpandedText(table.getViewExpandedText().orElse(null));
        return result;
    }

Code example source: apache/hive

    static Table assemble(TableWrapper wrapper, SharedCache sharedCache) {
      Table t = wrapper.getTable().deepCopy();
      // Re-attach the storage descriptor that the cache stores separately, keyed by hash.
      if (wrapper.getSdHash() != null) {
        StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
        if (sdCopy.getBucketCols() == null) {
          sdCopy.setBucketCols(Collections.emptyList());
        }
        if (sdCopy.getSortCols() == null) {
          sdCopy.setSortCols(Collections.emptyList());
        }
        if (sdCopy.getSkewedInfo() == null) {
          sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
              Collections.emptyList(), Collections.emptyMap()));
        }
        sdCopy.setLocation(wrapper.getLocation());
        sdCopy.setParameters(wrapper.getParameters());
        t.setSd(sdCopy);
      }
      return t;
    }

Code example source: apache/hive

    public TableBuilder(Database database) {
      this.database = database;
      partitions = new ArrayList<>();
      columnNames = new ArrayList<>();
      columnTypes = new ArrayList<>();
      partitionKeys = Collections.emptyList();
      table = new Table();
      table.setDbName(database.getName());
      table.setTableType(TableType.MANAGED_TABLE.toString());
      Map<String, String> tableParams = new HashMap<String, String>();
      tableParams.put("transactional", Boolean.TRUE.toString());
      table.setParameters(tableParams);
      sd = new StorageDescriptor();
      sd.setInputFormat(OrcInputFormat.class.getName());
      sd.setOutputFormat(OrcOutputFormat.class.getName());
      sd.setNumBuckets(1);
      table.setSd(sd);
      serDeInfo = new SerDeInfo();
      serDeInfo.setParameters(new HashMap<String, String>());
      serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
      serDeInfo.setSerializationLib(OrcSerde.class.getName());
      sd.setSerdeInfo(serDeInfo);
    }

Code example source: apache/hive

    @Test(expected = MetaException.class)
    public void testAlterTableNullStorageDescriptorInNew() throws Exception {
      Table originalTable = testTables[0];
      Table newTable = originalTable.deepCopy();
      newTable.setSd(null);
      client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
    }

Code example source: apache/hive

    @Test
    public void testAlterTableChangePosNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
      FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
      FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
      FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
      FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
      StorageDescriptor oldSd = new StorageDescriptor();
      oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
      Table oldTable = new Table();
      oldTable.setDbName("default");
      oldTable.setTableName("test_table");
      oldTable.setSd(oldSd);
      StorageDescriptor newSd = new StorageDescriptor(oldSd);
      newSd.setCols(Arrays.asList(col1, col4, col2, col3));
      Table newTable = new Table(oldTable);
      newTable.setSd(newSd);
      RawStore msdb = Mockito.mock(RawStore.class);
      Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
          getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
      HiveAlterHandler handler = new HiveAlterHandler();
      handler.setConf(conf);
      handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null);
    }

Code example source: apache/hive

    private void addSd(ArrayList<FieldSchema> cols, Table tbl) {
      StorageDescriptor sd = new StorageDescriptor();
      sd.setCols(cols);
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.setBucketCols(new ArrayList<String>());
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters()
          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
      sd.setSortCols(new ArrayList<Order>());
      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
      sd.setInputFormat(HiveInputFormat.class.getName());
      sd.setOutputFormat(HiveOutputFormat.class.getName());
      tbl.setSd(sd);
    }

Code example source: apache/hive

    @Test
    public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
      FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
      FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
      FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
      FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
      StorageDescriptor oldSd = new StorageDescriptor();
      oldSd.setCols(Arrays.asList(col1, col2, col3));
      Table oldTable = new Table();
      oldTable.setDbName("default");
      oldTable.setTableName("test_table");
      oldTable.setSd(oldSd);
      StorageDescriptor newSd = new StorageDescriptor(oldSd);
      newSd.setCols(Arrays.asList(col1, col2, col3, col4));
      Table newTable = new Table(oldTable);
      newTable.setSd(newSd);
      RawStore msdb = Mockito.mock(RawStore.class);
      Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
          getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
      HiveAlterHandler handler = new HiveAlterHandler();
      handler.setConf(conf);
      handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null);
    }

Code example source: apache/hive

    private static void createTable(String tableName, String tablePerm) throws Exception {
      Table tbl = new Table();
      tbl.setDbName(DATABASE);
      tbl.setTableName(tableName);
      StorageDescriptor sd = new StorageDescriptor();
      sd.setCols(ColumnHolder.colMapping.get(tableName));
      tbl.setSd(sd);
      sd.setParameters(new HashMap<String, String>());
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
      sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
      sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
      sd.getSerdeInfo().setSerializationLib(
          org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
      tbl.setPartitionKeys(ColumnHolder.partitionCols);
      hmsc.createTable(tbl);
      Path path = new Path(warehousedir, tableName);
      FileSystem fs = path.getFileSystem(hiveConf);
      fs.setPermission(path, new FsPermission(tablePerm));
    }

Code example source: apache/incubator-gobblin

    public static Table getTestTable(String dbName, String tableName) {
      Table table = new Table();
      table.setDbName(dbName);
      table.setTableName(tableName);
      table.setTableType(TableType.EXTERNAL_TABLE.name());
      StorageDescriptor sd = new StorageDescriptor();
      sd.setLocation("/tmp/test");
      table.setSd(sd);
      return table;
    }

Code example source: apache/hive

    private void createTable(String dbName, String tableName) throws Exception {
      String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
      try {
        msc.dropTable(databaseName, tableName);
      } catch (Exception e) {
        // can fail with NoSuchObjectException
      }
      Table tbl = new Table();
      tbl.setDbName(databaseName);
      tbl.setTableName(tableName);
      tbl.setTableType("MANAGED_TABLE");
      StorageDescriptor sd = new StorageDescriptor();
      sd.setCols(getTableColumns());
      tbl.setPartitionKeys(getPartitionKeys());
      tbl.setSd(sd);
      sd.setBucketCols(new ArrayList<String>(2));
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
      sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
      sd.setInputFormat(RCFileInputFormat.class.getName());
      sd.setOutputFormat(RCFileOutputFormat.class.getName());
      Map<String, String> tableParams = new HashMap<String, String>();
      tbl.setParameters(tableParams);
      msc.createTable(tbl);
    }

Code example source: apache/hive

    private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
      try {
        db.dropTable(catName, dbName, tableName);
        Table table = new Table();
        table.setCatName(catName);
        table.setDbName(dbName);
        table.setTableName(tableName);
        FieldSchema col1 = new FieldSchema("key", "string", "");
        FieldSchema col2 = new FieldSchema("value", "int", "");
        FieldSchema col3 = new FieldSchema("city", "string", "");
        StorageDescriptor sd = new StorageDescriptor();
        sd.setSerdeInfo(new SerDeInfo());
        sd.setInputFormat(TextInputFormat.class.getCanonicalName());
        sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
        sd.setCols(Arrays.asList(col1, col2));
        table.setPartitionKeys(Arrays.asList(col3));
        table.setSd(sd);
        db.createTable(table);
        return db.getTable(catName, dbName, tableName);
      } catch (Exception exception) {
        fail("Unable to drop and create table " + StatsUtils
            .getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils
            .stringifyException(exception));
        throw exception;
      }
    }

Code example source: apache/hive

    private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException {
      msc.dropTable(dbName, tblName);
      silentDropDatabase(dbName);
      msc.dropType(typeName);
      Type typ1 = new Type();
      typ1.setName(typeName);
      typ1.setFields(new ArrayList<FieldSchema>(1));
      typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
      msc.createType(typ1);
      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tblName);
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
      sd.setInputFormat(HiveInputFormat.class.getName());
      sd.setOutputFormat(HiveOutputFormat.class.getName());
      sd.setCols(typ1.getFields());
      tbl.setSd(sd);
      return tbl;
    }

Code example source: apache/hive

    @Test
    public void testCreateTableDefaultValuesView() throws Exception {
      Table table = new Table();
      StorageDescriptor sd = new StorageDescriptor();
      List<FieldSchema> cols = new ArrayList<>();
      table.setDbName(DEFAULT_DATABASE);
      table.setTableName("test_table_2");
      table.setTableType("VIRTUAL_VIEW");
      cols.add(new FieldSchema("column_name", "int", null));
      sd.setCols(cols);
      sd.setSerdeInfo(new SerDeInfo());
      table.setSd(sd);
      client.createTable(table);
      Table createdTable = client.getTable(table.getDbName(), table.getTableName());
      // No location should be created for views
      Assert.assertNull("Storage descriptor location should be null",
          createdTable.getSd().getLocation());
    }

Code example source: apache/hive

    @Test
    public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
      Table table = new Table();
      StorageDescriptor sd = new StorageDescriptor();
      List<FieldSchema> cols = new ArrayList<>();
      table.setDbName(OTHER_DATABASE);
      table.setTableName("test_table_2");
      cols.add(new FieldSchema("column_name", "int", null));
      sd.setCols(cols);
      sd.setSerdeInfo(new SerDeInfo());
      table.setSd(sd);
      client.createTable(table);
      Table createdTable = client.getTable(table.getDbName(), table.getTableName());
      Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
          + "/" + table.getDbName() + ".db/" + table.getTableName(),
          createdTable.getSd().getLocation());
    }

Code example source: apache/hive

    private TableWrapper createTableWrapper(String catName, String dbName, String tblName,
        Table tbl) {
      TableWrapper wrapper;
      Table tblCopy = tbl.deepCopy();
      tblCopy.setCatName(normalizeIdentifier(catName));
      tblCopy.setDbName(normalizeIdentifier(dbName));
      tblCopy.setTableName(normalizeIdentifier(tblName));
      if (tblCopy.getPartitionKeys() != null) {
        for (FieldSchema fs : tblCopy.getPartitionKeys()) {
          fs.setName(normalizeIdentifier(fs.getName()));
        }
      }
      if (tbl.getSd() != null) {
        // Deduplicate the storage descriptor: cache it once by hash and
        // clear it on the table copy.
        byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(tbl.getSd(), md);
        StorageDescriptor sd = tbl.getSd();
        increSd(sd, sdHash);
        tblCopy.setSd(null);
        wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters());
      } else {
        wrapper = new TableWrapper(tblCopy, null, null, null);
      }
      return wrapper;
    }

Code example source: apache/hive

    private void updateTableObj(Table newTable, SharedCache sharedCache) {
      byte[] sdHash = getSdHash();
      // Remove old table object's sd hash
      if (sdHash != null) {
        sharedCache.decrSd(sdHash);
      }
      Table tblCopy = newTable.deepCopy();
      if (tblCopy.getPartitionKeys() != null) {
        for (FieldSchema fs : tblCopy.getPartitionKeys()) {
          fs.setName(StringUtils.normalizeIdentifier(fs.getName()));
        }
      }
      setTable(tblCopy);
      if (tblCopy.getSd() != null) {
        // Cache the new storage descriptor by hash and detach it from the table copy.
        sdHash = MetaStoreServerUtils.hashStorageDescriptor(tblCopy.getSd(), md);
        StorageDescriptor sd = tblCopy.getSd();
        sharedCache.increSd(sd, sdHash);
        tblCopy.setSd(null);
        setSdHash(sdHash);
        setLocation(sd.getLocation());
        setParameters(sd.getParameters());
      } else {
        setSdHash(null);
        setLocation(null);
        setParameters(null);
      }
    }
