This article collects code examples of the Java method org.apache.hadoop.hive.metastore.api.Table.setSd() and shows how Table.setSd() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Table.setSd() method:
Package path: org.apache.hadoop.hive.metastore.api.Table
Class name: Table
Method name: setSd
Description: none provided
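setSd() attaches a StorageDescriptor (columns, SerDe, input/output formats, location) to a Thrift Table object. Before the collected examples, here is a minimal sketch of typical usage; the database name, table name, column, and the format/SerDe class names used below are illustrative assumptions, not code from any of the quoted projects:

import java.util.Arrays;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class TableSetSdSketch {
  public static Table buildExampleTable() {
    // Describe the physical layout: columns, formats, and SerDe.
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(Arrays.asList(new FieldSchema("id", "int", "example column")));
    sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
    sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");

    SerDeInfo serDeInfo = new SerDeInfo();
    serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
    serDeInfo.setParameters(new HashMap<String, String>());
    sd.setSerdeInfo(serDeInfo);

    // Attach the storage descriptor to the table; passing a null
    // storage descriptor makes createTable fail with a MetaException,
    // as the null-storage-descriptor tests below demonstrate.
    Table table = new Table();
    table.setDbName("default");           // assumed database
    table.setTableName("example_table");  // assumed table name
    table.setSd(sd);
    return table;
  }
}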
Code example source: origin: apache/drill
@Override
public void analyzeIndexDefinition(Table baseTable, Index index,
    Table indexTable) throws HiveException {
  StorageDescriptor storageDesc = index.getSd();
  if (this.usesIndexTable() && indexTable != null) {
    StorageDescriptor indexTableSd = storageDesc.deepCopy();
    List<FieldSchema> indexTblCols = indexTableSd.getCols();
    FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
    indexTblCols.add(bucketFileName);
    FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
    indexTblCols.add(offSets);
    indexTable.setSd(indexTableSd);
  }
}
Code example source: origin: apache/drill
@Override
public void analyzeIndexDefinition(Table baseTable, Index index,
    Table indexTable) throws HiveException {
  StorageDescriptor storageDesc = index.getSd();
  if (this.usesIndexTable() && indexTable != null) {
    StorageDescriptor indexTableSd = storageDesc.deepCopy();
    List<FieldSchema> indexTblCols = indexTableSd.getCols();
    FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
    indexTblCols.add(bucketFileName);
    FieldSchema offSets = new FieldSchema("_offset", "bigint", "");
    indexTblCols.add(offSets);
    FieldSchema bitmaps = new FieldSchema("_bitmaps", "array<bigint>", "");
    indexTblCols.add(bitmaps);
    indexTable.setSd(indexTableSd);
  }
}
Code example source: origin: apache/hive
@Test(expected = MetaException.class)
public void testCreateTableNullStorageDescriptor() throws Exception {
  Table table = testTables[0];
  table.setSd(null);
  client.createTable(table);
}
Code example source: origin: prestodb/presto
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table();
    result.setDbName(table.getDatabaseName());
    result.setTableName(table.getTableName());
    result.setOwner(table.getOwner());
    result.setTableType(table.getTableType());
    result.setParameters(table.getParameters());
    result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList()));
    result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    result.setViewOriginalText(table.getViewOriginalText().orElse(null));
    result.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return result;
}
Code example source: origin: apache/hive
static Table assemble(TableWrapper wrapper, SharedCache sharedCache) {
  Table t = wrapper.getTable().deepCopy();
  if (wrapper.getSdHash() != null) {
    StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
    if (sdCopy.getBucketCols() == null) {
      sdCopy.setBucketCols(Collections.emptyList());
    }
    if (sdCopy.getSortCols() == null) {
      sdCopy.setSortCols(Collections.emptyList());
    }
    if (sdCopy.getSkewedInfo() == null) {
      sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
          Collections.emptyList(), Collections.emptyMap()));
    }
    sdCopy.setLocation(wrapper.getLocation());
    sdCopy.setParameters(wrapper.getParameters());
    t.setSd(sdCopy);
  }
  return t;
}
Code example source: origin: apache/hive
public TableBuilder(Database database) {
  this.database = database;
  partitions = new ArrayList<>();
  columnNames = new ArrayList<>();
  columnTypes = new ArrayList<>();
  partitionKeys = Collections.emptyList();
  table = new Table();
  table.setDbName(database.getName());
  table.setTableType(TableType.MANAGED_TABLE.toString());
  Map<String, String> tableParams = new HashMap<String, String>();
  tableParams.put("transactional", Boolean.TRUE.toString());
  table.setParameters(tableParams);
  sd = new StorageDescriptor();
  sd.setInputFormat(OrcInputFormat.class.getName());
  sd.setOutputFormat(OrcOutputFormat.class.getName());
  sd.setNumBuckets(1);
  table.setSd(sd);
  serDeInfo = new SerDeInfo();
  serDeInfo.setParameters(new HashMap<String, String>());
  serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  serDeInfo.setSerializationLib(OrcSerde.class.getName());
  sd.setSerdeInfo(serDeInfo);
}
Code example source: origin: apache/hive
@Test(expected = MetaException.class)
public void testAlterTableNullStorageDescriptorInNew() throws Exception {
  Table originalTable = testTables[0];
  Table newTable = originalTable.deepCopy();
  newTable.setSd(null);
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
}
Code example source: origin: apache/hive
@Test
public void testAlterTableChangePosNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
  FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
  FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
  FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
  FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
  StorageDescriptor oldSd = new StorageDescriptor();
  oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
  Table oldTable = new Table();
  oldTable.setDbName("default");
  oldTable.setTableName("test_table");
  oldTable.setSd(oldSd);
  StorageDescriptor newSd = new StorageDescriptor(oldSd);
  newSd.setCols(Arrays.asList(col1, col4, col2, col3));
  Table newTable = new Table(oldTable);
  newTable.setSd(newSd);
  RawStore msdb = Mockito.mock(RawStore.class);
  Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
      getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
  HiveAlterHandler handler = new HiveAlterHandler();
  handler.setConf(conf);
  handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null);
}
Code example source: origin: apache/hive
private void addSd(ArrayList<FieldSchema> cols, Table tbl) {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(cols);
  sd.setCompressed(false);
  sd.setNumBuckets(1);
  sd.setParameters(new HashMap<String, String>());
  sd.setBucketCols(new ArrayList<String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters()
      .put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.setSortCols(new ArrayList<Order>());
  sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
  sd.setInputFormat(HiveInputFormat.class.getName());
  sd.setOutputFormat(HiveOutputFormat.class.getName());
  tbl.setSd(sd);
}
Code example source: origin: apache/hive
@Test
public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
  FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
  FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
  FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
  FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
  StorageDescriptor oldSd = new StorageDescriptor();
  oldSd.setCols(Arrays.asList(col1, col2, col3));
  Table oldTable = new Table();
  oldTable.setDbName("default");
  oldTable.setTableName("test_table");
  oldTable.setSd(oldSd);
  StorageDescriptor newSd = new StorageDescriptor(oldSd);
  newSd.setCols(Arrays.asList(col1, col2, col3, col4));
  Table newTable = new Table(oldTable);
  newTable.setSd(newSd);
  RawStore msdb = Mockito.mock(RawStore.class);
  Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
      getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
  HiveAlterHandler handler = new HiveAlterHandler();
  handler.setConf(conf);
  handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null);
}
Code example source: origin: apache/hive
private static void createTable(String tableName, String tablePerm) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(DATABASE);
  tbl.setTableName(tableName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(ColumnHolder.colMapping.get(tableName));
  tbl.setSd(sd);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
  sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(
      org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
  tbl.setPartitionKeys(ColumnHolder.partitionCols);
  hmsc.createTable(tbl);
  Path path = new Path(warehousedir, tableName);
  FileSystem fs = path.getFileSystem(hiveConf);
  fs.setPermission(path, new FsPermission(tablePerm));
}
Code example source: origin: apache/incubator-gobblin
public static Table getTestTable(String dbName, String tableName) {
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("/tmp/test");
  table.setSd(sd);
  return table;
}
Code example source: origin: apache/hive
private void createTable(String dbName, String tableName) throws Exception {
  String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME
      : dbName;
  try {
    msc.dropTable(databaseName, tableName);
  } catch (Exception e) {
    // can fail with NoSuchObjectException
  }
  Table tbl = new Table();
  tbl.setDbName(databaseName);
  tbl.setTableName(tableName);
  tbl.setTableType("MANAGED_TABLE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(getTableColumns());
  tbl.setPartitionKeys(getPartitionKeys());
  tbl.setSd(sd);
  sd.setBucketCols(new ArrayList<String>(2));
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName());
  sd.setInputFormat(RCFileInputFormat.class.getName());
  sd.setOutputFormat(RCFileOutputFormat.class.getName());
  Map<String, String> tableParams = new HashMap<String, String>();
  tbl.setParameters(tableParams);
  msc.createTable(tbl);
}
Code example source: origin: apache/hive
private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
  try {
    db.dropTable(catName, dbName, tableName);
    Table table = new Table();
    table.setCatName(catName);
    table.setDbName(dbName);
    table.setTableName(tableName);
    FieldSchema col1 = new FieldSchema("key", "string", "");
    FieldSchema col2 = new FieldSchema("value", "int", "");
    FieldSchema col3 = new FieldSchema("city", "string", "");
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.setInputFormat(TextInputFormat.class.getCanonicalName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
    sd.setCols(Arrays.asList(col1, col2));
    table.setPartitionKeys(Arrays.asList(col3));
    table.setSd(sd);
    db.createTable(table);
    return db.getTable(catName, dbName, tableName);
  } catch (Exception exception) {
    fail("Unable to drop and create table " + StatsUtils
        .getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils
        .stringifyException(exception));
    throw exception;
  }
}
Code example source: origin: apache/hive
private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
  try {
    db.dropTable(catName, dbName, tableName);
    Table table = new Table();
    table.setCatName(catName);
    table.setDbName(dbName);
    table.setTableName(tableName);
    FieldSchema col1 = new FieldSchema("key", "string", "");
    FieldSchema col2 = new FieldSchema("value", "int", "");
    FieldSchema col3 = new FieldSchema("city", "string", "");
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    sd.setInputFormat(TextInputFormat.class.getCanonicalName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
    sd.setCols(Arrays.asList(col1, col2));
    table.setPartitionKeys(Arrays.asList(col3));
    table.setSd(sd);
    db.createTable(table);
    return db.getTable(catName, dbName, tableName);
  } catch (Exception exception) {
    fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because "
        + StringUtils.stringifyException(exception));
    throw exception;
  }
}
Code example source: origin: apache/hive
private Table getTable(String dbName, String tblName, String typeName) throws NoSuchObjectException, MetaException, TException, AlreadyExistsException, InvalidObjectException {
  msc.dropTable(dbName, tblName);
  silentDropDatabase(dbName);
  msc.dropType(typeName);
  Type typ1 = new Type();
  typ1.setName(typeName);
  typ1.setFields(new ArrayList<FieldSchema>(1));
  typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
  msc.createType(typ1);
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tblName);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tblName);
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
  sd.setInputFormat(HiveInputFormat.class.getName());
  sd.setOutputFormat(HiveOutputFormat.class.getName());
  tbl.setSd(sd);
  sd.setCols(typ1.getFields());
  sd.setSerdeInfo(new SerDeInfo());
  return tbl;
}
Code example source: origin: apache/hive
@Test
public void testCreateTableDefaultValuesView() throws Exception {
  Table table = new Table();
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>();
  table.setDbName(DEFAULT_DATABASE);
  table.setTableName("test_table_2");
  table.setTableType("VIRTUAL_VIEW");
  cols.add(new FieldSchema("column_name", "int", null));
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  client.createTable(table);
  Table createdTable = client.getTable(table.getDbName(), table.getTableName());
  // No location should be created for views
  Assert.assertNull("Storage descriptor location should be null",
      createdTable.getSd().getLocation());
}
Code example source: origin: apache/hive
@Test
public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
  Table table = new Table();
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>();
  table.setDbName(OTHER_DATABASE);
  table.setTableName("test_table_2");
  cols.add(new FieldSchema("column_name", "int", null));
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  client.createTable(table);
  Table createdTable = client.getTable(table.getDbName(), table.getTableName());
  Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
      + "/" + table.getDbName() + ".db/" + table.getTableName(),
      createdTable.getSd().getLocation());
}
Code example source: origin: apache/hive
private TableWrapper createTableWrapper(String catName, String dbName, String tblName,
    Table tbl) {
  TableWrapper wrapper;
  Table tblCopy = tbl.deepCopy();
  tblCopy.setCatName(normalizeIdentifier(catName));
  tblCopy.setDbName(normalizeIdentifier(dbName));
  tblCopy.setTableName(normalizeIdentifier(tblName));
  if (tblCopy.getPartitionKeys() != null) {
    for (FieldSchema fs : tblCopy.getPartitionKeys()) {
      fs.setName(normalizeIdentifier(fs.getName()));
    }
  }
  if (tbl.getSd() != null) {
    byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(tbl.getSd(), md);
    StorageDescriptor sd = tbl.getSd();
    increSd(sd, sdHash);
    tblCopy.setSd(null);
    wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters());
  } else {
    wrapper = new TableWrapper(tblCopy, null, null, null);
  }
  return wrapper;
}
Code example source: origin: apache/hive
private void updateTableObj(Table newTable, SharedCache sharedCache) {
  byte[] sdHash = getSdHash();
  // Remove old table object's sd hash
  if (sdHash != null) {
    sharedCache.decrSd(sdHash);
  }
  Table tblCopy = newTable.deepCopy();
  if (tblCopy.getPartitionKeys() != null) {
    for (FieldSchema fs : tblCopy.getPartitionKeys()) {
      fs.setName(StringUtils.normalizeIdentifier(fs.getName()));
    }
  }
  setTable(tblCopy);
  if (tblCopy.getSd() != null) {
    sdHash = MetaStoreServerUtils.hashStorageDescriptor(tblCopy.getSd(), md);
    StorageDescriptor sd = tblCopy.getSd();
    sharedCache.increSd(sd, sdHash);
    tblCopy.setSd(null);
    setSdHash(sdHash);
    setLocation(sd.getLocation());
    setParameters(sd.getParameters());
  } else {
    setSdHash(null);
    setLocation(null);
    setParameters(null);
  }
}