This article collects some Java code examples of the org.apache.hadoop.hive.metastore.api.Table.getOwner() method and shows how Table.getOwner() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar sources, and were extracted from selected projects, so they should serve as useful references. The details of Table.getOwner() are as follows:
Package path: org.apache.hadoop.hive.metastore.api.Table
Class name: Table
Method name: getOwner
Method description: none provided
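Before the collected snippets, here is a minimal, self-contained sketch of how a table's owner can be read through the Hive metastore client. It is not taken from any of the projects below; the metastore URI and the database/table names ("default", "example_table") are placeholders.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class TableOwnerExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Placeholder URI; point this at a real metastore service.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // "default" and "example_table" are placeholder names.
      Table table = client.getTable("default", "example_table");
      // getOwner() returns the owner recorded in the metastore for this table;
      // it may be null if no owner was set, so callers often null-check it.
      System.out.println("Table owner: " + table.getOwner());
    } finally {
      client.close();
    }
  }
}

Note that the owner field can be unset, so callers typically check for null before using it, as some of the examples below do.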
Code example source: apache/hive
/**
 * @return The owner of the table.
 * @see org.apache.hadoop.hive.metastore.api.Table#getOwner()
 */
public String getOwner() {
  return tTable.getOwner();
}
Code example source: apache/hive
convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
Code example source: apache/drill
/**
 * @return The owner of the table.
 * @see org.apache.hadoop.hive.metastore.api.Table#getOwner()
 */
public String getOwner() {
  return tTable.getOwner();
}
Code example source: apache/hive
return getOwner();
Code example source: prestodb/presto
default boolean isTableOwner(String user, String databaseName, String tableName)
{
    // a table can only be owned by a user
    Optional<Table> table = getTable(databaseName, tableName);
    return table.isPresent() && user.equals(table.get().getOwner());
}
Code example source: apache/hive
Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner());
Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType());
Code example source: apache/hive
UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
    UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Object>() {
// ...
") or table owner(" + t.getOwner() + "), giving up");
throw new IOException("Unable to stat file: " + p);
Code example source: apache/hive
tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner());
Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, tbl.getOwnerType());
Code example source: apache/drill
UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
    UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Object>() {
// ...
") or table owner(" + t.getOwner() + "), giving up");
throw new IOException("Unable to stat file: " + p);
Code example source: apache/hive
Assert.assertNull("Comparing OwnerName", createdTable.getOwner());
Assert.assertNotEquals("Comparing CreateTime", 0, createdTable.getCreateTime());
Assert.assertEquals("Comparing LastAccessTime", 0, createdTable.getLastAccessTime());
Code example source: prestodb/presto
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            .setDataColumns(schema.stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setPartitionColumns(table.getPartitionKeys().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
    return tableBuilder.build();
}
Code example source: apache/hive
HCatTable(Table hiveTable) throws HCatException {
  tableName = hiveTable.getTableName();
  dbName = hiveTable.getDbName();
  tableType = hiveTable.getTableType();
  isExternal = hiveTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString());
  sd = hiveTable.getSd();
  for (FieldSchema colFS : sd.getCols()) {
    cols.add(HCatSchemaUtils.getHCatFieldSchema(colFS));
  }
  partCols = new ArrayList<HCatFieldSchema>();
  for (FieldSchema colFS : hiveTable.getPartitionKeys()) {
    partCols.add(HCatSchemaUtils.getHCatFieldSchema(colFS));
  }
  if (hiveTable.getParameters() != null) {
    tblProps.putAll(hiveTable.getParameters());
  }
  if (StringUtils.isNotBlank(tblProps.get("comment"))) {
    comment = tblProps.get("comment");
  }
  owner = hiveTable.getOwner();
}
Code example source: apache/hive
throwGetObjErr(e, hivePrivObject);
return userName.equals(thriftTableObj.getOwner());
Code example source: apache/drill
// ...
    runJobAsSelf(runAs) ? runAs : t.getOwner());
final CompactorMR mr = new CompactorMR();
launchedJob = true;
mr.run(conf, jobName.toString(), t, sd, txns, ci, su, txnHandler);
} else {
  UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
      UserGroupInformation.getLoginUser());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
Code example source: apache/incubator-gobblin
private static State getTableProps(Table table) {
  State tableProps = new State();
  for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
    tableProps.setProp(entry.getKey(), entry.getValue());
  }
  if (table.isSetCreateTime()) {
    tableProps.setProp(HiveConstants.CREATE_TIME, table.getCreateTime());
  }
  if (table.isSetLastAccessTime()) {
    tableProps.setProp(HiveConstants.LAST_ACCESS_TIME, table.getLastAccessTime());
  }
  if (table.isSetOwner()) {
    tableProps.setProp(HiveConstants.OWNER, table.getOwner());
  }
  if (table.isSetTableType()) {
    tableProps.setProp(HiveConstants.TABLE_TYPE, table.getTableType());
  }
  if (table.isSetRetention()) {
    tableProps.setProp(HiveConstants.RETENTION, table.getRetention());
  }
  return tableProps;
}
Code example source: apache/drill
throwGetObjErr(e, hivePrivObject);
return userName.equals(thriftTableObj.getOwner());
Code example source: apache/hive
final StatsUpdater su = StatsUpdater.init(ci, msc.findColumnsWithStats(
    CompactionInfo.compactionInfoToStruct(ci)), conf,
    runJobAsSelf(ci.runAs) ? ci.runAs : t.getOwner());
final CompactorMR mr = new CompactorMR();
launchedJob = true;
mr.run(conf, jobName.toString(), t, p, sd, tblValidWriteIds, ci, su, msc);
} else {
  UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
      UserGroupInformation.getLoginUser());
  final Partition fp = p;
Code example source: apache/kylin
builder.setSdInputFormat(table.getSd().getInputFormat());
builder.setSdOutputFormat(table.getSd().getOutputFormat());
builder.setOwner(table.getOwner());
builder.setLastAccessTime(table.getLastAccessTime());
builder.setTableType(table.getTableType());
Code example source: KylinOLAP/Kylin
map.put(MetadataConstants.TABLE_EXD_IF, table.getSd().getInputFormat());
map.put(MetadataConstants.TABLE_EXD_OF, table.getSd().getOutputFormat());
map.put(MetadataConstants.TABLE_EXD_OWNER, table.getOwner());
map.put(MetadataConstants.TABLE_EXD_LAT, String.valueOf(table.getLastAccessTime()));
map.put(MetadataConstants.TABLE_EXD_PC, partitionColumnString.toString());
Code example source: org.apache.hadoop.hive/hive-exec
/**
 * @return The owner of the table.
 * @see org.apache.hadoop.hive.metastore.api.Table#getOwner()
 */
public String getOwner() {
  return tTable.getOwner();
}
The content above is collected from the internet; if it infringes your rights, please contact the author to have it removed.