This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.isView() method and shows how Table.isView() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they reflect real-world usage and should serve as a useful reference. Details of the Table.isView() method are as follows:

Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: isView
Method description: none provided
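
Before the extracted examples, here is a minimal, self-contained sketch of calling Table.isView() directly. It is illustrative only: the database name "default" and table name "my_table" are hypothetical placeholders, and it assumes a working HiveConf/metastore connection is available through Hive.get().

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class IsViewExample {
  public static void main(String[] args) throws HiveException {
    // Hive.get() returns a metastore client bound to the current session's HiveConf.
    Hive db = Hive.get();
    // "default" / "my_table" are hypothetical; substitute a real database and table.
    Table t = db.getTable("default", "my_table");
    // isView() is true only for virtual views; base tables (and, in recent
    // Hive versions, materialized views) return false.
    if (t.isView()) {
      System.out.println(t.getTableName() + " is a view");
    } else {
      System.out.println(t.getTableName() + " is not a view");
    }
  }
}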
Code example source: apache/hive

/**
 * @return whether this table is actually a view
 */
public boolean isView() {
  return table.isView();
}
Code example source: apache/drill

/**
 * @return whether this table is actually a view
 */
public boolean isView() { return table.isView(); }
Code example source: apache/hive

public TYPE getDescType() {
  if (table.isView() || table.isMaterializedView()) {
    return TYPE.VIEW;
  }
  return TYPE.TABLE;
}
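
A note on the two checks above: in Hive, isView() is true only for virtual views, while materialized views report a separate table type. Code that wants to treat both kinds of view uniformly, as here, must therefore also test isMaterializedView().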
Code example source: apache/drill

public TYPE getTableType() {
  if (table.isView() || table.isMaterializedView()) {
    return TYPE.VIEW;
  }
  return TYPE.TABLE;
}
Code example source: apache/hive

public static String getTableInformation(Table table, boolean isOutputPadded) {
  StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
  // Table Metadata
  tableInfo.append(LINE_DELIM).append("# Detailed Table Information").append(LINE_DELIM);
  getTableMetaDataInformation(tableInfo, table, isOutputPadded);
  // Storage information.
  tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
  getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
  if (table.isView() || table.isMaterializedView()) {
    tableInfo.append(LINE_DELIM)
        .append(table.isView() ? "# View Information" : "# Materialized View Information")
        .append(LINE_DELIM);
    getViewInfo(tableInfo, table);
  }
  return tableInfo.toString();
}
Code example source: apache/drill

public static String getTableInformation(Table table, boolean isOutputPadded) {
  StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
  // Table Metadata
  tableInfo.append(LINE_DELIM).append("# Detailed Table Information").append(LINE_DELIM);
  getTableMetaDataInformation(tableInfo, table, isOutputPadded);
  // Storage information.
  tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
  getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
  if (table.isView() || table.isMaterializedView()) {
    tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM);
    getViewInfo(tableInfo, table);
  }
  return tableInfo.toString();
}
Code example source: apache/hive

private Set<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
  Set<String> tablesUsed = new HashSet<>();
  for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
    Table table = topOp.getConf().getTableMetadata();
    if (!table.isMaterializedTable() && !table.isView()) {
      // Add to signature
      tablesUsed.add(table.getFullyQualifiedName());
    }
  }
  return tablesUsed;
}
Code example source: apache/hive

/**
 * Create an empty partition.
 * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
 */
public Partition(Table tbl) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Partition tPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  if (!tbl.isView()) {
    tPart.setSd(tbl.getTTable().getSd().deepCopy());
  }
  initialize(tbl, tPart);
}
Code example source: apache/hive

public TableExport(Paths paths, TableSpec tableSpec, ReplicationSpec replicationSpec, Hive db,
    String distCpDoAsUser, HiveConf conf, MmContext mmCtx) {
  this.tableSpec = (tableSpec != null
      && tableSpec.tableHandle.isTemporary()
      && replicationSpec.isInReplicationScope())
      ? null
      : tableSpec;
  this.replicationSpec = replicationSpec;
  if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
      (this.tableSpec != null && this.tableSpec.tableHandle.isView())) {
    this.replicationSpec.setIsMetadataOnly(true);
    this.tableSpec.tableHandle.setStatsStateLikeNewTable();
  }
  this.db = db;
  this.distCpDoAsUser = distCpDoAsUser;
  this.conf = conf;
  this.paths = paths;
  this.mmCtx = mmCtx;
}
Code example source: apache/drill

/**
 * Create an empty partition.
 * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
 */
public Partition(Table tbl) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Partition tPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  if (!tbl.isView()) {
    tPart.setSd(tbl.getTTable().getSd().deepCopy());
  }
  initialize(tbl, tPart);
}
Code example source: apache/hive

public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }
  org.apache.hadoop.hive.metastore.api.Partition tpart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);
  if (!tbl.isView()) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
Code example source: apache/hive

private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
  TableDesc tableDesc = Utilities.getTableDesc(table);
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), tableDesc);
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  TableDesc table = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, table);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
Code example source: apache/hive

// Fragment (surrounding code truncated in the original excerpt):
if (qlMdTable.isView()) {
  withinContext.replicationSpec.setIsMetadataOnly(true);
}

Code example source: apache/drill
public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }
  org.apache.hadoop.hive.metastore.api.Partition tpart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);
  if (!tbl.isView()) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
Code example source: apache/drill

private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
  TableDesc tableDesc = Utilities.getTableDesc(table);
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), tableDesc);
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  TableDesc table = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, table);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
Code example source: apache/drill

private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl,
    boolean isOutputPadded) {
  formatOutput("Database:", tbl.getDbName(), tableInfo);
  formatOutput("Owner:", tbl.getOwner(), tableInfo);
  formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
  formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
  formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
  if (!tbl.isView()) {
    formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
  }
  formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
  if (tbl.getParameters().size() > 0) {
    tableInfo.append("Table Parameters:").append(LINE_DELIM);
    displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
  }
}
Code example source: apache/hive

private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl,
    boolean isOutputPadded) {
  formatOutput("Database:", tbl.getDbName(), tableInfo);
  formatOutput("OwnerType:", (tbl.getOwnerType() != null) ? tbl.getOwnerType().name() : "null", tableInfo);
  formatOutput("Owner:", tbl.getOwner(), tableInfo);
  formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
  formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
  formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
  if (!tbl.isView()) {
    formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
  }
  formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
  if (tbl.getParameters().size() > 0) {
    tableInfo.append("Table Parameters:").append(LINE_DELIM);
    displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
  }
}
Code example source: apache/hive

// Fragment of Table's validity checks (surrounding code truncated in the original excerpt):
throw new HiveException("at least one column must be specified for the table");

if (!isView()) {
  if (null == getDeserializerFromMetaStore(false)) {
    throw new HiveException("must specify a non-null serDe");
  }
}

if (isView() || isMaterializedView()) {
  assert (getViewOriginalText() != null);
  assert (getViewExpandedText() != null);
}
Code example source: apache/hive

private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
    throws SemanticException {
  if (tbl.isView()) {
    if (!expectView) {
      throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
    }
    switch (op) {
    case ADDPARTITION:
    case DROPPARTITION:
    case RENAMEPARTITION:
    case ADDPROPS:
    case DROPPROPS:
    case RENAME:
      // allow this form
      break;
    default:
      throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
    }
  } else {
    if (expectView) {
      throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
    }
  }
  if (tbl.isNonNative() && !AlterTableTypes.nonNativeTableAllowedTypes.contains(op)) {
    throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
  }
}
Code example source: apache/drill

private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
    throws SemanticException {
  if (tbl.isView()) {
    if (!expectView) {
      throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
    }
    switch (op) {
    case ADDPARTITION:
    case DROPPARTITION:
    case RENAMEPARTITION:
    case ADDPROPS:
    case DROPPROPS:
    case RENAME:
      // allow this form
      break;
    default:
      throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
    }
  } else {
    if (expectView) {
      throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
    }
  }
  if (tbl.isNonNative()) {
    throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
  }
}