Usage of the org.apache.hadoop.hive.ql.metadata.Table.isView() method, with code examples


This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Table.isView() method and shows how it is used in practice. The examples come from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.isView():

Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: isView

About Table.isView

The source page gives no description. Per the method's own Javadoc (quoted in the first example below), it returns whether this table is actually a view.
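As a supplement, here is a hedged minimal usage sketch. It assumes hive-exec on the classpath and a live Hive session; the database and table names are placeholders, not taken from the examples below.

    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    // Hedged sketch: resolve a table through the Hive metadata layer, then branch on isView().
    public class IsViewDemo {
      public static void main(String[] args) throws HiveException {
        Hive db = Hive.get();                        // metastore client for the current session
        Table t = db.getTable("default", "my_tbl");  // placeholder db/table names
        if (t.isView()) {
          // A view is metadata-only: it carries view text but no data files of its own.
          System.out.println("view definition: " + t.getViewOriginalText());
        } else {
          System.out.println("data location: " + t.getDataLocation());
        }
      }
    }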

Code examples

Code example source: apache/hive

    /**
     * @return whether this table is actually a view
     */
    public boolean isView() {
      return table.isView();
    }

Code example source: apache/drill

    /**
     * @return whether this table is actually a view
     */
    public boolean isView() { return table.isView(); }

Code example source: apache/hive

    public TYPE getDescType() {
      if (table.isView() || table.isMaterializedView()) {
        return TYPE.VIEW;
      }
      return TYPE.TABLE;
    }

Code example source: apache/drill

    public TYPE getTableType() {
      if (table.isView() || table.isMaterializedView()) {
        return TYPE.VIEW;
      }
      return TYPE.TABLE;
    }
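A word on this pair of predicates: in Hive's Table class, isView() answers true only for virtual views, while materialized views answer true to isMaterializedView() instead, which is why the two getters above must check both before collapsing to TYPE.VIEW. A hedged sketch of the distinction (the helper class is illustrative, not from either project):

    import org.apache.hadoop.hive.ql.metadata.Table;

    // Illustrative helper (not from either project): labels the three cases the
    // predicates distinguish before the getters above collapse views to TYPE.VIEW.
    final class ViewKind {
      static String describe(Table table) {
        if (table.isView()) {
          return "virtual view";         // created with CREATE VIEW ...
        }
        if (table.isMaterializedView()) {
          return "materialized view";    // created with CREATE MATERIALIZED VIEW ...
        }
        return "base table";
      }
    }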

Code example source: apache/hive

    public static String getTableInformation(Table table, boolean isOutputPadded) {
      StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
      // Table Metadata
      tableInfo.append(LINE_DELIM).append("# Detailed Table Information").append(LINE_DELIM);
      getTableMetaDataInformation(tableInfo, table, isOutputPadded);
      // Storage information.
      tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
      getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
      if (table.isView() || table.isMaterializedView()) {
        tableInfo.append(LINE_DELIM)
            .append(table.isView() ? "# View Information" : "# Materialized View Information")
            .append(LINE_DELIM);
        getViewInfo(tableInfo, table);
      }
      return tableInfo.toString();
    }

Code example source: apache/drill

    public static String getTableInformation(Table table, boolean isOutputPadded) {
      StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
      // Table Metadata
      tableInfo.append(LINE_DELIM).append("# Detailed Table Information").append(LINE_DELIM);
      getTableMetaDataInformation(tableInfo, table, isOutputPadded);
      // Storage information.
      tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
      getStorageDescriptorInfo(tableInfo, table.getTTable().getSd());
      if (table.isView() || table.isMaterializedView()) {
        tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM);
        getViewInfo(tableInfo, table);
      }
      return tableInfo.toString();
    }
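The two copies diverge in one detail: the apache/hive version chooses its heading with table.isView(), printing "# Materialized View Information" for materialized views, while the older copy embedded in apache/drill always prints "# View Information".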

Code example source: apache/hive

    private Set<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
      Set<String> tablesUsed = new HashSet<>();
      for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
        Table table = topOp.getConf().getTableMetadata();
        if (!table.isMaterializedTable() && !table.isView()) {
          // Add to signature
          tablesUsed.add(table.getFullyQualifiedName());
        }
      }
      return tablesUsed;
    }

Code example source: apache/hive

    /**
     * Create an empty partition.
     * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
     */
    public Partition(Table tbl) throws HiveException {
      org.apache.hadoop.hive.metastore.api.Partition tPart =
          new org.apache.hadoop.hive.metastore.api.Partition();
      if (!tbl.isView()) {
        tPart.setSd(tbl.getTTable().getSd().deepCopy());
      }
      initialize(tbl, tPart);
    }
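The !tbl.isView() guard exists because a view stores no data of its own, so there is no storage descriptor worth copying into the new partition; the same guard recurs in createMetaPartitionObject() further below.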

Code example source: apache/hive

    public TableExport(Paths paths, TableSpec tableSpec, ReplicationSpec replicationSpec, Hive db,
        String distCpDoAsUser, HiveConf conf, MmContext mmCtx) {
      this.tableSpec = (tableSpec != null
          && tableSpec.tableHandle.isTemporary()
          && replicationSpec.isInReplicationScope())
          ? null
          : tableSpec;
      this.replicationSpec = replicationSpec;
      if (conf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY) ||
          (this.tableSpec != null && this.tableSpec.tableHandle.isView())) {
        this.replicationSpec.setIsMetadataOnly(true);
        this.tableSpec.tableHandle.setStatsStateLikeNewTable();
      }
      this.db = db;
      this.distCpDoAsUser = distCpDoAsUser;
      this.conf = conf;
      this.paths = paths;
      this.mmCtx = mmCtx;
    }
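Here isView() feeds replication: exporting a view is forced onto the metadata-only path via setIsMetadataOnly(true), since there are no data files to copy. The short event-handler fragment later in this list applies the same rule through qlMdTable.isView().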

Code example source: apache/drill

    /**
     * Create an empty partition.
     * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
     */
    public Partition(Table tbl) throws HiveException {
      org.apache.hadoop.hive.metastore.api.Partition tPart =
          new org.apache.hadoop.hive.metastore.api.Partition();
      if (!tbl.isView()) {
        tPart.setSd(tbl.getTTable().getSd().deepCopy());
      }
      initialize(tbl, tPart);
    }

Code example source: apache/hive

    public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
        Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
      List<String> pvals = new ArrayList<String>();
      for (FieldSchema field : tbl.getPartCols()) {
        String val = partSpec.get(field.getName());
        if (val == null || val.isEmpty()) {
          throw new HiveException("partition spec is invalid; field "
              + field.getName() + " does not exist or is empty");
        }
        pvals.add(val);
      }
      org.apache.hadoop.hive.metastore.api.Partition tpart =
          new org.apache.hadoop.hive.metastore.api.Partition();
      tpart.setDbName(tbl.getDbName());
      tpart.setTableName(tbl.getTableName());
      tpart.setValues(pvals);
      if (!tbl.isView()) {
        tpart.setSd(tbl.getSd().deepCopy());
        tpart.getSd().setLocation((location != null) ? location.toString() : null);
      }
      return tpart;
    }

Code example source: apache/hive

    private FetchWork convertToWork() throws HiveException {
      inputs.clear();
      Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
      TableDesc tableDesc = Utilities.getTableDesc(table);
      if (!table.isPartitioned()) {
        inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
        FetchWork work = new FetchWork(table.getPath(), tableDesc);
        PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
        work.setSplitSample(splitSample);
        return work;
      }
      List<Path> listP = new ArrayList<Path>();
      List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
      for (Partition partition : partsList.getNotDeniedPartns()) {
        inputs.add(new ReadEntity(partition, parent, parent == null));
        listP.add(partition.getDataLocation());
        partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
      }
      Table sourceTable = partsList.getSourceTable();
      inputs.add(new ReadEntity(sourceTable, parent, parent == null));
      TableDesc table = Utilities.getTableDesc(sourceTable);
      FetchWork work = new FetchWork(listP, partP, table);
      if (!work.getPartDesc().isEmpty()) {
        PartitionDesc part0 = work.getPartDesc().get(0);
        PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
        work.setSplitSample(splitSample);
      }
      return work;
    }
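One detail worth noting: in new ReadEntity(table, parent, !table.isView() && parent == null), the third argument appears to be ReadEntity's isDirect flag (an assumption based on Hive's ReadEntity constructors), so a view's read entity is never marked as a direct read of stored data.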

Code example source: apache/hive

    // Fragment (the enclosing method is truncated in the source listing); closing brace added:
    if (qlMdTable.isView()) {
      withinContext.replicationSpec.setIsMetadataOnly(true);
    }

Code example source: apache/drill

    public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
        Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
      List<String> pvals = new ArrayList<String>();
      for (FieldSchema field : tbl.getPartCols()) {
        String val = partSpec.get(field.getName());
        if (val == null || val.isEmpty()) {
          throw new HiveException("partition spec is invalid; field "
              + field.getName() + " does not exist or is empty");
        }
        pvals.add(val);
      }
      org.apache.hadoop.hive.metastore.api.Partition tpart =
          new org.apache.hadoop.hive.metastore.api.Partition();
      tpart.setDbName(tbl.getDbName());
      tpart.setTableName(tbl.getTableName());
      tpart.setValues(pvals);
      if (!tbl.isView()) {
        tpart.setSd(tbl.getSd().deepCopy());
        tpart.getSd().setLocation((location != null) ? location.toString() : null);
      }
      return tpart;
    }

Code example source: apache/drill

    private FetchWork convertToWork() throws HiveException {
      inputs.clear();
      Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
      TableDesc tableDesc = Utilities.getTableDesc(table);
      if (!table.isPartitioned()) {
        inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
        FetchWork work = new FetchWork(table.getPath(), tableDesc);
        PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
        work.setSplitSample(splitSample);
        return work;
      }
      List<Path> listP = new ArrayList<Path>();
      List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
      for (Partition partition : partsList.getNotDeniedPartns()) {
        inputs.add(new ReadEntity(partition, parent, parent == null));
        listP.add(partition.getDataLocation());
        partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
      }
      Table sourceTable = partsList.getSourceTable();
      inputs.add(new ReadEntity(sourceTable, parent, parent == null));
      TableDesc table = Utilities.getTableDesc(sourceTable);
      FetchWork work = new FetchWork(listP, partP, table);
      if (!work.getPartDesc().isEmpty()) {
        PartitionDesc part0 = work.getPartDesc().get(0);
        PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
        work.setSplitSample(splitSample);
      }
      return work;
    }

Code example source: apache/drill

    private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl,
        boolean isOutputPadded) {
      formatOutput("Database:", tbl.getDbName(), tableInfo);
      formatOutput("Owner:", tbl.getOwner(), tableInfo);
      formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
      formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
      formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
      if (!tbl.isView()) {
        formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
      }
      formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
      if (tbl.getParameters().size() > 0) {
        tableInfo.append("Table Parameters:").append(LINE_DELIM);
        displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
      }
    }

Code example source: apache/hive

    private static void getTableMetaDataInformation(StringBuilder tableInfo, Table tbl,
        boolean isOutputPadded) {
      formatOutput("Database:", tbl.getDbName(), tableInfo);
      formatOutput("OwnerType:", (tbl.getOwnerType() != null) ? tbl.getOwnerType().name() : "null", tableInfo);
      formatOutput("Owner:", tbl.getOwner(), tableInfo);
      formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
      formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
      formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
      if (!tbl.isView()) {
        formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
      }
      formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
      if (tbl.getParameters().size() > 0) {
        tableInfo.append("Table Parameters:").append(LINE_DELIM);
        displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
      }
    }
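In both copies the Location: line is skipped for views: a virtual view stores no data, so tbl.getDataLocation() has nothing meaningful to report. The apache/hive copy additionally prints the owner type; otherwise the two are identical.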

Code example source: apache/hive

    // Fragment from Table's validity checks; braces and the column-count guard are
    // reconstructed here so the excerpt parses. The isView() checks are verbatim.
    if (getCols().size() == 0) {
      throw new HiveException(
          "at least one column must be specified for the table");
    }
    if (!isView()) {
      if (null == getDeserializerFromMetaStore(false)) {
        throw new HiveException("must specify a non-null serDe");
      }
    }
    if (isView() || isMaterializedView()) {
      assert (getViewOriginalText() != null);
      assert (getViewExpandedText() != null);
    }

Code example source: apache/hive

    private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
        throws SemanticException {
      if (tbl.isView()) {
        if (!expectView) {
          throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
        }
        switch (op) {
        case ADDPARTITION:
        case DROPPARTITION:
        case RENAMEPARTITION:
        case ADDPROPS:
        case DROPPROPS:
        case RENAME:
          // allow this form
          break;
        default:
          throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
        }
      } else {
        if (expectView) {
          throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
        }
      }
      if (tbl.isNonNative() && !AlterTableTypes.nonNativeTableAllowedTypes.contains(op)) {
        throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
      }
    }
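So for views, isView() narrows ALTER to partition add/drop/rename, property changes, and renames; anything else fails with ALTER_VIEW_DISALLOWED_OP, and an ALTER VIEW statement against a base table fails with ALTER_COMMAND_FOR_TABLES. The drill copy below differs only in the final non-native check, which rejects every ALTER on non-native tables instead of consulting an allow-list.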

Code example source: apache/drill

    private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
        throws SemanticException {
      if (tbl.isView()) {
        if (!expectView) {
          throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
        }
        switch (op) {
        case ADDPARTITION:
        case DROPPARTITION:
        case RENAMEPARTITION:
        case ADDPROPS:
        case DROPPROPS:
        case RENAME:
          // allow this form
          break;
        default:
          throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
        }
      } else {
        if (expectView) {
          throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
        }
      }
      if (tbl.isNonNative()) {
        throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
      }
    }
