Usage of the org.apache.hadoop.hive.ql.metadata.Table.setPartCols() method, with code examples


This article collects a number of code examples for the Java method org.apache.hadoop.hive.ql.metadata.Table.setPartCols(), showing how Table.setPartCols() is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Table.setPartCols() method:

Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: setPartCols

About Table.setPartCols

The upstream Javadoc for this setter is empty. As the examples below illustrate, setPartCols(List<FieldSchema> partCols) sets the table's partition columns, with each partition column described by a FieldSchema (column name, Hive type name, and comment).
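
Before turning to the project snippets, here is a minimal, self-contained sketch of the typical call pattern. The database name, table name, and partition column below are hypothetical, chosen purely for illustration:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class SetPartColsSketch {
      public static void main(String[] args) {
        // Hypothetical database and table names, for illustration only.
        Table t = new Table("default", "events");

        // Each partition column is a FieldSchema: name, Hive type name, comment.
        List<FieldSchema> partCols = new ArrayList<FieldSchema>();
        partCols.add(new FieldSchema("dt", "string", "partition column"));

        // Replace the table's partition-column definitions with this list.
        t.setPartCols(partCols);
      }
    }

In the snippets that follow, the same call also appears paired with getPartCols() to copy partition definitions from one Table object to another (for example when altering a view).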

Code examples

Code example source: apache/hive

    private Table newTable(boolean isPartitioned) {
      Table t = new Table("default", "table" + Integer.toString(nextInput++));
      if (isPartitioned) {
        FieldSchema fs = new FieldSchema();
        fs.setName("version");
        fs.setType("String");
        List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
        partCols.add(fs);
        t.setPartCols(partCols);
      }
      return t;
    }

Code example source: apache/hive

    private Table newTable(boolean isPartitioned) {
      Table t = new Table("default", "table" + Integer.toString(nextInput++));
      if (isPartitioned) {
        FieldSchema fs = new FieldSchema();
        fs.setName("version");
        fs.setType("String");
        List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
        partCols.add(fs);
        t.setPartCols(partCols);
      }
      Map<String, String> tblProps = t.getParameters();
      if (tblProps == null) {
        tblProps = new HashMap<>();
      }
      tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
      t.setParameters(tblProps);
      return t;
    }
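
This variant differs from the previous example only in that it also marks the table as transactional (ACID): hive_metastoreConstants.TABLE_IS_TRANSACTIONAL is the table-property key "transactional", so the table ends up with transactional=true in its parameters.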

Code example source: apache/hive

    // Give the temporary table object an empty partition-column list.
    tempTableObj.setPartCols(new ArrayList<>());

Code example source: apache/hive

    partKeys.add(new FieldSchema(partName, serdeConstants.STRING_TYPE_NAME, ""));
    table.setPartCols(partKeys);

Code example source: apache/hive

    @Test
    public void testDataDeletion() throws HiveException,
        IOException, TException {

      Database db = new Database();
      db.setName(dbName);
      hive.createDatabase(db);

      Table table = new Table(dbName, tableName);
      table.setDbName(dbName);
      table.setInputFormatClass(TextInputFormat.class);
      table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
      table.setPartCols(partCols);
      hive.createTable(table);
      table = hive.getTable(dbName, tableName);

      Path fakeTable = table.getPath().getParent().suffix(
          Path.SEPARATOR + "faketable");
      fs = fakeTable.getFileSystem(hive.getConf());
      fs.mkdirs(fakeTable);
      fs.deleteOnExit(fakeTable);

      Path fakePart = new Path(table.getDataLocation().toString(),
          "fakepartition=fakevalue");
      fs.mkdirs(fakePart);
      fs.deleteOnExit(fakePart);

      hive.dropTable(dbName, tableName, true, true);
      assertFalse(fs.exists(fakePart));
      hive.dropDatabase(dbName);
      assertFalse(fs.exists(fakeTable));
    }

Code example source: apache/hive

    tbl.setPartCols(oldtbl.getPartCols());

Code example source: apache/drill

    tbl.setPartCols(oldtbl.getPartCols());

Code example source: apache/hive

    tbl.setPartCols(getPartCols());

Code example source: apache/drill

    tbl.setPartCols(getPartCols());

Code example source: apache/hive

        serdeConstants.STRING_TYPE_NAME,
        "partition column, date but in string format as date type is not yet supported in QL"));
    tbl.setPartCols(partCols);

Code example source: apache/hive

    private Table createTestTable() throws HiveException, AlreadyExistsException {
      Database db = new Database();
      db.setName(dbName);
      hive.createDatabase(db, true);

      Table table = new Table(dbName, tableName);
      table.setDbName(dbName);
      table.setInputFormatClass(TextInputFormat.class);
      table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
      table.setPartCols(partCols);
      hive.createTable(table);
      table = hive.getTable(dbName, tableName);
      Assert.assertTrue(table.getTTable().isSetId());
      table.getTTable().unsetId();

      for (Map<String, String> partSpec : parts) {
        hive.createPartition(table, partSpec);
      }
      return table;
    }

Code example source: apache/hive

    oldview.getTTable().getParameters().putAll(crtView.getTblProps());
    oldview.setPartCols(crtView.getPartCols());
    if (crtView.getInputFormat() != null) {
      oldview.setInputFormatClass(crtView.getInputFormat());

Code example source: apache/drill

    oldview.getTTable().getParameters().putAll(crtView.getTblProps());
    oldview.setPartCols(crtView.getPartCols());
    if (crtView.getInputFormat() != null) {
      oldview.setInputFormatClass(crtView.getInputFormat());
    tbl.setPartCols(crtView.getPartCols());

Code example source: apache/lens

    private Table getHiveTable() {
      Table t = new Table(LensConfConstants.DEFAULT_STATISTICS_DATABASE, EVENT_NAME);
      LinkedList<FieldSchema> partCols = new LinkedList<FieldSchema>();
      partCols.add(new FieldSchema("dt", "string", "partCol"));
      t.setPartCols(partCols);
      return t;
    }

Code example source: apache/lens

    @Override
    public Table getHiveTable(HiveConf conf) {
      Table table = new Table(conf.get(LensConfConstants.STATISTICS_DATABASE_KEY,
          LensConfConstants.DEFAULT_STATISTICS_DATABASE), this.getClass().getSimpleName());

      LinkedList<FieldSchema> colList = new LinkedList<FieldSchema>();
      colList.add(new FieldSchema("handle", "string", "Query Handle"));
      colList.add(new FieldSchema("userQuery", "string", "User Query before rewrite"));
      colList.add(new FieldSchema("submitter", "string", "submitter"));
      colList.add(new FieldSchema("clusterUser", "string", "Cluster User which will do all operations on hdfs"));
      colList.add(new FieldSchema("sessionId", "string", "Lens Session which ran the query"));
      colList.add(new FieldSchema("submissionTime", "bigint", "Time which query was submitted"));
      colList.add(new FieldSchema("startTime", "bigint", "Timestamp which query was Started"));
      colList.add(new FieldSchema("endTime", "bigint", "Timestamp which query was finished"));
      colList.add(new FieldSchema("result", "string", "path to result of query"));
      colList.add(new FieldSchema("cause", "string", "failure/eror cause if any"));
      colList.add(new FieldSchema("status", "map<string,string>", "status object of the query"));
      colList.add(new FieldSchema("driverStats", "map<string,string>", "driver statistics of the query"));
      table.setFields(colList);

      LinkedList<FieldSchema> partCols = new LinkedList<FieldSchema>();
      partCols.add(new FieldSchema("dt", "string", "partCol"));
      table.setPartCols(partCols);

      table.setSerializationLib(JSonSerde.class.getName());
      try {
        table.setInputFormatClass(TextInputFormat.class.getName());
      } catch (HiveException e) {
        log.error("Encountered hive exception.", e);
      }
      return table;
    }

Code example source: qubole/streamx

    private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
        throws HiveMetaStoreException {
      Table table = new Table(database, tableName);
      table.setTableType(TableType.EXTERNAL_TABLE);
      table.getParameters().put("EXTERNAL", "TRUE");
      String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
      table.setDataLocation(new Path(tablePath));
      table.setSerializationLib(avroSerde);
      try {
        table.setInputFormatClass(avroInputFormat);
        table.setOutputFormatClass(avroOutputFormat);
      } catch (HiveException e) {
        throw new HiveMetaStoreException("Cannot find input/output format:", e);
      }
      List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
      table.setFields(columns);
      table.setPartCols(partitioner.partitionFields());
      table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
      return table;
    }

Code example source: apache/lens

    /**
     * Creates the hive table.
     *
     * @param tableName the table name
     * @throws HiveException the hive exception
     */
    public static void createHiveTable(String tableName, Map<String, String> parameters) throws HiveException {
      List<FieldSchema> columns = new ArrayList<FieldSchema>();
      columns.add(new FieldSchema("col1", "string", ""));
      List<FieldSchema> partCols = new ArrayList<FieldSchema>();
      partCols.add(new FieldSchema("pcol1", "string", ""));
      Map<String, String> params = new HashMap<String, String>();
      params.put("test.hive.table.prop", "tvalue");
      if (null != parameters && !parameters.isEmpty()) {
        params.putAll(parameters);
      }
      Table tbl = Hive.get().newTable(tableName);
      tbl.setTableType(TableType.MANAGED_TABLE);
      tbl.getTTable().getSd().setCols(columns);
      tbl.setPartCols(partCols);
      tbl.getTTable().getParameters().putAll(params);
      Hive.get().createTable(tbl);
    }
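
Note the split in this example: the regular data columns are set on the storage descriptor via tbl.getTTable().getSd().setCols(columns), while the partition columns go through setPartCols(). In Hive, partition columns are table-level partition keys rather than part of the stored row layout, which is why they are kept separate from the storage descriptor's column list.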

Code example source: qubole/streamx

    private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
      Table table = new Table(database, tableName);
      table.setTableType(TableType.EXTERNAL_TABLE);
      table.getParameters().put("EXTERNAL", "TRUE");
      String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
      table.setDataLocation(new Path(tablePath));
      table.setSerializationLib(getHiveParquetSerde());
      try {
        table.setInputFormatClass(getHiveParquetInputFormat());
        table.setOutputFormatClass(getHiveParquetOutputFormat());
      } catch (HiveException e) {
        throw new HiveMetaStoreException("Cannot find input/output format:", e);
      }
      // convert copycat schema to Hive columns
      List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
      table.setFields(columns);
      table.setPartCols(partitioner.partitionFields());
      return table;
    }

Code example source: org.apache.hadoop.hive/hive-exec

    tbl.setPartCols(crtView.getPartCols());
