Usage of org.apache.hadoop.hive.ql.metadata.Table.getCompleteName() with Code Examples

This article compiles Java code examples of the org.apache.hadoop.hive.ql.metadata.Table.getCompleteName() method, showing how Table.getCompleteName() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.getCompleteName() are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getCompleteName

About Table.getCompleteName

Returns the fully qualified name of the table in the form dbName@tableName. As the apache/hive excerpts below show, the instance method delegates to the static overload getCompleteName(String dbName, String tableName), which joins the two names with an @ separator.
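
To make that contract concrete before diving into the excerpts, here is a minimal sketch; the database and table names are hypothetical, and it assumes Hive's two-argument Table(String dbName, String tableName) constructor:

import org.apache.hadoop.hive.ql.metadata.Table;

public class GetCompleteNameDemo {
 public static void main(String[] args) {
  // Hypothetical names, purely for illustration.
  Table table = new Table("default", "events");

  // Instance form: database name + "@" + table name.
  System.out.println(table.getCompleteName());                    // prints: default@events

  // Static form, as used by setUpAccessControlInfoForUpdate() in the excerpts below.
  System.out.println(Table.getCompleteName("default", "events")); // prints: default@events
 }
}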

Code Examples

Code example source: apache/incubator-gobblin

@Override
public String datasetURN() {
 return this.table.getCompleteName();
}

Code example source: apache/incubator-gobblin

@VisibleForTesting
public static String tableKey(Table table) {
 return table.getCompleteName();
}

Code example source: apache/incubator-gobblin

@Override
public LongWatermark getPreviousHighWatermark(Table table) {
 if (this.tableWatermarks.containsKey(table.getCompleteName())) {
  return this.tableWatermarks.get(table.getCompleteName());
 }
 // Fall back to a zero watermark for tables with no recorded high watermark.
 return new LongWatermark(0);
}

Code example source: apache/incubator-gobblin

@Override
 public Void call() throws Exception {
  // Execute validation queries
  log.debug(String.format("Going to execute queries: %s for format: %s", validationQueries, format));
  List<Long> rowCounts = ValidationJob.this.getValidationOutputFromHive(validationQueries);
  log.debug(String.format("Going to execute queries: %s for format: %s", dataValidationQueries, format));
  List<Long> rowDataValidatedCount = ValidationJob.this.getValidationOutputFromHive(dataValidationQueries);
  // Validate and populate report
  validateAndPopulateReport(hiveDataset.getTable().getCompleteName(), updateTime, rowCounts, rowDataValidatedCount);
  return null;
 }
}));

Code example source: apache/incubator-gobblin

private boolean canCopyTable() {
  if (!COPYABLE_TABLES.contains(this.table.getTableType())) {
   log.warn(String.format("Not copying %s: tables of type %s are not copyable.", this.table.getCompleteName(),
     this.table.getTableType()));
   return false;
  }
  return true;
 }
}

Code example source: apache/incubator-gobblin

private List<FileSet<CopyEntity>> generateAllFileSets(Map<List<String>, Partition> partitionMap) {
 List<FileSet<CopyEntity>> fileSets = Lists.newArrayList();
 for (Map.Entry<List<String>, Partition> partition : partitionMap.entrySet()) {
  fileSets.add(fileSetForPartition(partition.getValue()));
  HiveCopyEntityHelper.this.targetPartitions.remove(partition.getKey());
 }
 if (!HiveCopyEntityHelper.this.targetPartitions.isEmpty()) {
  fileSets.add(new HivePartitionsDeregisterFileSet(
    HiveCopyEntityHelper.this.dataset.getTable().getCompleteName() + DEREGISTER_FILE_SET,
    HiveCopyEntityHelper.this.dataset, HiveCopyEntityHelper.this.targetPartitions.values(), HiveCopyEntityHelper.this));
 }
 return fileSets;
}

Code example source: apache/hive

@Override
 public String apply(FooterStatCollector sc) {
  return String.format("%s#%s", sc.partish.getTable().getCompleteName(), sc.partish.getPartishType());
 }
};

Code example source: apache/drill

@Explain(explainLevels = { Level.USER })
public String getTbl() {
 StringBuffer sb = new StringBuffer();
 sb.append(this.tableMetadata.getCompleteName());
 sb.append("," + alias);
 if (isAcidTable()) {
  sb.append(", ACID table");
 }
 sb.append(",Tbl:");
 sb.append(this.statistics.getBasicStatsState());
 sb.append(",Col:");
 sb.append(this.statistics.getColumnStatsState());
 return sb.toString();
}

Code example source: apache/hive

/**
 * @return the complete name, including the db name
 */
public String getCompleteName() {
 return getTable().getCompleteName() + "@" + getName();
}

Code example source: apache/hive

/**
 * @return the complete name, including the db name
 */
public String getCompleteName() {
 return getCompleteName(getDbName(), getTableName());
}

Code example source: apache/hive

@Explain(explainLevels = { Level.USER })
public String getTbl() {
 StringBuilder sb = new StringBuilder();
 sb.append(this.tableMetadata.getCompleteName());
 sb.append("," + alias);
 if (AcidUtils.isFullAcidTable(tableMetadata)) {
  sb.append(", ACID table");
 } else if (isTranscationalTable()) {
  sb.append(", transactional table");
 }
 sb.append(",Tbl:");
 sb.append(this.statistics.getBasicStatsState());
 sb.append(",Col:");
 sb.append(this.statistics.getColumnStatsState());
 return sb.toString();
}

Code example source: apache/drill

/**
 * @return the complete name, including the db name
 */
public String getCompleteName() {
 return getTable().getCompleteName() + "@" + getName();
}

Code example source: apache/drill

/**
 * @return the complete name, including the db name
 */
public String getCompleteName() {
 return getCompleteName(getDbName(), getTableName());
}

Code example source: apache/incubator-gobblin

private static Table mockTable(String name) {
 Table table = Mockito.mock(Table.class, Mockito.RETURNS_SMART_NULLS);
 Mockito.when(table.getCompleteName()).thenReturn("db@" + name);
 return table;
}

Code example source: apache/incubator-gobblin

private static Table mockTable(String name) {
 Table table = Mockito.mock(Table.class, Mockito.RETURNS_SMART_NULLS);
 Mockito.when(table.getCompleteName()).thenReturn(name);
 return table;
}

Code example source: apache/incubator-gobblin

/**
 * Automatically sets the dataset urn by calling {@link #setDatasetUrn(String)}
 */
public void setHiveDataset(HiveDataset hiveDataset) {
 this.setProp(HIVE_DATASET_SERIALIZED_KEY, HiveSource.GENERICS_AWARE_GSON.toJson(hiveDataset, HiveDataset.class));
 setDatasetUrn(hiveDataset.getTable().getCompleteName());
}

Code example source: apache/hive

/**
 *  For updates, we need to set the column access info so that it contains information on
 *  the columns we are updating.
 *  (But not all the columns of the target table even though the rewritten query writes
 * all columns of target table since that is an implementation detail).
 */
protected void setUpAccessControlInfoForUpdate(Table mTable, Map<String, ASTNode> setCols) {
 ColumnAccessInfo cai = new ColumnAccessInfo();
 for (String colName : setCols.keySet()) {
  cai.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()), colName);
 }
 setUpdateColumnAccessInfo(cai);
}

Code example source: apache/incubator-gobblin

/**
 * Currently updates the {@link #HIVE_TABLE_AVRO_SCHEMA_URL} location for the new hive table
 * @param targetTable, new Table to be registered in hive
 * @throws IOException
 */
public static void updateTableAttributesIfAvro(Table targetTable, HiveCopyEntityHelper hiveHelper) throws IOException {
 if (isHiveTableAvroType(targetTable)) {
  updateAvroSchemaURL(targetTable.getCompleteName(), targetTable.getTTable().getSd(), hiveHelper);
 }
}

Code example source: apache/drill

/**
 *  For updates, we need to set the column access info so that it contains information on
 *  the columns we are updating.
 *  (But not all the columns of the target table even though the rewritten query writes
 * all columns of target table since that is an implementation detail)
 */
private void setUpAccessControlInfoForUpdate(Table mTable, Map<String, ASTNode> setCols) {
 ColumnAccessInfo cai = new ColumnAccessInfo();
 for (String colName : setCols.keySet()) {
  cai.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()), colName);
 }
 setUpdateColumnAccessInfo(cai);
}

Code example source: apache/incubator-gobblin

/**
 * Sets SLA event metadata in the workunit. The publisher will use this metadata to publish SLA events.
 */
public static void setTableSlaEventMetadata(WorkUnit state, Table table, long updateTime, long lowWatermark,
  long beginGetWorkunitsTime) {
 state.setProp(SlaEventKeys.DATASET_URN_KEY, state.getProp(ConfigurationKeys.DATASET_URN_KEY));
 state.setProp(SlaEventKeys.PARTITION_KEY, table.getCompleteName());
 state.setProp(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, String.valueOf(updateTime));
 // Time when the workunit was created
 state.setProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY, System.currentTimeMillis());
 state.setProp(EventConstants.WORK_UNIT_CREATE_TIME, state.getProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY));
 state.setProp(EventConstants.BEGIN_GET_WORKUNITS_TIME, beginGetWorkunitsTime);
 state.setProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY, lowWatermark);
}
