Usage and code examples for the org.apache.hadoop.hive.ql.metadata.Table.getParameters() method


This article collects code examples of the Java method org.apache.hadoop.hive.ql.metadata.Table.getParameters() and shows how Table.getParameters() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of Table.getParameters() are as follows:
Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: getParameters

About Table.getParameters

No description is provided in the original source.
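
Judging from the examples collected below, getParameters() exposes the table's key/value parameters (the properties stored with the table in the metastore, such as those set via TBLPROPERTIES) as a Map<String, String>. The sketch below shows typical read access; it is not taken from the article's examples, and the setup via Hive.get(new HiveConf()) as well as the names "mydb" and "my_table" are illustrative assumptions.

import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableParametersExample {
 public static void main(String[] args) throws HiveException {
  // Obtain a Hive client for the current configuration (illustrative setup).
  Hive hive = Hive.get(new HiveConf());

  // Look up a table; "mydb" and "my_table" are hypothetical names.
  Table table = hive.getTable("mydb", "my_table");

  // getParameters() returns the table's parameters as a mutable Map<String, String>.
  Map<String, String> params = table.getParameters();

  // Read a single parameter, e.g. whether the table is transactional.
  String transactional = params.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
  System.out.println("transactional = " + transactional);

  // Iterate over all parameters.
  for (Map.Entry<String, String> entry : params.entrySet()) {
   System.out.println(entry.getKey() + " = " + entry.getValue());
  }
 }
}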

Code examples

Code example source: apache/incubator-gobblin

private static Properties getTableProperties(Table table) {
  Properties properties = new Properties();
  properties.putAll(table.getParameters());
  return properties;
}

Code example source: apache/hive

/**
 * Get the parameter map of the Entity.
 */
public Map<String, String> getParameters() {
 if (p != null) {
  return p.getParameters();
 } else {
  return t.getParameters();
 }
}

Code example source: apache/drill

/**
 * Get the parameter map of the Entity.
 */
public Map<String, String> getParameters() {
 if (p != null) {
  return p.getParameters();
 } else {
  return t.getParameters();
 }
}

Code example source: apache/incubator-gobblin

public Map<String, String> getTableParams() {
 return this.hivePartition.getTable().getParameters();
}

Code example source: apache/hive

private ReplLoadOpType getLoadTableType(Table table) throws InvalidOperationException, HiveException {
 if (table == null) {
  return ReplLoadOpType.LOAD_NEW;
 }
 if (ReplUtils.replCkptStatus(table.getDbName(), table.getParameters(), context.dumpDirectory)) {
  return ReplLoadOpType.LOAD_SKIP;
 }
 return ReplLoadOpType.LOAD_REPLACE;
}

Code example source: apache/hive

protected int getBucket(Object row) {
 if (!isBucketed) {
  return 0;
 }
 Object[] bucketFields = getBucketFields(row);
 int bucketingVersion = Utilities.getBucketingVersion(
  table.getParameters().get(hive_metastoreConstants.TABLE_BUCKETING_VERSION));
 return bucketingVersion == 2 ?
  ObjectInspectorUtils.getBucketNumber(bucketFields, bucketObjInspectors, totalBuckets) :
  ObjectInspectorUtils.getBucketNumberOld(bucketFields, bucketObjInspectors, totalBuckets);
}

Code example source: apache/hive

/**
 * Actually makes the table transactional
 */
private static void alterTable(Table t, Hive db, boolean isMM)
  throws HiveException, InvalidOperationException {
 org.apache.hadoop.hive.ql.metadata.Table metaTable =
   //clone to make sure new prop doesn't leak
   new org.apache.hadoop.hive.ql.metadata.Table(t.deepCopy());
 metaTable.getParameters().put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
 if(isMM) {
  metaTable.getParameters()
    .put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
 }
 EnvironmentContext ec = new EnvironmentContext();
 /*we are not modifying any data so stats should be exactly the same*/
 ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
 db.alterTable(Warehouse.getQualifiedName(t), metaTable, false, ec, false);
}

Code example source: apache/drill

/**
 * Determines if a current replication object(current state of dump) is allowed to
 * replicate-replace-into a given table
 */
public boolean allowReplacementInto(Table table) {
 return allowReplacement(getLastReplicatedStateFromParameters(table.getParameters()),this.getCurrentReplicationState());
}

Code example source: apache/drill

/**
 * Determines if a current replication event specification is allowed to
 * replicate-replace-into a given table
 */
public boolean allowEventReplacementInto(Table table) {
 return allowReplacement(getLastReplicatedStateFromParameters(table.getParameters()),this.getReplicationState());
}

Code example source: apache/hive

private boolean resetStatisticsProps(Table table) {
 if (hasFollowingStatsTask()) {
  // If there's a follow-on stats task then the stats will be correct after load, so don't
  // need to reset the statistics.
  return false;
 }
 if (!work.getIsInReplicationScope()) {
  // If the load is not happening during replication and there is not follow-on stats
  // task, stats will be inaccurate after load and so need to be reset.
  return true;
 }
 // If we are loading a table during replication, the stats will also be replicated
 // and hence accurate if it's a non-transactional table. For transactional table we
 // do not replicate stats yet.
 return AcidUtils.isTransactionalTable(table.getParameters());
}

Code example source: apache/hive

/**
  * Setup the table level stats as if the table is new. Used when setting up Table for a new
  * table or during replication.
  */
 public void setStatsStateLikeNewTable() {
  // We do not replicate statistics for
  // an ACID Table right now, so don't touch them right now.
  if (AcidUtils.isTransactionalTable(this)) {
   return;
  }

  if (isPartitioned()) {
   StatsSetupConst.setStatsStateForCreateTable(getParameters(), null,
       StatsSetupConst.FALSE);
  } else {
   StatsSetupConst.setStatsStateForCreateTable(getParameters(),
       MetaStoreUtils.getColumnNames(getCols()), StatsSetupConst.TRUE);
  }
 }

Code example source: apache/hive

public boolean isMmTable() {
 if (getTable() != null) {
  return AcidUtils.isInsertOnlyTable(table.getParameters());
 } else { // Dynamic Partition Insert case
  return AcidUtils.isInsertOnlyTable(getTableInfo().getProperties());
 }
}

Code example source: apache/hive

public static MmContext createIfNeeded(Table t) {
 if (t == null) return null;
 if (!AcidUtils.isInsertOnlyTable(t.getParameters())) return null;
 return new MmContext(AcidUtils.getFullTableName(t.getDbName(), t.getTableName()));
}

Code example source: apache/hive

private ColStatistics extractColStats(RexInputRef ref) {
 RelColumnOrigin columnOrigin = this.metadataProvider.getColumnOrigin(filterOp, ref.getIndex());
 if (columnOrigin != null) {
  RelOptHiveTable table = (RelOptHiveTable) columnOrigin.getOriginTable();
  if (table != null) {
   ColStatistics colStats =
     table.getColStat(Lists.newArrayList(columnOrigin.getOriginColumnOrdinal()), false).get(0);
   if (colStats != null && StatsUtils.areColumnStatsUptoDateForQueryAnswering(
     table.getHiveTableMD(), table.getHiveTableMD().getParameters(), colStats.getColumnName())) {
    return colStats;
   }
  }
 }
 return null;
}

Code example source: apache/hive

protected RecordUpdater createRecordUpdater(final Path partitionPath, int bucketId, Long minWriteId,
 Long maxWriteID)
 throws IOException {
 // Initialize table properties from the table parameters. This is required because the table
 // may define certain table parameters that may be required while writing. The table parameter
 // 'transactional_properties' is one such example.
 Properties tblProperties = new Properties();
 tblProperties.putAll(table.getParameters());
 return acidOutputFormat.getRecordUpdater(partitionPath,
  new AcidOutputFormat.Options(conf)
   .filesystem(fs)
   .inspector(outputRowObjectInspector)
   .bucket(bucketId)
   .tableProperties(tblProperties)
   .minimumWriteId(minWriteId)
   .maximumWriteId(maxWriteID)
   .statementId(statementId)
   .finalDestination(partitionPath));
}

Code example source: apache/hive

private Table newTable(boolean isPartitioned) {
 Table t = new Table("default", "table" + Integer.toString(nextInput++));
 if (isPartitioned) {
  FieldSchema fs = new FieldSchema();
  fs.setName("version");
  fs.setType("String");
  List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
  partCols.add(fs);
  t.setPartCols(partCols);
 }
 Map<String, String> tblProps = t.getParameters();
 if(tblProps == null) {
  tblProps = new HashMap<>();
 }
 tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
 t.setParameters(tblProps);
 return t;
}

Code example source: apache/hive

private static void getTableMetaDataInformation(StringBuilder tableInfo, Table  tbl,
  boolean isOutputPadded) {
 formatOutput("Database:", tbl.getDbName(), tableInfo);
 formatOutput("OwnerType:", (tbl.getOwnerType() != null) ? tbl.getOwnerType().name() : "null", tableInfo);
 formatOutput("Owner:", tbl.getOwner(), tableInfo);
 formatOutput("CreateTime:", formatDate(tbl.getTTable().getCreateTime()), tableInfo);
 formatOutput("LastAccessTime:", formatDate(tbl.getTTable().getLastAccessTime()), tableInfo);
 formatOutput("Retention:", Integer.toString(tbl.getRetention()), tableInfo);
 if (!tbl.isView()) {
  formatOutput("Location:", tbl.getDataLocation().toString(), tableInfo);
 }
 formatOutput("Table Type:", tbl.getTableType().name(), tableInfo);
 if (tbl.getParameters().size() > 0) {
  tableInfo.append("Table Parameters:").append(LINE_DELIM);
  displayAllParameters(tbl.getParameters(), tableInfo, false, isOutputPadded);
 }
}

Code example source: apache/hive

private Long extractRowCount(RexInputRef ref) {
 RelColumnOrigin columnOrigin = this.metadataProvider.getColumnOrigin(filterOp, ref.getIndex());
 if (columnOrigin != null) {
  RelOptHiveTable table = (RelOptHiveTable) columnOrigin.getOriginTable();
  if (table != null) {
   if (StatsUtils.areBasicStatsUptoDateForQueryAnswering(table.getHiveTableMD(),
     table.getHiveTableMD().getParameters())) {
    return StatsUtils.getNumRows(table.getHiveTableMD());
   }
  }
 }
 return null;
}

Code example source: apache/hive

@Before
public void before() {
 HepProgramBuilder programBuilder = new HepProgramBuilder();
 programBuilder.addRuleInstance(HiveReduceExpressionsWithStatsRule.INSTANCE);
 planner = new HepPlanner(programBuilder.build());
 JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
 RexBuilder rexBuilder = new RexBuilder(typeFactory);
 final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
 RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class);
 Mockito.doReturn(rowTypeMock).when(tableMock).getRowType();
 Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any());
 statObj = new ColStatistics("_int", "int");
 Mockito.doReturn(Lists.newArrayList(statObj)).when(tableMock).getColStat(Matchers.anyListOf(Integer.class), Matchers.eq(false));
 Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD();
 Mockito.doReturn(tableParams).when(hiveTableMDMock).getParameters();
 builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
 StatsSetupConst.setStatsStateForCreateTable(tableParams, Lists.newArrayList("_int"), StatsSetupConst.TRUE);
 tableParams.put(StatsSetupConst.ROW_COUNT, "3");
}

Code example source: apache/hive

private List<FieldSchema> getColsInternal(boolean forMs) {
 try {
  String serializationLib = tPartition.getSd().getSerdeInfo().getSerializationLib();
  // Do the lightweight check for general case.
  if (Table.hasMetastoreBasedSchema(SessionState.getSessionConf(), serializationLib)) {
   return tPartition.getSd().getCols();
  } else if (forMs && !Table.shouldStoreFieldsInMetastore(
    SessionState.getSessionConf(), serializationLib, table.getParameters())) {
   return Hive.getFieldsFromDeserializerForMsStorage(table, getDeserializer());
  }
  return HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer());
 } catch (Exception e) {
  LOG.error("Unable to get cols from serde: " +
    tPartition.getSd().getSerdeInfo().getSerializationLib(), e);
 }
 return new ArrayList<FieldSchema>();
}
