Usage of the org.apache.spark.rdd.RDD.sparkContext() method, with code examples


This article collects Java code examples of the org.apache.spark.rdd.RDD.sparkContext method and shows how RDD.sparkContext is used in practice. The examples are taken from selected open-source projects found on GitHub, Stack Overflow and Maven, and should make useful references. Details of the RDD.sparkContext method:
Package path: org.apache.spark.rdd.RDD
Class name: RDD
Method name: sparkContext

About RDD.sparkContext

RDD.sparkContext() returns the SparkContext that the RDD was created on. In Java code it is usually reached through JavaRDD.rdd().sparkContext(), for example to create accumulators, run jobs, or build new RDDs on the same context, as the examples below show.
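
Before the project code, here is a minimal, self-contained sketch of the call pattern. Class and variable names such as SparkContextDemo and counter are illustrative and not taken from the quoted projects; the sketch assumes Spark 2.x or later, where SparkContext.longAccumulator is available.

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.LongAccumulator;

public class SparkContextDemo {
  public static void main(String[] args) {
    // Local setup, only to make the example runnable on its own.
    SparkConf conf = new SparkConf().setAppName("rdd-sparkContext-demo").setMaster("local[*]");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    JavaRDD<String> words = jsc.parallelize(Arrays.asList("a", "b", "c"));

    // RDD.sparkContext() returns the SparkContext the RDD was created on;
    // here it registers a driver-side LongAccumulator, the same pattern used in the examples below.
    LongAccumulator counter = words.rdd().sparkContext().longAccumulator();
    words.foreach(w -> counter.add(1L));

    System.out.println("records counted: " + counter.value());
    jsc.stop();
  }
}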

Code examples

Code example source: uber/marmaray

private void logWriteMetrics(final Optional<JavaRDD<WriteStatus>> writesStatuses) {
  if (writesStatuses.isPresent() && this.dataFeedMetrics.isPresent()) {
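    // Both LongAccumulators are created on the driver via the RDD's SparkContext and updated from the executors inside foreach.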
    final LongAccumulator totalCount = writesStatuses.get().rdd().sparkContext().longAccumulator();
    final LongAccumulator errorCount = writesStatuses.get().rdd().sparkContext().longAccumulator();
    writesStatuses.get().foreach(writeStatus -> {
        errorCount.add(writeStatus.getFailedRecords().size());
        totalCount.add(writeStatus.getTotalRecords());
      });
    this.dataFeedMetrics.get().createLongMetric(DataFeedMetricNames.ERROR_ROWCOUNT, errorCount.value(),
        this.dataFeedMetricsTags);
    this.dataFeedMetrics.get().createLongMetric(DataFeedMetricNames.OUTPUT_ROWCOUNT,
        totalCount.value() - errorCount.value(), this.dataFeedMetricsTags);
  }
}

Code example source: com.basho.riak/spark-riak-connector-java (the same code also appears in com.basho.riak/spark-riak-connector)

public void saveToRiakTS(
    RiakConnector connector,
    String bucketType,
    String bucketName,
    WriteConf writeConf,
    WriteDataMapperFactory<T, RowDef> factory
) {
  RiakWriter<T, RowDef> writer = RiakWriter.tsWriter(connector, bucketType, bucketName, writeConf, factory);
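  // The write job is submitted through the SparkContext of the underlying RDD.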
  rdd.sparkContext().runJob(rdd, new JobFunc<>(writer), JavaApiHelper.getClassTag(Void.class));
}

Code example source: uber/marmaray

public JavaRDD<DI> getRDD(final int filterKey) {
  final long count = getCount(filterKey);
  log.info("#records for :{} = {}", filterKey, count);
  if (count > 0) {
    return getRDD(new FilterFunction<>(filterKey));
  } else {
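    // Wraps the SparkContext of inputRDD in a JavaSparkContext to return an empty RDD when there are no matching records.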
    return (new JavaSparkContext(inputRDD.rdd().sparkContext())).emptyRDD();
  }
}

Code example source: uber/marmaray

public final void execute() {
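  // Accumulators are registered on, and the job name is looked up from, the SparkContext that created inputRDD.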
  this.forkFunction.registerAccumulators(this.inputRDD.rdd().sparkContext());
  // Converts JavaRDD<T> -> JavaRDD<List<Integer>, T>
  JavaRDD<ForkData<DI>> forkedData = this.inputRDD.flatMap(this.forkFunction)
    .persist(this.persistLevel);
  final String jobName = SparkJobTracker.getJobName(this.inputRDD.rdd().sparkContext());
  forkedData.setName(String.format("%s-%s", jobName, forkedData.id()));
  // deliberately calling count so that DAG gets executed.
  final long processedRecords = forkedData.count();
  final Optional<RDDInfo> rddInfo = SparkUtil.getRddInfo(forkedData.context(), forkedData.id());
  log.info("#processed records :{} name:{}", processedRecords, forkedData.name());
  if (rddInfo.isPresent()) {
    final long size = rddInfo.get().diskSize() + rddInfo.get().memSize();
    log.info("rddInfo -> name:{} partitions:{} size:{}", forkedData.name(), rddInfo.get().numPartitions(),
      size);
  }
  this.groupRDD = Optional.of(forkedData);
}

Code example source: uber/marmaray

/**
 * {@link #updateSinkStat(Optional)} computes {@link SinkStat} and persists the changes into {@link IMetadataManager}.
 * As part of the {@link SinkStat} computation it also derives the average record size for the current run.
 * @param writesStatuses
 */
private void updateSinkStat(final Optional<JavaRDD<WriteStatus>> writesStatuses) {
  if (writesStatuses.isPresent()) {
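    // The accumulator is created on the RDD's SparkContext; executors add a per-WriteStatus average record size to it.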
    final LongAccumulator avgRecordSizeCounter = writesStatuses.get().rdd().sparkContext().longAccumulator();
    writesStatuses.get().foreach(
      writeStatus -> {
        final long writeBytes = writeStatus.getStat().getTotalWriteBytes();
        final long numInserts = writeStatus.getStat().getNumWrites()
            - writeStatus.getStat().getNumUpdateWrites();
        if (writeBytes > 0 && numInserts > 0) {
          avgRecordSizeCounter.add(writeBytes / numInserts);
        }
      }
    );
    final long avgRecordSize = (int) avgRecordSizeCounter.avg();
    if (avgRecordSize > 0) {
      log.info("Updating Sink Stat manager : avgRecordSize : {}", avgRecordSize);
      this.sinkStatMgr.getCurrentStat().put(SinkStat.AVG_RECORD_SIZE, Long.toString(avgRecordSize));
    }
  }
  this.sinkStatMgr.persist();
}
