Usage of scala.collection.Iterable.toSeq(), with code examples


This article collects code examples of calling the scala.collection.Iterable.toSeq() method from Java, showing how Iterable.toSeq() is used in practice. The examples come from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should make useful references. Details of the Iterable.toSeq() method:

Package: scala.collection.Iterable
Class: Iterable
Method: toSeq

About Iterable.toSeq

toSeq converts the collection to a scala.collection.Seq. In the Java code below, the scala.collection.Iterable is typically obtained by wrapping a java.util collection with JavaConverters (or the older JavaConversions), and toSeq() then produces the Seq that Scala APIs expect.
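
A minimal self-contained sketch of the conversion pattern the project examples below all share (the class name and sample values are ours, for illustration only):

import java.util.Arrays;
import java.util.List;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class ToSeqExample {
  public static void main(String[] args) {
    List<String> javaList = Arrays.asList("a", "b", "c");
    // Wrap the Java collection as a scala.collection.Iterable, then materialize a Seq.
    Seq<String> scalaSeq =
        JavaConverters.collectionAsScalaIterableConverter(javaList).asScala().toSeq();
    System.out.println(scalaSeq.size()); // prints 3
  }
}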

Code examples

Code example source: org.apache.spark/spark-core

@Test
public void combineByKey() {
 JavaRDD<Integer> originalRDD = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
 Function<Integer, Integer> keyFunction = v1 -> v1 % 3;
 Function<Integer, Integer> createCombinerFunction = v1 -> v1;
 Function2<Integer, Integer, Integer> mergeValueFunction = (v1, v2) -> v1 + v2;
 JavaPairRDD<Integer, Integer> combinedRDD = originalRDD.keyBy(keyFunction)
  .combineByKey(createCombinerFunction, mergeValueFunction, mergeValueFunction);
 Map<Integer, Integer> results = combinedRDD.collectAsMap();
 ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
 assertEquals(expected, results);
 // Build a default partitioner; toSeq() turns the (empty) Scala Iterable of other RDDs into a Seq.
 Partitioner defaultPartitioner = Partitioner.defaultPartitioner(
  combinedRDD.rdd(),
  JavaConverters.collectionAsScalaIterableConverter(
   Collections.<RDD<?>>emptyList()).asScala().toSeq());
 combinedRDD = originalRDD.keyBy(keyFunction)
  .combineByKey(
   createCombinerFunction,
   mergeValueFunction,
   mergeValueFunction,
   defaultPartitioner,
   false,
   new KryoSerializer(new SparkConf()));
 results = combinedRDD.collectAsMap();
 assertEquals(expected, results);
}

Code example source: org.apache.spark/spark-core_2.10 and org.apache.spark/spark-core_2.11

The same combineByKey test appears verbatim in the Scala 2.10 and 2.11 builds of spark-core (see the example above).

Code example source: com.typesafe.play/play_2.10

/**
   * Select a preferred language, given the list of candidates.
   *
   * Will select the preferred language, based on what languages are available, or return the default language if
   * none of the candidates are available.
   *
   * @param candidates The candidate languages
   * @return The preferred language
   */
  public Lang preferred(Collection<Lang> candidates) {
    return new Lang(langs.preferred(
        (scala.collection.Seq) JavaConversions.collectionAsScalaIterable(candidates).toSeq()));
  }

Code example source: com.typesafe.play/play_2.10

/**
 * Get a messages context appropriate for the given candidates.
 *
 * Will select a language from the candidates, based on the languages available, and fallback to the default language
 * if none of the candidates are available.
 */
public Messages preferred(Collection<Lang> candidates) {
  Seq<Lang> cs = JavaConversions.collectionAsScalaIterable(candidates).toSeq();
  play.api.i18n.Messages msgs = messages.preferred((Seq) cs);
  return new Messages(new Lang(msgs.lang()), this);
}
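
A hypothetical call site for the method above (assuming a MessagesApi-style object named messagesApi is in scope; Lang.forCode and Messages.at are part of Play's Java API):

List<Lang> candidates = Arrays.asList(Lang.forCode("fr"), Lang.forCode("en"));
Messages messages = messagesApi.preferred(candidates);
// Falls back to the default language if neither candidate is available.
String title = messages.at("home.title");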

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable parent types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable parent types
 */
@Override
public Seq<UserTypeVal> getAllowableParentTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.ADMIN);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}
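
ScalaInJavaHelper is a DataGenerator-internal utility whose source is not shown in these snippets. A plausible minimal version, assuming it simply delegates to Scala's JavaConverters (a sketch, not the project's actual code):

import java.util.LinkedList;
import scala.collection.JavaConverters;

public final class ScalaInJavaHelper {
  // Hypothetical sketch: expose a Java LinkedList as a scala.collection.Iterable.
  public static <T> scala.collection.Iterable<T> linkedListToScalaIterable(LinkedList<T> list) {
    return JavaConverters.collectionAsScalaIterableConverter(list).asScala();
  }
}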

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable child types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable child types
 */
@Override
public Seq<UserTypeVal> getAllowableChildTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.SOCIAL_NETWORK_EMPLOYEE);
  list.add(UserType.PUBLIC_USER);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable child types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable child types
 */
@Override
public Seq<UserTypeVal> getAllowableChildTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.PUBLIC_USER);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable parent types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable parent types
 */
@Override
public Seq<UserTypeVal> getAllowableParentTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.ADMIN);
  list.add(UserType.SOCIAL_NETWORK_EMPLOYEE);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable child types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable child types
 */
@Override
public Seq<UserTypeVal> getAllowableChildTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.ADMIN);
  list.add(UserType.SOCIAL_NETWORK_EMPLOYEE);
  list.add(UserType.PUBLIC_USER);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}

Code example source: FINRAOS/DataGenerator

/**
 * Get allowable parent types
 * @param nodeOfThisType Node of this type
 * @return Sequence of allowable parent types
 */
@Override
public Seq<UserTypeVal> getAllowableParentTypes(Node<UserStub> nodeOfThisType) {
  LinkedList<UserTypeVal> list = new LinkedList<>();
  list.add(UserType.ADMIN);
  list.add(UserType.SOCIAL_NETWORK_EMPLOYEE);
  list.add(UserType.PUBLIC_USER);
  return ScalaInJavaHelper.linkedListToScalaIterable(list).toSeq();
}

Code example source: com.twitter/util-core_2.12

/**
 * @see Offer$#choose(scala.collection.Seq)
 */
public static <T> Offer<T> choose(Collection<Offer<T>> offers) {
 scala.collection.Seq<Offer<T>> scalaSeq = JavaConverters.collectionAsScalaIterableConverter(offers).asScala().toSeq();
 return Offer$.MODULE$.choose(scalaSeq);
}
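
A hypothetical use of the wrapper above, assuming two existing Offer<String> values a and b, and calling the class hosting this static method Offers (the hosting class name is an assumption):

List<Offer<String>> candidates = Arrays.asList(a, b);
// choose combines the offers into one that, when synchronized, selects exactly one of them.
Offer<String> either = Offers.choose(candidates);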

Code example source: SiftScience/kafka-assigner

private static void printCurrentAssignment(ZkUtils zkUtils, List<String> specifiedTopics) {
  // Use the specified topics if given, converting the Java list to a Scala Seq;
  // otherwise ask ZooKeeper for all topics.
  Seq<String> topics = specifiedTopics != null ?
      JavaConversions.iterableAsScalaIterable(specifiedTopics).toSeq() :
      zkUtils.getAllTopics();
  System.out.println("CURRENT ASSIGNMENT:");
  System.out.println(
      zkUtils.formatAsReassignmentJson(zkUtils.getReplicaAssignmentForTopics(
          topics)));
}

Code example source: com.twitter/util-core_2.11

The identical choose wrapper also ships in the Scala 2.11 build of util-core (see the example above).

Code example source: cloudera-labs/envelope

@Override
public Dataset<Row> derive(Map<String, Dataset<Row>> dependencies) throws Exception {
 dependencyCheck(dependencies);
 Dataset<Row> sourceStep = dependencies.get(stepName);
 if (useIncludeFields) {
   if (!Arrays.asList(sourceStep.columns()).containsAll(includeFields)) {
     throw new RuntimeException("Columns specified in " + INCLUDE_FIELDS + " are not found in input dependency schema \n" +
     "Available columns: " + Arrays.toString(sourceStep.columns()));
   }
   // Dataset.select(String, String...) takes the first column separately from the rest.
   String firstCol = includeFields.get(0);
   includeFields.remove(0);
   return sourceStep.select(firstCol, includeFields.toArray(new String[0]));
 } else {
   if (!Arrays.asList(sourceStep.columns()).containsAll(excludeFields)) {
     throw new RuntimeException("Columns specified in " + EXCLUDE_FIELDS + " are not found in input dependency schema \n" +
     "Available columns: " + Arrays.toString(sourceStep.columns()));
   }
   // Dataset.drop(Seq<String>) expects a Scala Seq, hence the toSeq() conversion.
   return sourceStep.drop(JavaConverters.collectionAsScalaIterableConverter(excludeFields).asScala().toSeq());
 }
}

Code example source: uber/marmaray

/**
 * Creates {@link SparkConf} with {@link org.apache.spark.serializer.KryoSerializer} along with
 * registering default/user-input serializable classes and user-input Avro Schemas.
 * Once {@link SparkContext} is created, we can no longer register serialization classes and Avro schemas.
 */
public SparkConf createSparkConf(@NonNull final SparkArgs sparkArgs) {
  /*
   * Registering classes with Kryo means the full class name of each object
   * need not be stored during serialization, which reduces storage space.
   */
  final SparkConf sparkConf = new SparkConf();
  sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
  final List<Class> serializableClasses = getDefaultSerializableClasses();
  serializableClasses.addAll(sparkArgs.getUserSerializationClasses());
  sparkConf.registerKryoClasses(serializableClasses.toArray(new Class[0]));
  if (sparkArgs.getAvroSchemas().isPresent()) {
    sparkConf.registerAvroSchemas(
      JavaConverters
        .iterableAsScalaIterableConverter(sparkArgs.getAvroSchemas().get())
        .asScala()
        .toSeq());
  }
  // override spark properties
  final Map<String, String> sparkProps = sparkArgs.getOverrideSparkProperties();
  for (Entry<String, String> entry : sparkProps.entrySet()) {
    log.info("Setting spark key:val {} : {}", entry.getKey(), entry.getValue());
    sparkConf.set(entry.getKey(), entry.getValue());
  }
  return sparkConf;
}
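
A hypothetical call site (the factory instance and SparkArgs value are assumptions; the register-before-context rule comes from the Javadoc above):

// sparkFactory and sparkArgs are assumed to exist in the surrounding application.
SparkConf conf = sparkFactory.createSparkConf(sparkArgs);
// Kryo classes and Avro schemas can no longer be registered after this point.
JavaSparkContext jsc = new JavaSparkContext(conf);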

