Usage of the java.util.stream.Collectors.groupingByConcurrent() method, with code examples

x33g5p2x · reposted on 2022-01-17 · category: Other

This article collects a number of code examples for the Java method java.util.stream.Collectors.groupingByConcurrent() and shows how it is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. The details of Collectors.groupingByConcurrent() are as follows:
Package path: java.util.stream.Collectors
Class name: Collectors
Method name: groupingByConcurrent

About Collectors.groupingByConcurrent

Returns a concurrent Collector that implements a "group by" operation: input elements are grouped according to a classification function into a ConcurrentMap whose keys are the classifier results and whose values are, by default, lists of the matching elements. The collector has the CONCURRENT and UNORDERED characteristics, so all threads of a parallel stream insert into one shared concurrent map instead of building and merging per-thread maps; element order within each group is therefore not guaranteed. Overloads additionally accept a downstream Collector and a Supplier for the result map.
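
As a minimal, self-contained illustration of the simplest overload (the class name and sample data below are mine, not taken from any of the projects cited later):

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;

public class GroupingByConcurrentDemo {
    public static void main(String[] args) {
        List<String> words = List.of("apple", "avocado", "banana", "cherry", "blueberry");
        // All threads of the parallel stream insert into one shared ConcurrentMap,
        // so no per-thread intermediate maps need to be merged.
        ConcurrentMap<Character, List<String>> byFirstLetter = words.parallelStream()
                .collect(Collectors.groupingByConcurrent(w -> w.charAt(0)));
        System.out.println(byFirstLetter); // e.g. {a=[apple, avocado], b=[banana, blueberry], c=[cherry]}
    }
}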

Code examples

Example source: oracle/opengrok

Collectors.groupingByConcurrent((x) -> {
  try {
    doLink(x);
    // ... (snippet truncated at the source; the try/catch classifier
    // apparently groups items by whether the operation succeeded)

Example source: oracle/opengrok

Collectors.groupingByConcurrent((x) -> {
  try {
    doDelete(x);
    // ... (snippet truncated at the source)

Example source: oracle/opengrok

Collectors.groupingByConcurrent((x) -> {
  try {
    doRename(x);
    // ... (snippet truncated at the source)

Example source: oracle/opengrok

bySuccess = parallelizer.getForkJoinPool().submit(() ->
  args.works.parallelStream().collect(
  Collectors.groupingByConcurrent((x) -> {
    int tries = 0;
    Ctags pctags = null;
    // ... (snippet truncated at the source)

Example source: stackoverflow.com

// fragment truncated at the source: counts occurrences of each string;
// bh is apparently a JMH Blackhole consuming the result map
Collectors.groupingByConcurrent(
      Function.identity(), Collectors.<String> counting()));
bh.consume(counts);
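
A complete, runnable version of the same word-count pattern could look like this (the class name and sample data are illustrative, not from the original snippet):

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;

public class WordCountDemo {
    public static void main(String[] args) {
        List<String> words = List.of("a", "b", "a", "c", "a", "b");
        // identity() is the classifier, counting() the downstream collector:
        // each distinct word is mapped to its number of occurrences.
        ConcurrentMap<String, Long> counts = words.parallelStream()
                .collect(Collectors.groupingByConcurrent(Function.identity(), Collectors.counting()));
        System.out.println(counts); // {a=3, b=2, c=1}, iteration order not guaranteed
    }
}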

Example source: ripreal/V8LogScanner

private TreeMap<SortingKey, List<String>> mapLogs(ArrayList<String> sourceCol) {
  TreeMap<SortingKey, List<String>> mapped = sourceCol
          .stream()
          .parallel()
          .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
          .collect(Collectors.collectingAndThen(
              Collectors.groupingByConcurrent(this::createSortingKey),
              map -> {
                TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
                map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
                return tm;
              }
          ));
  return mapped;
}
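
Because groupingByConcurrent produces an unordered ConcurrentMap, the method above re-collects the result into a TreeMap via collectingAndThen to obtain sorted keys; the manual copy loop does the same job as TreeMap's copy constructor. A minimal sketch of that idea (the sample data and the length-based key are mine):

import java.util.List;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class SortedGroupingDemo {
    public static void main(String[] args) {
        List<String> logs = List.of("bb", "a", "ccc", "dd");
        // The finisher copies the unordered ConcurrentMap into a TreeMap,
        // so the keys come out in sorted order.
        TreeMap<Integer, List<String>> byLength = logs.parallelStream()
                .collect(Collectors.collectingAndThen(
                        Collectors.groupingByConcurrent(String::length),
                        TreeMap::new));
        System.out.println(byLength); // {1=[a], 2=[bb, dd], 3=[ccc]} (list order within a group may vary)
    }
}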

Example source: ripreal/V8LogScanner

private TreeMap<SortingKey, List<String>> finalReduction(Collection<String> sourceCol) {
  TreeMap<SortingKey, List<String>> mapped = sourceCol
          .parallelStream()
          .collect(Collectors.collectingAndThen(
              Collectors.groupingByConcurrent(this::createSortingKey),
              map -> {
                TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
                map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
                return tm;
              })
          );
  TreeMap<SortingKey, List<String>> rgxResult = mapped.entrySet()
      .stream()
      .sequential()
      .flatMap(n -> n.getValue().stream())
      .limit(limit)
      .collect(Collectors.collectingAndThen(
          Collectors.groupingByConcurrent(this::createSortingKey),
          map -> {
            TreeMap<SortingKey, List<String>> tm = new TreeMap<>();
            map.entrySet().forEach(n -> tm.put(n.getKey(), n.getValue()));
            return tm;
          })
      );
  return rgxResult;
}

Example source: gauravrmazra/gauravbytes

// stream source and declaration assumed; snippet truncated at the origin
ConcurrentMap<Integer, List<Person>> personByAgeConcurrent = persons.parallelStream()
    .collect(Collectors.groupingByConcurrent(Person::getAge));
System.out.println(personByAgeConcurrent);

Example source: spotify/styx

public static ConcurrentMap<String, Long> getResourceUsage(boolean globalConcurrencyEnabled,
                              List<InstanceState> activeStates,
                              Set<WorkflowInstance> timedOutInstances,
                              WorkflowResourceDecorator resourceDecorator,
                              Map<WorkflowId, Workflow> workflows) {
 return activeStates.parallelStream()
   .filter(entry -> !timedOutInstances.contains(entry.workflowInstance()))
   .filter(entry -> isConsumingResources(entry.runState().state()))
   .flatMap(instanceState -> pairWithResources(globalConcurrencyEnabled, instanceState,
     workflows, resourceDecorator))
   .collect(groupingByConcurrent(
     ResourceWithInstance::resource,
     ConcurrentHashMap::new,
     counting()));
}
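
The styx example uses the three-argument overload, which takes the classifier, a factory for the concurrent result map, and a downstream collector. A stripped-down sketch of the same shape (the resource strings are illustrative):

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static java.util.stream.Collectors.counting;
import static java.util.stream.Collectors.groupingByConcurrent;

public class ThreeArgOverloadDemo {
    public static void main(String[] args) {
        List<String> resources = List.of("cpu", "mem", "cpu", "disk", "cpu");
        // classifier, explicit map factory, downstream collector
        ConcurrentMap<String, Long> usage = resources.parallelStream()
                .collect(groupingByConcurrent(r -> r, ConcurrentHashMap::new, counting()));
        System.out.println(usage); // e.g. {cpu=3, mem=1, disk=1}
    }
}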

Example source: exomiser/Exomiser

private Stream<Optional<GeneModelPhenotypeMatch>> getBestModelsByGene(List<GeneModelPhenotypeMatch> organismModels) {
  if (options.isBenchmarkingEnabled()) {
    return organismModels.parallelStream()
        .filter(model -> model.getScore() > 0)
        // catch hit to known disease-gene association for purposes of benchmarking i.e to simulate novel gene discovery performance
        .filter(model -> !options.isBenchmarkHit(model))
        .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId, maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))))
        .values()
        .stream();
  }
  return organismModels.parallelStream()
      .filter(model -> model.getScore() > 0)
      .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId, maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))))
      .values()
      .stream();
}
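
Note that the maxBy downstream collector wraps each group's best element in an Optional, which is why the method above yields a Stream<Optional<GeneModelPhenotypeMatch>>. A minimal sketch of this keep-the-best-per-group pattern (the Score record is a stand-in of mine, not an Exomiser type):

import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentMap;
import static java.util.Comparator.comparingDouble;
import static java.util.stream.Collectors.groupingByConcurrent;
import static java.util.stream.Collectors.maxBy;

public class BestPerGroupDemo {
    record Score(int geneId, double value) {}

    public static void main(String[] args) {
        List<Score> scores = List.of(new Score(1, 0.2), new Score(1, 0.9), new Score(2, 0.5));
        // Keeps only the highest-scoring element per gene id; maxBy yields Optional values.
        ConcurrentMap<Integer, Optional<Score>> best = scores.parallelStream()
                .collect(groupingByConcurrent(Score::geneId, maxBy(comparingDouble(Score::value))));
        System.out.println(best); // {1=Optional[Score[geneId=1, value=0.9]], 2=Optional[Score[geneId=2, value=0.5]]}
    }
}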

Example source: gov.sandia.foundry/gov-sandia-cognition-learning-core

/**
 * Saves the final clustering for each data point.
 */
protected void saveFinalClustering()
{
  if (clusters.size() > 0)
  {
    List<? extends DataType> data = getData();
    assignments = assignDataToClusters(data);
    clusters.forEach(cluster -> cluster.getMembers().clear());
    IntStream.range(0, assignments.length).parallel()
      .mapToObj(Integer::valueOf)
      .collect(
        groupingByConcurrent(idx -> assignments[idx]))
      .forEach((clusterIdx, clusterPoints)
        -> clusters.get(clusterIdx).getMembers()
        .addAll(
          clusterPoints.stream()
          .map(idx -> data.get(idx))
          .collect(toList())));
  }
}

Example source: algorithmfoundry/Foundry

/**
 * Saves the final clustering for each data point.
 */
protected void saveFinalClustering()
{
  if (clusters.size() > 0)
  {
    List<? extends DataType> data = getData();
    assignments = assignDataToClusters(data);
    clusters.forEach(cluster -> cluster.getMembers().clear());
    IntStream.range(0, assignments.length).parallel()
      .mapToObj(Integer::valueOf)
      .collect(
        groupingByConcurrent(idx -> assignments[idx]))
      .forEach((clusterIdx, clusterPoints)
        -> clusters.get(clusterIdx).getMembers()
        .addAll(
          clusterPoints.stream()
          .map(idx -> data.get(idx))
          .collect(toList())));
  }
}

Example source: horrorho/LiquidDonkey

/**
 * Returns a new instance.
 *
 * @param snapshot not null
 * @param fileConfig not null
 * @return a new instance, not null
 */
public static SignatureManager from(Snapshot snapshot, FileConfig fileConfig) {
  logger.trace("<< from() < dsPrsId: {} udid: {} snapshot: {} fileConfig: {}",
      snapshot.dsPrsID(), snapshot.backupUDID(), snapshot.snapshotID(), fileConfig);
  CloudFileWriter cloudWriter = CloudFileWriter.from(snapshot, fileConfig);
  Map<ByteString, Set<ICloud.MBSFile>> signatures = snapshot.files().stream()
      .collect(Collectors.groupingByConcurrent(ICloud.MBSFile::getSignature, Collectors.toSet()));
  long totalBytes = signatures.values().stream()
      .flatMap(Set::stream)
      .mapToLong(ICloud.MBSFile::getSize)
      .sum();
  Lock lock = new ReentrantLock();
  SignatureManager instance
      = new SignatureManager(signatures, cloudWriter, lock, totalBytes, new AtomicLong(0), new AtomicLong(0));
  logger.trace(">> from() > {}", instance);
  return instance;
}
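
Here groupingByConcurrent runs with a toSet() downstream on a sequential stream; the collector is valid there too, although it mainly pays off on parallel streams. A small self-contained sketch of the same shape (the file names and the extension classifier are illustrative):

import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;

public class GroupToSetDemo {
    public static void main(String[] args) {
        List<String> files = List.of("a.txt", "b.txt", "a.txt", "c.log");
        // Groups file names by extension, de-duplicating each group into a Set.
        ConcurrentMap<String, Set<String>> byExt = files.parallelStream()
                .collect(Collectors.groupingByConcurrent(
                        f -> f.substring(f.lastIndexOf('.') + 1),
                        Collectors.toSet()));
        System.out.println(byExt); // e.g. {txt=[a.txt, b.txt], log=[c.log]}
    }
}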

Example source: com.opsbears.webcomponents/graph

private ImmutableMap<Node<VALUETYPE>, ImmutableList<Edge<VALUETYPE>>> createEdgeCache(
  ImmutableCollection<Node<VALUETYPE>> nodes,
  ImmutableCollection<Edge<VALUETYPE>> edges,
  Function<? super Edge<VALUETYPE>, ? extends Node<VALUETYPE>> mappingFunction
) {
  final Map<Node<VALUETYPE>, List<Edge<VALUETYPE>>> optimizedEdges = edges
    .stream()
    .collect(
      Collectors.groupingByConcurrent(
        mappingFunction,
        Collectors.toList()
      )
    );
  nodes.forEach(node -> optimizedEdges.putIfAbsent(node, new ArrayList<>()));
  return new ImmutableHashMap<>(
    optimizedEdges.keySet().stream().collect(
      Collectors.toMap(
        node -> node,
        node -> new ImmutableArrayList<>(optimizedEdges.get(node))
      )
    )
  );
}

Example source: exomiser/Exomiser

@Override
  public Stream<PhivePriorityResult> prioritise(List<String> hpoIds, List<Gene> genes) {
    logger.info("Starting {}", PRIORITY_TYPE);

    List<PhenotypeTerm> hpoPhenotypeTerms = priorityService.makePhenotypeTermsFromHpoIds(hpoIds);
    PhenotypeMatcher humanMousePhenotypeMatcher = priorityService.getPhenotypeMatcherForOrganism(hpoPhenotypeTerms, Organism.MOUSE);

    Set<Integer> wantedGeneIds = genes.stream().map(Gene::getEntrezGeneID).collect(ImmutableSet.toImmutableSet());

    Set<GeneModel> modelsToScore = priorityService.getModelsForOrganism(Organism.MOUSE).stream()
        .filter(model -> wantedGeneIds.contains(model.getEntrezGeneId()))
        .collect(ImmutableSet.toImmutableSet());

    List<GeneModelPhenotypeMatch> scoredModels = scoreModels(humanMousePhenotypeMatcher, modelsToScore);

    //n.b. this will contain models but with a phenotype score of zero
    Map<Integer, Optional<GeneModelPhenotypeMatch>> geneModelPhenotypeMatches = scoredModels.parallelStream()
//                .filter(model -> model.getScore() > 0)
        .collect(groupingByConcurrent(GeneModelPhenotypeMatch::getEntrezGeneId, maxBy(comparingDouble(GeneModelPhenotypeMatch::getScore))));

    return genes.stream().map(getPhivePriorityResult(geneModelPhenotypeMatches));
  }

Example source: ripreal/V8LogScanner

private ConcurrentMap<String, List<String>> mapLogs(ArrayList<String> sourceCol, String filename) {
  ConcurrentMap<String, List<String>> mapResults = null;
  if (groupType == GroupTypes.BY_PROPS) {
    mapResults = sourceCol
        .parallelStream()
        .unordered()
        .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
        .collect(Collectors.groupingByConcurrent(input -> {
              return RgxOpManager.getEventProperty(input, eventPatterns, cleanPropsRgx, groupPropsRgx);
            }
        ));
  } else if (groupType == GroupTypes.BY_FILE_NAMES) {
    mapResults = sourceCol
        .parallelStream()
        .unordered()
        .filter(n -> RgxOpManager.anyMatch(n, eventPatterns, integerFilters, integerCompTypes))
        .collect(Collectors.groupingByConcurrent(n -> filename));
  }
  return mapResults;
}

Example source: one.util/streamex

// StreamEx internals: dispatch to the concurrent collector when the stream is
// parallel, the downstream collector is UNORDERED, and the supplied map is a
// ConcurrentMap (snippet truncated at the source)
if (isParallel() && downstream.characteristics().contains(Characteristics.UNORDERED)
  && mapSupplier.get() instanceof ConcurrentMap) {
  return (M) collect(Collectors.groupingByConcurrent(keyMapper,
    (Supplier<? extends ConcurrentMap<K, D>>) mapSupplier, mapping));

Example source: org.infinispan/infinispan-core

public void testObjCollectorGroupBy() {
 Cache<Integer, String> cache = getCache(0);
 int range = 10;
 // First populate the cache with a bunch of values
 IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
 assertEquals(range, cache.size());
 CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
 ConcurrentMap<Boolean, List<Map.Entry<Integer, String>>> grouped = createStream(entrySet).collect(
       () -> Collectors.groupingByConcurrent(k -> k.getKey() % 2 == 0));
 grouped.get(true).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 0));
 grouped.get(false).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 1));
}
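
Grouping on a boolean classifier, as this test does, is effectively a concurrent partition; Collectors has no partitioningByConcurrent, so this is the idiomatic substitute. A standalone sketch (the even/odd data is illustrative):

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class EvenOddDemo {
    public static void main(String[] args) {
        ConcurrentMap<Boolean, List<Integer>> evenOdd = IntStream.range(0, 10)
                .parallel()
                .boxed()
                // a Boolean key gives exactly two groups, like partitioningBy
                .collect(Collectors.groupingByConcurrent(i -> i % 2 == 0));
        System.out.println(evenOdd.get(true));  // even numbers, order not guaranteed
        System.out.println(evenOdd.get(false)); // odd numbers, order not guaranteed
    }
}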

Example source: amidst/toolbox

public static double[][] learnKMeans(int k, DataStream<DataInstance> data){
  setK(k);
  Attributes atts = data.getAttributes();
  double[][] centroids = new double[getK()][atts.getNumberOfAttributes()];
  AtomicInteger index = new AtomicInteger();
  data.stream().limit(getK()).forEach(dataInstance -> centroids[index.getAndIncrement()]=dataInstance.toArray());
  data.restart();
  boolean change = true;
  while(change){
    Map<Integer, Averager> newCentroidsAv =
        data.parallelStream(batchSize)
            .map(instance -> Pair.newPair(centroids, instance))
            .collect(Collectors.groupingByConcurrent(Pair::getClusterID,
                Collectors.reducing(new Averager(atts.getNumberOfAttributes()), p -> new Averager(p.getDataInstance()), Averager::combine)));
    double error = IntStream.rangeClosed(0, centroids.length - 1).mapToDouble( i -> {
      double distance = Pair.getED(centroids[i], newCentroidsAv.get(i).average());
      centroids[i]=newCentroidsAv.get(i).average();
      return distance;
    }).average().getAsDouble();
    if (error<epsilon)
      change = false;
    data.restart();
  }
  return centroids;
}
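
The k-means loop above relies on the three-argument reducing(identity, mapper, op) downstream collector to fold all instances assigned to a cluster into a single Averager. A minimal sketch of grouping with a reducing downstream (the int[] points and the summing reduction are stand-ins of mine):

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import static java.util.stream.Collectors.groupingByConcurrent;
import static java.util.stream.Collectors.reducing;

public class ReducingDownstreamDemo {
    public static void main(String[] args) {
        List<int[]> points = List.of(new int[]{0, 1}, new int[]{0, 3}, new int[]{1, 10});
        // Groups points by cluster id (index 0) and sums their values (index 1)
        // using the identity/mapper/op form of reducing.
        ConcurrentMap<Integer, Integer> sums = points.parallelStream()
                .collect(groupingByConcurrent(p -> p[0], reducing(0, p -> p[1], Integer::sum)));
        System.out.println(sums); // e.g. {0=4, 1=10}
    }
}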
