Usage of the scala.collection.Iterator.hasNext() method, with code examples


This article collects code examples of calling the scala.collection.Iterator.hasNext() method from Java and shows how hasNext() is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, extracted from selected open-source projects, and should serve as useful references. Details of the Iterator.hasNext() method are as follows:
Package: scala.collection
Class: Iterator
Method: hasNext

About Iterator.hasNext

Tests whether this iterator can provide another element: it returns true if a subsequent call to next() will succeed, and false once the iterator is exhausted.
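
Before the project excerpts, here is a minimal self-contained sketch of the pattern they all share: obtain a scala.collection.Iterator on the Java side and drain it with hasNext()/next(). The class name HasNextDemo and the sample data are made up for illustration, and the JavaConverters call assumes a Scala 2.11/2.12 scala-library on the classpath:

import java.util.Arrays;

import scala.collection.Iterator;
import scala.collection.JavaConverters;

public class HasNextDemo {
  public static void main(String[] args) {
    // Convert a java.util.List into a Scala Buffer and obtain its Scala iterator.
    Iterator<String> it =
        JavaConverters.asScalaBufferConverter(Arrays.asList("alice", "bob")).asScala().iterator();
    // hasNext() guards every call to next(); once it returns false the iterator is exhausted.
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}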

Code examples

Code example source: twosigma/beakerx

public static SparkConf getSparkConfBasedOn(SparkSession.Builder sparkSessionBuilder) {
 try {
  SparkConf sparkConf = new SparkConf();
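  // The Builder keeps its settings in a private Scala mutable HashMap; the
  // compiler-mangled field is opened via reflection and each Tuple2 entry
  // is copied into the SparkConf as a key/value pair.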
  Field options = sparkSessionBuilder.getClass().getDeclaredField("org$apache$spark$sql$SparkSession$Builder$$options");
  options.setAccessible(true);
  Iterator iterator = ((scala.collection.mutable.HashMap) options.get(sparkSessionBuilder)).iterator();
  while (iterator.hasNext()) {
   Tuple2 x = (Tuple2) iterator.next();
   sparkConf.set((String) (x)._1, (String) (x)._2);
  }
  return sparkConf;
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
}

Code example source: twitter/distributedlog

scala.collection.Iterator<Address> endpointAddressesIterator = endpointAddresses.toIterator();
HashSet<ServiceInstance> serviceInstances = new HashSet<ServiceInstance>();
while (endpointAddressesIterator.hasNext()) {
  serviceInstances.add(endpointAddressToServiceInstance(endpointAddressesIterator.next()));
}
// ... (rest of the excerpt truncated in the source)

Code example source: org.apache.spark/spark-core_2.11 (the same excerpt is also published under spark-core_2.10 and spark-core)

@Override
public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOException {
 // Keep track of success so we know if we encountered an exception
 // We do this rather than a standard try/catch/re-throw to handle
 // generic throwables.
 boolean success = false;
 try {
  while (records.hasNext()) {
   insertRecordIntoSorter(records.next());
  }
  closeAndWriteOutput();
  success = true;
 } finally {
  if (sorter != null) {
   try {
    sorter.cleanupResources();
   } catch (Exception e) {
    // Only throw this error if we won't be masking another
    // error.
    if (success) {
     throw e;
    } else {
     logger.error("In addition to a failure during writing, we failed during " +
            "cleanup.", e);
    }
   }
  }
 }
}

Code example source: Graylog2/graylog2-server

long lastOffset = Long.MIN_VALUE;
long totalBytes = 0;
while (iterator.hasNext()) {
  final MessageAndOffset messageAndOffset = iterator.next();
  // ... (loop body truncated in the source excerpt)
}

Code example source: json-path/JsonPath

public Result runBoon() {
  String result = null;
  String error = null;
  long time;
  Iterator<Object> query = null;
  long now = System.currentTimeMillis();
  try {
    if (!optionAsValues) {
      throw new UnsupportedOperationException("Not supported!");
    }
    io.gatling.jsonpath.JsonPath jsonPath = JsonPath$.MODULE$.compile(path).right().get();
    JsonParser jsonParser = new JsonParserCharArray();
    Object jsonModel = jsonParser.parse(json);
    query = jsonPath.query(jsonModel);
  } catch (Exception e) {
    error = getError(e);
  } finally {
    time = System.currentTimeMillis() - now;
    if (query != null) {
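      // Drain the Scala iterator returned by the Gatling JsonPath query into
      // a Java list, then serialize it back to JSON with Boon's ObjectMapper.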
      List<Object> res = new ArrayList<Object>();
      while (query.hasNext()) {
        res.add(query.next());
      }
      ObjectMapper mapper = new ObjectMapperImpl();
      result = mapper.toJson(res);
    }
    return new Result("boon", time, result, error);
  }
}

Code example source: linkedin/kafka-monitor

private static void reassignPartitions(KafkaZkClient zkClient, Collection<Broker> brokers, String topic, int partitionCount, int replicationFactor) {
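 // Wrap the broker metadata in Scala collections, ask AdminUtils for a fresh
 // replica assignment, then iterate the returned Scala Map's Tuple2 entries to
 // build an immutable Map<TopicPartition, Seq<Object>> for the reassignment.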
 scala.collection.mutable.ArrayBuffer<BrokerMetadata> brokersMetadata = new scala.collection.mutable.ArrayBuffer<>(brokers.size());
 for (Broker broker : brokers) {
  brokersMetadata.$plus$eq(new BrokerMetadata(broker.id(), broker.rack()));
 }
 scala.collection.Map<Object, Seq<Object>> assignedReplicas =
   AdminUtils.assignReplicasToBrokers(brokersMetadata, partitionCount, replicationFactor, 0, 0);
 scala.collection.immutable.Map<TopicPartition, Seq<Object>> newAssignment = new scala.collection.immutable.HashMap<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = assignedReplicas.iterator();
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  TopicPartition tp = new TopicPartition(topic, (Integer) scalaTuple._1);
  newAssignment = newAssignment.$plus(new scala.Tuple2<>(tp, scalaTuple._2));
 }
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment = zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 String currentAssignmentJson = formatAsReassignmentJson(topic, currentAssignment);
 String newAssignmentJson = formatAsReassignmentJson(topic, assignedReplicas);
 LOG.info("Reassign partitions for topic " + topic);
 LOG.info("Current partition replica assignment " + currentAssignmentJson);
 LOG.info("New partition replica assignment " + newAssignmentJson);
 zkClient.createPartitionReassignment(newAssignment);
}

Code example source: linkedin/kafka-monitor

private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
   zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 List<PartitionInfo> partitionInfoList = new ArrayList<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
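 // Each Tuple2 is (partition id, replica broker ids); look up the partition
 // leader in ZooKeeper and assemble Kafka PartitionInfo objects.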
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  Integer partition = (Integer) scalaTuple._1();
  scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
  Node leader = leaderOption.isEmpty() ?  null : new Node((Integer) leaderOption.get(), "", -1);
  Node[] replicas = new Node[scalaTuple._2().size()];
  for (int i = 0; i < replicas.length; i++) {
   Integer brokerId = (Integer) scalaTuple._2().apply(i);
   replicas[i] = new Node(brokerId, "", -1);
  }
  partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
 }
 return partitionInfoList;
}

Code example source: org.apache.spark/spark-core_2.10 (the same excerpt is also published under spark-core_2.11 and spark-core)

@Override
public void write(Iterator<Product2<K, V>> records) throws IOException {
 assert (partitionWriters == null);
 if (!records.hasNext()) {
  partitionLengths = new long[numPartitions];
  shuffleBlockResolver.writeIndexFileAndCommit(shuffleId, mapId, partitionLengths, null);
  // ... (empty-input handling truncated in the source excerpt)
 }
 while (records.hasNext()) {
  final Product2<K, V> record = records.next();
  final K key = record._1();
  // ... (per-record partition writing truncated in the source excerpt)
 }
}

Code example source: org.apache.spark/spark-core_2.10 (the same excerpt is also published under spark-core and spark-core_2.11)

private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
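 // Re-read the merged shuffle output partition by partition: seek to each
 // partition's byte range, undo encryption/compression, and deserialize the
 // key/value records, checking that each record hashes to its partition.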
 final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
 long startOffset = 0;
 for (int i = 0; i < NUM_PARTITITONS; i++) {
  final long partitionSize = partitionSizesInMergedFile[i];
  if (partitionSize > 0) {
   FileInputStream fin = new FileInputStream(mergedOutputFile);
   fin.getChannel().position(startOffset);
   InputStream in = new LimitedInputStream(fin, partitionSize);
   in = blockManager.serializerManager().wrapForEncryption(in);
   if (conf.getBoolean("spark.shuffle.compress", true)) {
    in = CompressionCodec$.MODULE$.createCodec(conf).compressedInputStream(in);
   }
   DeserializationStream recordsStream = serializer.newInstance().deserializeStream(in);
   Iterator<Tuple2<Object, Object>> records = recordsStream.asKeyValueIterator();
   while (records.hasNext()) {
    Tuple2<Object, Object> record = records.next();
    assertEquals(i, hashPartitioner.getPartition(record._1()));
    recordsList.add(record);
   }
   recordsStream.close();
   startOffset += partitionSize;
  }
 }
 return recordsList;
}

Code example source: apache/phoenix

@Override
public boolean next() {
  if (!iterator.hasNext()) {
    return false;
  }
  currentRow = iterator.next();
  return true;
}

Code example source: vakinge/jeesuite-libs

protected List<String> group() {
  List<String> groups = new ArrayList<>();
  scala.collection.immutable.List<GroupOverview> list = adminClient.listAllConsumerGroupsFlattened();
  if (list == null)
    return groups;
  Iterator<GroupOverview> iterator = list.iterator();
  while (iterator.hasNext()) {
    groups.add(iterator.next().groupId());
  }
  return groups;
}

Code example source: org.apache.spark/spark-catalyst_2.10 (the same excerpt is also published under spark-catalyst_2.11)

public Iterator<UnsafeRow> sort(Iterator<UnsafeRow> inputIterator) throws IOException {
 while (inputIterator.hasNext()) {
  insertRow(inputIterator.next());
 }
 return sort();
}

Code example source: uber/hudi

@Override
 public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
  Iterator<AccumulatorV2<?, ?>> iterator = taskEnd.taskMetrics().accumulators().iterator();
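   // Scan the finished task's accumulators for stage 1's shuffle-read record
   // counter and record its value per task id.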
  while (iterator.hasNext()) {
   AccumulatorV2 accumulator = iterator.next();
   if (taskEnd.stageId() == 1 && accumulator.isRegistered() && accumulator.name().isDefined()
     && accumulator.name().get().equals("internal.metrics.shuffle.read.recordsRead")) {
    stageOneShuffleReadTaskRecordsCountMap.put(taskEnd.taskInfo().taskId(), (Long) accumulator.value());
   }
  }
 }
}); // closes an anonymous SparkListener whose registration call is truncated in the source excerpt
