org.apache.kafka.clients.consumer.Consumer.endOffsets()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(11.8k)|赞(0)|评价(0)|浏览(406)

本文整理了Java中org.apache.kafka.clients.consumer.Consumer.endOffsets()方法的一些代码示例,展示了Consumer.endOffsets()的具体用法。这些代码示例主要来源于GitHub/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Consumer.endOffsets()方法的具体详情如下:
包路径:org.apache.kafka.clients.consumer.Consumer
类名称:Consumer
方法名:endOffsets

Consumer.endOffsets介绍

endOffsets() 返回给定各分区的结束偏移量(end offset),即该分区最后一条消息的偏移量加一,也称高水位(high watermark);调用该方法不会改变消费者当前的消费位置。

代码示例

代码示例来源:origin: openzipkin/brave

/**
 * Timed variant of {@code endOffsets}: looks up the end offset of each given
 * partition, failing if the lookup exceeds {@code timeout}.
 *
 * Pure pass-through to the wrapped consumer — the tracing wrapper adds no
 * behavior here. NOTE(review): the sibling overload carries {@code @Override}
 * but this one does not in the quoted source.
 */
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions,
    Duration timeout) {
  // delegate is the underlying Kafka Consumer being instrumented.
  return delegate.endOffsets(partitions, timeout);
}

代码示例来源:origin: openzipkin/brave

/**
 * Looks up the end offset of each given partition using the consumer's
 * default request timeout. Pure pass-through to the wrapped consumer.
 */
@Override public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
  return delegate.endOffsets(partitions);
}

代码示例来源:origin: apache/storm

  1. Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);

代码示例来源:origin: linkedin/cruise-control

  1. Map<TopicPartition, Long> endOffsets = _metricConsumer.endOffsets(assignment);
  2. Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = _metricConsumer.offsetsForTimes(timestampToSeek);

代码示例来源:origin: org.apache.kafka/kafka_2.11

/**
 * Seeks every given partition of {@code client} to the single requested
 * {@code offset}, after validating the offset against each partition's
 * actual [beginningOffset, endOffset] range.
 *
 * @param client               consumer whose position is moved (must already be assigned the partitions)
 * @param inputTopicPartitions partitions to reset
 * @param offset               absolute target offset applied to every partition
 */
public void resetOffsetsTo(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Long offset) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // Assign the same requested target offset to every partition.
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange is defined elsewhere in the quoted class; presumably it
    // clamps or rejects offsets outside [beginning, end] — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    // Move the consumer's position; takes effect on the next poll.
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka_2.12

/**
 * Seeks every given partition of {@code client} to the single requested
 * {@code offset}, after validating the offset against each partition's
 * actual [beginningOffset, endOffset] range.
 * (Identical snippet to the kafka_2.11 copy above — same class, different
 * Scala-version artifact.)
 */
public void resetOffsetsTo(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Long offset) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // Assign the same requested target offset to every partition.
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka

/**
 * Seeks every given partition of {@code client} to the single requested
 * {@code offset}, after validating the offset against each partition's
 * actual [beginningOffset, endOffset] range.
 * (Same snippet as the kafka_2.11/kafka_2.12 copies above.)
 */
public void resetOffsetsTo(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Long offset) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // Assign the same requested target offset to every partition.
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka_2.12

/**
 * Shifts the current position of every given partition by {@code shiftBy}
 * (may be negative), clamped to each partition's valid offset range.
 *
 * @param client               consumer whose positions are moved
 * @param inputTopicPartitions partitions to shift
 * @param shiftBy              signed delta added to each partition's current position
 */
public void shiftOffsetsBy(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, long shiftBy) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        // Target = current position + delta; position() may block to fetch the position.
        final long position = client.position(topicPartition);
        final long offset = position + shiftBy;
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka_2.11

/**
 * Shifts the current position of every given partition by {@code shiftBy}
 * (may be negative), clamped to each partition's valid offset range.
 * (Identical snippet to the kafka_2.12 copy above.)
 */
public void shiftOffsetsBy(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, long shiftBy) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        // Target = current position + delta.
        final long position = client.position(topicPartition);
        final long offset = position + shiftBy;
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka_2.11

/**
 * Seeks each given partition to a per-partition offset supplied by a
 * pre-built reset plan, validating every offset against the partition's
 * actual [beginningOffset, endOffset] range first.
 *
 * @param client                  consumer whose positions are moved
 * @param inputTopicPartitions    partitions to reset
 * @param topicPartitionsAndOffset planned partition -> target offset map
 */
public void resetOffsetsFromResetPlan(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Map<TopicPartition, Long> topicPartitionsAndOffset) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka_2.12

/**
 * Seeks each given partition to a per-partition offset supplied by a
 * pre-built reset plan, validating every offset against the partition's
 * actual offset range first. (Identical snippet to the kafka_2.11 copy above.)
 */
public void resetOffsetsFromResetPlan(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Map<TopicPartition, Long> topicPartitionsAndOffset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka

/**
 * Seeks each given partition to a per-partition offset supplied by a
 * pre-built reset plan, validating every offset against the partition's
 * actual offset range first. (Same snippet as the two copies above.)
 */
public void resetOffsetsFromResetPlan(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Map<TopicPartition, Long> topicPartitionsAndOffset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: org.apache.kafka/kafka

/**
 * Shifts the current position of every given partition by {@code shiftBy}
 * (may be negative), clamped to each partition's valid offset range.
 * (Same snippet as the kafka_2.11/kafka_2.12 copies above.)
 */
public void shiftOffsetsBy(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, long shiftBy) {
    // End and beginning offsets bound the range a partition may legally be reset to.
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        // Target = current position + delta.
        final long position = client.position(topicPartition);
        final long offset = position + shiftBy;
        topicPartitionsAndOffset.put(topicPartition, offset);
    }
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: io.opentracing.contrib/opentracing-kafka-client

/**
 * Timed end-offset lookup: pure pass-through to the wrapped consumer.
 * The OpenTracing wrapper adds no instrumentation on this metadata call.
 */
@Override
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> collection,
    Duration duration) {
  return consumer.endOffsets(collection, duration);
}

代码示例来源:origin: opentracing-contrib/java-kafka-client

/**
 * Timed end-offset lookup: pure pass-through to the wrapped consumer.
 * (Identical snippet to the io.opentracing.contrib copy above.)
 */
@Override
public Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> collection,
    Duration duration) {
  return consumer.endOffsets(collection, duration);
}

代码示例来源:origin: vert-x3/vertx-kafka-client

/**
 * Asynchronous end-offset lookup for a set of partitions: submits the
 * blocking Kafka call to the client's internal worker task queue and
 * completes the future with the partition -> end-offset map, which is then
 * delivered to {@code handler}.
 */
@Override
public void endOffsets(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Map<TopicPartition, Long>>> handler) {
  this.submitTask((consumer, future) -> {
    Map<TopicPartition, Long> endOffsets = this.consumer.endOffsets(topicPartitions);
    // future may be null — guard before completing (submitTask's contract; not visible here).
    if (future != null) {
      future.complete(endOffsets);
    }
  }, handler);
}

代码示例来源:origin: vert-x3/vertx-kafka-client

/**
 * Single-partition convenience overload: wraps the partition in a set,
 * performs the blocking end-offset lookup on the worker task queue, and
 * completes the future with just that partition's end offset.
 */
@Override
public void endOffsets(TopicPartition topicPartition, Handler<AsyncResult<Long>> handler) {
  this.submitTask((consumer, future) -> {
    // endOffsets only accepts a collection, so wrap the single partition.
    Set<TopicPartition> input = new HashSet<>();
    input.add(topicPartition);
    Map<TopicPartition, Long> endOffsets = this.consumer.endOffsets(input);
    // future may be null — guard before completing (submitTask's contract; not visible here).
    if (future != null) {
      future.complete(endOffsets.get(topicPartition));
    }
  }, handler);
}

代码示例来源:origin: org.apache.servicemix.bundles/org.apache.servicemix.bundles.kafka_2.11

/**
 * Seeks each given partition to a per-partition offset supplied by a
 * pre-built reset plan, validating every offset against the partition's
 * actual offset range first. (Same snippet as the org.apache.kafka copies
 * above, re-bundled by ServiceMix.)
 */
public void resetOffsetsFromResetPlan(Consumer<byte[], byte[]> client, Set<TopicPartition> inputTopicPartitions, Map<TopicPartition, Long> topicPartitionsAndOffset) {
    final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
    final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
    // checkOffsetRange (defined elsewhere) presumably clamps out-of-range offsets — TODO confirm.
    final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
        checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
    for (final TopicPartition topicPartition : inputTopicPartitions) {
        client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
    }
}

代码示例来源:origin: apache/samza

/**
 * When the mocked consumer returns NO end offset for the partition
 * (empty map from endOffsets), getSSPMetadata should report the oldest
 * offset as "0" and both newest and upcoming offsets as null.
 */
@Test
public void testGetSSPMetadataEmptyUpcomingOffset() {
  SystemStreamPartition ssp = new SystemStreamPartition(TEST_SYSTEM, VALID_TOPIC, new Partition(0));
  TopicPartition topicPartition = new TopicPartition(VALID_TOPIC, 0);
  // Beginning offset exists (0), but endOffsets yields nothing for the partition.
  when(mockKafkaConsumer.beginningOffsets(ImmutableList.of(topicPartition))).thenReturn(
      ImmutableMap.of(topicPartition, 0L));
  when(mockKafkaConsumer.endOffsets(ImmutableList.of(topicPartition))).thenReturn(ImmutableMap.of());
  // Expected metadata: oldest="0", newest=null, upcoming=null.
  Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> expected =
      ImmutableMap.of(ssp, new SystemStreamMetadata.SystemStreamPartitionMetadata("0", null, null));
  assertEquals(kafkaSystemAdmin.getSSPMetadata(ImmutableSet.of(ssp)), expected);
}

代码示例来源:origin: apache/samza

/**
 * When the mocked consumer reports an end offset of 0 (and a beginning
 * offset of -1), getSSPMetadata should report oldest="0", newest=null,
 * and upcoming="0".
 */
@Test
public void testGetSSPMetadataZeroUpcomingOffset() {
  SystemStreamPartition ssp = new SystemStreamPartition(TEST_SYSTEM, VALID_TOPIC, new Partition(0));
  TopicPartition topicPartition = new TopicPartition(VALID_TOPIC, 0);
  // Beginning offset is -1; end (upcoming) offset is 0.
  when(mockKafkaConsumer.beginningOffsets(ImmutableList.of(topicPartition))).thenReturn(
      ImmutableMap.of(topicPartition, -1L));
  when(mockKafkaConsumer.endOffsets(ImmutableList.of(topicPartition))).thenReturn(
      ImmutableMap.of(topicPartition, 0L));
  // Expected metadata: oldest="0", newest=null, upcoming="0".
  Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> expected =
      ImmutableMap.of(ssp, new SystemStreamMetadata.SystemStreamPartitionMetadata("0", null, "0"));
  assertEquals(kafkaSystemAdmin.getSSPMetadata(ImmutableSet.of(ssp)), expected);
}

相关文章