Usage of the org.apache.kafka.clients.consumer.Consumer.seekToEnd() method, with code examples

This article collects a number of Java code examples for the org.apache.kafka.clients.consumer.Consumer.seekToEnd() method and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of the Consumer.seekToEnd() method are as follows:
Package: org.apache.kafka.clients.consumer
Class: Consumer
Method: seekToEnd

About Consumer.seekToEnd

Seeks to the last offset for each of the given partitions. The seek is evaluated lazily: the new position only takes effect on the next poll() or position() call. If an empty collection is passed, the seek applies to all currently assigned partitions.
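
As a quick illustration, here is a minimal sketch that is not taken from any of the projects below; the broker address, group id, and topic name are assumptions. It assigns a single partition, seeks to its end, and reads back the log-end offset with position():

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class SeekToEndExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Assumed broker address and group id; adjust for your environment.
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "seek-to-end-demo");
    props.put("key.deserializer", ByteArrayDeserializer.class.getName());
    props.put("value.deserializer", ByteArrayDeserializer.class.getName());

    try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
      TopicPartition tp = new TopicPartition("demo-topic", 0); // hypothetical topic
      consumer.assign(Collections.singletonList(tp));
      // seekToEnd is lazy; position() forces the lookup and returns the log-end offset.
      consumer.seekToEnd(Collections.singletonList(tp));
      long endOffset = consumer.position(tp);
      System.out.println("Log-end offset of " + tp + " is " + endOffset);
    }
  }
}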

Code examples

Code example source: openzipkin/brave

@Override public void seekToEnd(Collection<TopicPartition> partitions) {
 delegate.seekToEnd(partitions);
}

Code example source: apache/incubator-gobblin

@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
 TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
 this.consumer.assign(Collections.singletonList(topicPartition));
 this.consumer.seekToEnd(topicPartition); // older consumer API (pre-0.10.1) accepted varargs; newer clients take a Collection
 return this.consumer.position(topicPartition);
}
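
For comparison, and not part of the Gobblin code above: on clients 0.10.1 and newer the same log-end offset can also be obtained with Consumer.endOffsets(), which does not move the consumer's fetch position. A minimal sketch, with the class and method names being illustrative:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

public class LatestOffsetExample {
  // endOffsets() queries the broker for the log-end offset but leaves the
  // consumer's fetch position untouched, unlike assign() + seekToEnd() + position().
  public static long latestOffsetWithoutSeeking(Consumer<?, ?> consumer, TopicPartition tp) {
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singletonList(tp));
    return endOffsets.get(tp);
  }
}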

Code example source: apache/storm

} else if (firstPollOffsetStrategy == LATEST && isFirstPollSinceTopologyWasDeployed) {
  LOG.debug("First poll for topic partition [{}], seeking to partition end", tp);
  consumer.seekToEnd(Collections.singleton(tp));
} else if (lastBatchMeta != null) {
  LOG.debug("First poll for topic partition [{}], using last batch metadata", tp);
} else if (firstPollOffsetStrategy == UNCOMMITTED_LATEST) {
  LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition end", tp);
  consumer.seekToEnd(Collections.singleton(tp));

Code example source: apache/hive

consumer.seekToEnd(topicPartitionList);
this.endOffset = consumer.position(topicPartition);
LOG.info("End Offset set to [{}]", this.endOffset);

Code example source: org.apache.kafka/kafka (identical in the kafka_2.11 and kafka_2.12 artifacts)

public void maybeSeekToEnd(final String groupId,
              final Consumer<byte[], byte[]> client,
              final Set<TopicPartition> intermediateTopicPartitions) {
  if (intermediateTopicPartitions.size() > 0) {
    System.out.println("Following intermediate topics offsets will be reset to end (for consumer group " + groupId + ")");
    for (final TopicPartition topicPartition : intermediateTopicPartitions) {
      if (allTopics.contains(topicPartition.topic())) {
        System.out.println("Topic: " + topicPartition.topic());
      }
    }
    client.seekToEnd(intermediateTopicPartitions);
  }
}

Code example source: spring-projects/spring-kafka

assertThat(captor.getValue())
    .isEqualTo(new HashSet<>(Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("foo", 4))));
verify(consumer).seekToEnd(captor.capture());
assertThat(captor.getValue())
    .isEqualTo(new HashSet<>(Arrays.asList(new TopicPartition("foo", 1), new TopicPartition("foo", 5))));

Code example source: rayokota/kafka-graphs

@Override
public void seekToEnd(Collection<TopicPartition> partitions) {
  kafkaConsumer.seekToEnd(partitions);
}

Code example source: io.opentracing.contrib/opentracing-kafka-client

@Override
public void seekToEnd(Collection<TopicPartition> partitions) {
 consumer.seekToEnd(partitions);
}

Code example source: org.apache.kafka/kafka_2.12 (identical in the kafka and kafka_2.11 artifacts)

client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
  client.seekToEnd(inputTopicPartitions);
} else if (options.has(shiftByOption)) {
  shiftOffsetsBy(client, inputTopicPartitions, options.valueOf(shiftByOption));

Code example source: spring-projects/spring-kafka

assertThat(next.topic()).isEqualTo(topic);
assertThat(next.partition()).isEqualTo(0);
verify(consumer).seekToEnd(captor.capture());
next = captor.getValue().iterator().next();
assertThat(next.topic()).isEqualTo(topic);

Code example source: io.projectreactor.kafka/reactor-kafka

@Override
public void seekToEnd() {
  this.consumer.seekToEnd(Collections.singletonList(topicPartition));
}

Code example source: BriData/DBus

public static Consumer<String, byte[]> createConsumer(Properties props, String subscribeTopic) throws Exception {
    TopicPartition topicPartition = new TopicPartition(subscribeTopic, 0);
    List<TopicPartition> topicPartitions = Arrays.asList(topicPartition);   
    Consumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    // consumer.subscribe(Arrays.asList(subscribeTopics.split(",")));
    consumer.assign(topicPartitions);
        // The consumer is created when the topology starts. When the topology restarts, the current
        // strategy is to drop any unprocessed messages left in Kafka rather than consume them, hence the seek to end.
    consumer.seekToEnd(topicPartitions);
    return consumer;
  }    
}

Code example source: spring-cloud/spring-cloud-stream-binder-kafka (identical in the org.springframework.cloud artifact)

@Override
  public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
    if (initialAssignment.getAndSet(false)) {
      if ("earliest".equals(resetTo)) {
        consumer.seekToBeginning(tps);
      }
      else if ("latest".equals(resetTo)) {
        consumer.seekToEnd(tps);
      }
    }
  }
});
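
The same "reset to latest on first assignment" behavior can be sketched without Spring by registering a ConsumerRebalanceListener directly. This is an illustrative sketch, not the binder's implementation; the class and method names are assumptions. Note that seekToEnd() belongs inside onPartitionsAssigned, because before the rebalance completes the consumer has no assignment to seek on:

import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class ResetToLatestOnFirstAssignment {
  // Subscribes to the given topic and seeks to the end of every partition
  // the first time partitions are assigned, mirroring the "latest" reset above.
  public static void subscribeFromLatest(final Consumer<?, ?> consumer, String topic) {
    final AtomicBoolean initialAssignment = new AtomicBoolean(true);
    consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // nothing to do on revocation in this sketch
      }

      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        if (initialAssignment.getAndSet(false)) {
          // only seek on the very first assignment; later rebalances keep committed positions
          consumer.seekToEnd(partitions);
        }
      }
    });
  }
}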

Code example source: vert-x3/vertx-kafka-client

@Override
public KafkaReadStream<K, V> seekToEnd(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler) {
 this.context.runOnContext(r -> {
  current = null;
  this.submitTask((consumer, future) -> {
   consumer.seekToEnd(topicPartitions);
   if (future != null) {
    future.complete();
   }
  }, completionHandler);
 });
 return this;
}

Code example source: linkedin/li-apache-kafka-clients

@Override
public void seekToEnd(Collection<TopicPartition> partitions) {
 _kafkaConsumer.seekToEnd(partitions);
 for (TopicPartition tp : partitions) {
  _consumerRecordsProcessor.clear(tp);
  // We set the high watermark to 0 if user is seeking to end. This is needed to prevent the consumer from
  // retrieving high watermark from the committed offsets.
  _consumerRecordsProcessor.setPartitionConsumerHighWaterMark(tp, 0L);
 }
}
