kafka.message.Message.<init>()方法的使用及代码示例

x33g5p2x  于2022-01-25 转载在 其他  
字(6.4k)|赞(0)|评价(0)|浏览(216)

本文整理了Java中kafka.message.Message.<init>()方法的一些代码示例,展示了Message.<init>()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Message.<init>()方法的具体详情如下:
包路径:kafka.message.Message
类名称:Message
方法名:<init>

Message.<init>介绍

暂无

代码示例

代码示例来源:origin: Graylog2/graylog2-server

  1. final Message newMessage = new Message(messageBytes, idBytes);

代码示例来源:origin: apache/incubator-gobblin

  // Feeds one message into a simulated multi-stream Kafka consumer:
  // picks the next stream round-robin, advances that stream's offset counter,
  // wraps the UTF-8 message bytes in a single-element uncompressed
  // ByteBufferMessageSet, and enqueues it as a FetchedDataChunk.
  1. public void pushToStream(String message) {
  // Round-robin selection over the available queues.
  2. int streamNo = (int) this.nextStream.incrementAndGet() % this.queues.size();
  3. AtomicLong offset = this.offsets.get(streamNo);
  4. BlockingQueue<FetchedDataChunk> queue = this.queues.get(streamNo);
  // thisOffset snapshots the newly incremented per-stream offset.
  5. AtomicLong thisOffset = new AtomicLong(offset.incrementAndGet());
  6. List<Message> seq = Lists.newArrayList();
  // Message(byte[]) — keyless constructor; payload only.
  7. seq.add(new Message(message.getBytes(Charsets.UTF_8)));
  // NOTE(review): the shared `offset` counter (not `thisOffset`) is handed to the
  // message set while `thisOffset` is used for the chunk — confirm this asymmetry
  // is intended by the test harness.
  8. ByteBufferMessageSet messageSet = new ByteBufferMessageSet(NoCompressionCodec$.MODULE$, offset, JavaConversions.asScalaBuffer(seq));
  9. FetchedDataChunk chunk = new FetchedDataChunk(messageSet,
  10. new PartitionTopicInfo("topic", streamNo, queue, thisOffset, thisOffset, new AtomicInteger(1), "clientId"),
  11. thisOffset.get());
  12. queue.add(chunk);
  13. }

代码示例来源:origin: linkedin/camus

  // Verifies message integrity: rebuilds a kafka.message.Message from the stored
  // payload (and key, when one is present) so that the broker-computed checksum
  // can be recomputed locally and compared against the recorded one.
  // Throws ChecksumException (carrying the message offset) on mismatch.
  1. public void validate() throws IOException {
  2. // check the checksum of message.
  3. Message readMessage;
  // The keyless and keyed Message constructors produce different checksums,
  // so the reconstruction must mirror how the message was originally built.
  4. if (key == null){
  5. readMessage = new Message(payload);
  6. } else {
  7. readMessage = new Message(payload, key);
  8. }
  9. if (checksum != readMessage.checksum()) {
  10. throw new ChecksumException("Invalid message checksum : " + readMessage.checksum() + ". Expected " + checksum,
  11. offset);
  12. }
  13. }

代码示例来源:origin: linkedin/camus

  // Builds an EasyMock FetchResponse for tests: serializes each MyMessage to JSON,
  // wraps it in a keyed kafka.message.Message (key = partition id as a string),
  // and stubs messageSet(...) to return the resulting ByteBufferMessageSet exactly once.
  // The mock is registered in `mocks` so the test can replay/verify it later.
  1. private FetchResponse mockFetchResponse(List<MyMessage> myMessages) {
  2. FetchResponse fetchResponse = EasyMock.createMock(FetchResponse.class);
  3. EasyMock.expect(fetchResponse.hasError()).andReturn(false).times(1);
  4. List<Message> messages = new ArrayList<Message>();
  5. for (MyMessage myMessage:myMessages) {
  6. String payload = gson.toJson(myMessage);
  7. String msgKey = Integer.toString(PARTITION_1_ID);
  // NOTE(review): getBytes() without a Charset uses the platform default encoding;
  // prefer getBytes(StandardCharsets.UTF_8) in non-test code.
  8. Message message = new Message(payload.getBytes(), msgKey.getBytes());
  9. messages.add(message);
  10. }
  11. ByteBufferMessageSet messageSet = new ByteBufferMessageSet(messages);
  12. EasyMock.expect(fetchResponse.messageSet(EasyMock.anyString(), EasyMock.anyInt())).andReturn(messageSet).times(1);
  13. mocks.add(fetchResponse);
  14. return fetchResponse;
  15. }

代码示例来源:origin: jacklund/mqttKafkaBridge

  // MQTT-to-Kafka bridge callback: when an MQTT message arrives, its raw payload
  // is wrapped in a keyless kafka.message.Message and published to the Kafka topic
  // named after the MQTT topic via the old producer API's ProducerData.
  1. @Override
  2. public void messageArrived(String topic, MqttMessage message) throws Exception {
  3. byte[] payload = message.getPayload();
  4. ProducerData<String, Message> data = new ProducerData<String, Message>(topic, new Message(payload));
  5. kafkaProducer.send(data);
  6. }

代码示例来源:origin: com.linkedin.camus/camus-etl-kafka

  // Verifies message integrity: rebuilds a kafka.message.Message from the stored
  // payload (and key, when one is present) so the recorded checksum can be
  // recomputed and compared; throws ChecksumException (with the offset) on mismatch.
  1. public void validate() throws IOException {
  2. // check the checksum of message.
  3. Message readMessage;
  // Keyed and keyless constructors yield different checksums, so the
  // reconstruction mirrors how the message was originally created.
  4. if (key == null){
  5. readMessage = new Message(payload);
  6. } else {
  7. readMessage = new Message(payload, key);
  8. }
  9. if (checksum != readMessage.checksum()) {
  10. throw new ChecksumException("Invalid message checksum : " + readMessage.checksum() + ". Expected " + checksum,
  11. offset);
  12. }
  13. }

代码示例来源:origin: org.apache.apex/malhar-contrib

  // Consumer thread body (part of an anonymous class whose declaration is not
  // shown here — the trailing `});` closes it): drains a Kafka high-level
  // consumer stream, tagging each message with its partition before handing it
  // to putMessage() along with its offset. Stops when the stream is exhausted
  // or `isAlive` is cleared.
  1. public void run()
  2. {
  3. ConsumerIterator<byte[], byte[]> itr = stream.iterator();
  4. logger.debug("Thread {} starts consuming message...", Thread.currentThread().getName());
  5. while (itr.hasNext() && isAlive) {
  6. MessageAndMetadata<byte[], byte[]> mam = itr.next();
  7. try {
  8. kp.setPartitionId(mam.partition());
  // Re-wraps the raw bytes in a kafka.message.Message for downstream buffering.
  9. putMessage(kp, new Message(mam.message()), mam.offset());
  10. } catch (InterruptedException e) {
  // NOTE(review): the interrupt is logged but the loop continues and the
  // thread's interrupt flag is not restored (Thread.currentThread().interrupt()).
  11. logger.error("Message Enqueue has been interrupted", e);
  12. }
  13. }
  14. logger.debug("Thread {} stops consuming message...", Thread.currentThread().getName());
  15. }
  16. });

代码示例来源:origin: apache/apex-malhar

  // Consumer thread body (identical to the malhar-contrib variant above; the
  // trailing `});` closes an enclosing anonymous class not shown here): drains
  // a Kafka high-level consumer stream and forwards each message, with its
  // partition id and offset, to putMessage().
  1. public void run()
  2. {
  3. ConsumerIterator<byte[], byte[]> itr = stream.iterator();
  4. logger.debug("Thread {} starts consuming message...", Thread.currentThread().getName());
  5. while (itr.hasNext() && isAlive) {
  6. MessageAndMetadata<byte[], byte[]> mam = itr.next();
  7. try {
  8. kp.setPartitionId(mam.partition());
  9. putMessage(kp, new Message(mam.message()), mam.offset());
  10. } catch (InterruptedException e) {
  // NOTE(review): interrupt is logged and swallowed; the interrupt flag is
  // not re-set, so the enclosing thread loses the interruption signal.
  11. logger.error("Message Enqueue has been interrupted", e);
  12. }
  13. }
  14. logger.debug("Thread {} stops consuming message...", Thread.currentThread().getName());
  15. }
  16. });

代码示例来源:origin: org.graylog2/graylog2-server

  1. final Message newMessage = new Message(messageBytes, idBytes);

代码示例来源:origin: org.graylog2/graylog2-shared

  1. /**
  2. * Writes the list of entries to the journal.
  3. *
  4. * @param entries journal entries to be written
  5. * @return the last position written to in the journal
  6. */
  7. @Override
  8. public long write(List<Entry> entries) {
  // Timer.Context measures total write latency; closed automatically by
  // try-with-resources (hence the conventional name `ignored`).
  9. try (Timer.Context ignored = writeTime.time()) {
  10. long payloadSize = 0L;
  11. final List<Message> messages = Lists.newArrayListWithCapacity(entries.size());
  // Convert each journal entry into a keyed kafka.message.Message
  // (payload = message bytes, key = id bytes), tracking total payload size.
  12. for (final Entry entry : entries) {
  13. final byte[] messageBytes = entry.getMessageBytes();
  14. final byte[] idBytes = entry.getIdBytes();
  15. payloadSize += messageBytes.length;
  16. messages.add(new Message(messageBytes, idBytes));
  17. if (LOG.isTraceEnabled()) {
  18. LOG.trace("Message {} contains bytes {}", bytesToHex(idBytes), bytesToHex(messageBytes));
  19. }
  20. }
  // Append the whole batch to the embedded Kafka log in one message set;
  // the append info yields the offset range actually written.
  21. final ByteBufferMessageSet messageSet = new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages));
  22. final Log.LogAppendInfo appendInfo = kafkaLog.append(messageSet, true);
  23. long lastWriteOffset = appendInfo.lastOffset();
  24. LOG.debug("Wrote {} messages to journal: {} bytes, log position {} to {}",
  25. entries.size(), payloadSize, appendInfo.firstOffset(), lastWriteOffset);
  26. writtenMessages.mark(entries.size());
  27. return lastWriteOffset;
  28. }
  29. }

代码示例来源:origin: apache/apex-malhar

  // Test consumer loop: subscribes to a single topic via the high-level consumer,
  // wraps each received payload in a kafka.message.Message, counts down an
  // optional latch per message, stops on the sentinel END_TUPLE, buffers
  // everything else, and throttles itself with a 50 ms sleep per message.
  1. @Override
  2. public void run()
  3. {
  4. Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  // NOTE(review): `new Integer(1)` is deprecated boxing; Integer.valueOf(1)
  // (or plain autoboxing) is preferred.
  5. topicCountMap.put(topic, new Integer(1));
  6. Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  7. KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
  8. ConsumerIterator<byte[], byte[]> it = stream.iterator();
  9. logger.debug("Inside consumer::run receiveCount= {}", receiveCount);
  // NOTE(review): `&` here is the non-short-circuiting bitwise AND — isAlive is
  // evaluated even when hasNext() is false, and hasNext() may block regardless
  // of isAlive. Almost certainly `&&` was intended.
  10. while (it.hasNext() & isAlive) {
  11. Message msg = new Message(it.next().message());
  12. if (latch != null) {
  13. latch.countDown();
  14. }
  // Sentinel message terminates the consumer loop.
  15. if (getMessage(msg).equals(KafkaOperatorTestBase.END_TUPLE)) {
  16. break;
  17. }
  18. holdingBuffer.add(msg);
  19. receiveCount++;
  20. logger.debug("Consuming {}, receiveCount= {}", getMessage(msg), receiveCount);
  21. try {
  22. Thread.sleep(50);
  23. } catch (InterruptedException e) {
  // Interrupt is treated as a stop signal for this test thread.
  24. break;
  25. }
  26. }
  27. logger.debug("DONE consuming");
  28. }

代码示例来源:origin: HiveKa/HiveKa

  1. Message messageWithKey = new Message(bytes,keyBytes);
  2. Message messageWithoutKey = new Message(bytes);
  3. long checksum = key.getChecksum();
  4. if (checksum != messageWithKey.checksum() && checksum != messageWithoutKey.checksum()) {

相关文章