检查点文件[/var/lib/Kafka/data/log-start-offset-checkpoint]中的行格式不正确

4nkexdtk  于 2023-06-21  发布在  Apache
关注(0)|答案(1)|浏览(77)

我有一个通过 Docker Compose 运行的 Kafka 集群。第一次运行时一切正常,但当我关闭 Windows 机器、重新打开并再次启动 Docker 集群时,Kafka 容器启动失败。我尝试过多次删除并重新创建集群,但都没有成功。有人能帮帮我吗?以下是 Kafka 容器的日志。

2023-06-10 18:27:46 java.io.IOException: Malformed line in checkpoint file [/var/lib/kafka/data/log-start-offset-checkpoint]: 
2023-06-10 18:27:46     at org.apache.kafka.server.common.CheckpointFile$CheckpointReadBuffer.buildMalformedLineException(CheckpointFile.java:172)
2023-06-10 18:27:46     at org.apache.kafka.server.common.CheckpointFile$CheckpointReadBuffer.toInt(CheckpointFile.java:167)
2023-06-10 18:27:46     at org.apache.kafka.server.common.CheckpointFile$CheckpointReadBuffer.read(CheckpointFile.java:133)
2023-06-10 18:27:46     at org.apache.kafka.server.common.CheckpointFile.read(CheckpointFile.java:106)
2023-06-10 18:27:46     at kafka.server.checkpoints.CheckpointFileWithFailureHandler.read(CheckpointFileWithFailureHandler.scala:48)
2023-06-10 18:27:46     at kafka.server.checkpoints.OffsetCheckpointFile.read(OffsetCheckpointFile.scala:70)
2023-06-10 18:27:46     at kafka.log.LogManager.$anonfun$loadLogs$4(LogManager.scala:384)
2023-06-10 18:27:46     at scala.collection.IterableOnceOps.foreach(IterableOnce.scala:563)
2023-06-10 18:27:46     at scala.collection.IterableOnceOps.foreach$(IterableOnce.scala:561)
2023-06-10 18:27:46     at scala.collection.AbstractIterable.foreach(Iterable.scala:926)
2023-06-10 18:27:46     at kafka.log.LogManager.loadLogs(LogManager.scala:353)
2023-06-10 18:27:46     at kafka.log.LogManager.startupWithConfigOverrides(LogManager.scala:522)
2023-06-10 18:27:46     at kafka.log.LogManager.startup(LogManager.scala:482)
2023-06-10 18:27:46     at kafka.server.KafkaServer.startup(KafkaServer.scala:263)
2023-06-10 18:27:46     at kafka.Kafka$.main(Kafka.scala:109)
2023-06-10 18:27:46     at kafka.Kafka.main(Kafka.scala)
2023-06-10 18:27:46 [2023-06-10 15:27:46,150] WARN Error occurred while reading log-start-offset-checkpoint file of directory /var/lib/kafka/data, resetting to the base offset of the first segment (kafka.log.LogManager)
2023-06-10 18:27:46 org.apache.kafka.common.errors.KafkaStorageException: Error while reading checkpoint file /var/lib/kafka/data/log-start-offset-checkpoint

这是我的docker compose文件。

# Docker Compose stack: 3-node ZooKeeper ensemble + 1 Kafka broker
# + Kafka Connect + Kowl UI, all on a pre-existing external network.
# NOTE(review): Compose v2+ ignores the top-level `version` key; kept
# only for compatibility with older tooling.
version: "2.1"
name: kafka-cluster-single
services:
  zoo1:
    image: confluentinc/cp-zookeeper:7.3.0
    hostname: zoo1
    container_name: zoo1
    networks:
      - my-network
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_SERVER_ID: 1
      # Peer/election ports (2888/3888) are container-internal; they do
      # not need host port mappings.
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
    volumes:
      # FIX: was `~docker/...` (no slash after `~`). That form is NOT
      # relative to your home directory — `~docker` names the home of a
      # user literally called "docker", so the bind mount lands in the
      # wrong place (or fails). Data that is not durably bind-mounted is
      # a plausible contributor to the corrupted-checkpoint symptom
      # after an unclean shutdown.
      - ~/docker/volumes/kafka-cluster-single/zoo1/data:/var/lib/zookeeper/data
      - ~/docker/volumes/kafka-cluster-single/zoo1/log:/var/lib/zookeeper/log

  zoo2:
    image: confluentinc/cp-zookeeper:7.3.0
    hostname: zoo2
    container_name: zoo2
    networks:
      - my-network
    ports:
      - "2182:2182"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2182
      ZOOKEEPER_SERVER_ID: 2
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
    volumes:
      # FIX: `~docker/...` -> `~/docker/...` (see zoo1).
      - ~/docker/volumes/kafka-cluster-single/zoo2/data:/var/lib/zookeeper/data
      - ~/docker/volumes/kafka-cluster-single/zoo2/log:/var/lib/zookeeper/log

  zoo3:
    image: confluentinc/cp-zookeeper:7.3.0
    hostname: zoo3
    container_name: zoo3
    networks:
      - my-network
    ports:
      - "2183:2183"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2183
      ZOOKEEPER_SERVER_ID: 3
      ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
    volumes:
      # FIX: `~docker/...` -> `~/docker/...` (see zoo1).
      - ~/docker/volumes/kafka-cluster-single/zoo3/data:/var/lib/zookeeper/data
      - ~/docker/volumes/kafka-cluster-single/zoo3/log:/var/lib/zookeeper/log

  kafka1:
    image: confluentinc/cp-kafka:7.3.0
    hostname: kafka1
    container_name: kafka1
    networks:
      - my-network
    ports:
      - "9092:9092"
      - "29092:29092"
    environment:
      KAFKA_BROKER_ID: 1
      # Three listeners: INTERNAL (in-network clients), EXTERNAL (host
      # clients via 9092), DOCKER (containers reaching the host via
      # host.docker.internal:29092 — Docker Desktop only).
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
      # Single-broker cluster: replication factor must stay at 1.
      KAFKA_CONFLUENT_TOPIC_REPLICATION_FACTOR: "1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
      KAFKA_NUM_PARTITIONS: "30"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    volumes:
      # FIX: `~docker/...` -> `~/docker/...` (see zoo1). If the broker's
      # data dir is not correctly persisted, any unclean host shutdown
      # can leave truncated checkpoint files such as
      # log-start-offset-checkpoint behind.
      - ~/docker/volumes/kafka-cluster-single/kafka1/data:/var/lib/kafka/data

  connect:
    # FIX: pinned to 7.3.0 to match the rest of the stack; `:latest` is
    # non-deterministic and can silently change behaviour between pulls.
    image: confluentinc/cp-kafka-connect:7.3.0
    networks:
      - my-network
    depends_on:
      # FIX: Connect bootstraps against the Kafka broker (see
      # CONNECT_BOOTSTRAP_SERVERS), not ZooKeeper — depend on kafka1.
      - kafka1
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "kafka1:19092"
      CONNECT_REST_PORT: "8083"
      CONNECT_GROUP_ID: "default"
      CONNECT_CONFIG_STORAGE_TOPIC: my_connect_configs
      CONNECT_OFFSET_STORAGE_TOPIC: my_connect_offsets
      CONNECT_STATUS_STORAGE_TOPIC: my_connect_statuses
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      # NOTE(review): schema-registry URLs reference a `schemaregistry`
      # service that is not defined in this file — confirm it exists on
      # the external network, or remove these (JsonConverter does not
      # require a schema registry).
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schemaregistry:8081"
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schemaregistry:8081"
      # NOTE(review): the INTERNAL_*_CONVERTER settings are deprecated in
      # modern Kafka Connect; harmless but removable.
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_REST_ADVERTISED_HOST_NAME: "localhost"
      CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_PLUGIN_PATH: /usr/share/java,/etc/kafka-connect/jars
    volumes:
      # FIX: `~docker/...` -> `~/docker/...` (see zoo1).
      - ~/docker/volumes/kafka-cluster-single/kafka-connect/jars:/etc/kafka-connect/jars
      - ~/docker/volumes/kafka-cluster-single/kafka-connect/etc/kafka:/etc/kafka

  kowl:
    image: quay.io/cloudhut/kowl:master
    depends_on:
      - kafka1
    networks:
      - my-network
    environment:
      # FIX: kowl runs inside `my-network`, so it must use the INTERNAL
      # listener (kafka1:19092). The previous value, kafka1:29092, hits
      # the DOCKER listener whose advertised address is
      # host.docker.internal — unreliable outside Docker Desktop.
      KAFKA_BROKERS: kafka1:19092
      CONNECT_ENABLED: "true"
      CONNECT_CLUSTERS_NAME: "my-connect-cluster"
      CONNECT_CLUSTERS_URL: http://connect:8083/
      SCHEMAREGISTRY_ENABLED: "true"
    ports:
      - "8081:8080"
    volumes:
      # FIX: `~docker/...` -> `~/docker/...` (see zoo1).
      - ~/docker/volumes/kafka-cluster-single/kowl/etc/configs:/etc/kowl/configs
      - ~/docker/volumes/kafka-cluster-single/kowl/etc/secrets:/etc/kowl/secrets
networks:
  my-network:
    external: true
z8dt9xmd

z8dt9xmd1#

当我关闭我的 Windows 机器并重新打开它时
听起来你并没有先运行 docker compose stop,因此容器内运行的服务进程没有机会正常地关闭自己——也就是没有来得及刷新(flush)已打开的文件句柄并完成所有待写入的数据。
所以,是的,文件损坏(corruption)是有可能发生的。这并不是 Docker 独有的问题。

相关问题