Can't migrate from ZooKeeper to KRaft

Hi, I'm having trouble with this migration. First I tried to move the data from the ZooKeeper-based cluster to a new KRaft cluster with MirrorMaker 2, but I couldn't get it working (I posted that as a separate question). Now I'm trying to follow the migration steps described in Confluent's docs, but I can't get that to work either.

This is the setup of the existing ZooKeeper-based cluster:

version: "3.7"
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    hostname: zookeeper
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    labels:
      maintainer: ino
      description: message-broker-zookeper
      project: pms
      version: v1.0.0
      type: backend
    volumes:
      - ../../storage_data/zookeeper_data:/zookeeper/data
      - ../../storage_data/zookeeper_datalog:/zookeeper/datalog 
    networks:
      - kafka-connector
    restart: always
    logging:
      driver: "json-file"     # json-file logging driver
      options:
        max-size: "1g"        # maximum size per log file
        max-file: "5"         # maximum number of log files to keep
  

  kafka:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zookeeper
    hostname: kafka
    ports:
      - 29092:29092
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://${KAFKA_BROKER_IP_ADDRESS}:29092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_SCHEMA_REGISTRY_URL: "schemaregistry:8081"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      # CONNECT_HEAP_OPTS: "-Xmx10g -Xms5g"
    labels:
      maintainer: ino
      description: message-broker-kafka
      project: pms
      version: v1.0.0
      type: backend
    volumes:
      - ../../storage_data/kafka_data:/kafka/data
    networks:
      - kafka-connector
    restart: always
    logging:
      driver: "json-file"     # json-file logging driver
      options:
        max-size: "1g"        # maximum size per log file
        max-file: "5"         # maximum number of log files to keep
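
For reference, the existing broker's cluster ID can be read from its meta.properties. This is only a sketch of how I check it: I'm assuming the cp-kafka image's default log dir of /var/lib/kafka/data, since KAFKA_LOG_DIRS isn't set above. That same ID is what has to go into CLUSTER_ID in the KRaft compose file below.

# Run from the directory of the ZooKeeper-based compose file; "kafka" is the service name above.
# Assumption: the cp-kafka image keeps its log dir at /var/lib/kafka/data by default.
docker compose exec kafka cat /var/lib/kafka/data/meta.properties
# The file should contain broker.id and cluster.id for this broker.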

This is the setup of the new KRaft cluster:

services:
  controller-1:
    image: confluentinc/cp-kafka:7.8.0
    hostname: controller-1
    container_name: controller-1
    environment:
      KAFKA_NODE_ID: 1000
      KAFKA_PROCESS_ROLES: controller
      KAFKA_CONTROLLER_QUORUM_VOTERS: &kraft-voters "1000@controller-1:9093"
      # KAFKA_CONTROLLER_QUORUM_VOTERS: &kraft-voters "1000@controller-1:9093,2000@controller-2:9093,3000@controller-3:9093"
      KAFKA_LISTENERS: CONTROLLER://0.0.0.0:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA # Set your generated cluster ID (UUID) here
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
    volumes:
      - controller_1_data:/var/lib/kafka/data
    user: 1000:1000
    networks:
      - kafka-connector
    restart: always

  controller-2:
    image: confluentinc/cp-kafka:7.8.0
    hostname: controller-2
    container_name: controller-2
    environment:
      KAFKA_NODE_ID: 2000
      KAFKA_PROCESS_ROLES: controller
      KAFKA_CONTROLLER_QUORUM_VOTERS: *kraft-voters
      KAFKA_LISTENERS: CONTROLLER://0.0.0.0:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
    volumes:
      - controller_2_data:/var/lib/kafka/data
    networks:
      - kafka-connector
    restart: always

  controller-3:
    image: confluentinc/cp-kafka:7.8.0
    hostname: controller-3
    container_name: controller-3
    environment:
      KAFKA_NODE_ID: 3000
      KAFKA_PROCESS_ROLES: controller
      KAFKA_CONTROLLER_QUORUM_VOTERS: *kraft-voters
      KAFKA_LISTENERS: CONTROLLER://0.0.0.0:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
    volumes:
      - controller_3_data:/var/lib/kafka/data
    networks:
      - kafka-connector
    restart: always

  kafka-1:
    image: confluentinc/cp-kafka:7.8.0
    hostname: kafka-1
    container_name: kafka-1
    ports:
      # - 9092:9092
      - 29093:29093
      - 9997:9997
    environment:
      KAFKA_BROKER_ID: 11
      KAFKA_NODE_ID: 11
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CFG_LISTENERS: PLAINTEXT://kafka-1:9092,BROKER://192.168.18.2:29093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092,BROKER://192.168.18.2:29093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,BROKER:SASL_PLAINTEXT
      KAFKA_CONTROLLER_QUORUM_VOTERS: *kraft-voters
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_JMX_PORT: 9997
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA # This is set according to the ZooKeeper cluster's ID.
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_SASL_ENABLED_MECHANISMS: SCRAM-SHA-256,PLAIN
      KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL: PLAIN
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-256
      KAFKA_LISTENER_NAME_BROKER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_BROKER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_OPTS: "-Djava.security.debug=gssloginconfig,configfile,configparser,logincontext"
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
      KAFKA_INTER_BROKER_PROTOCOL_VERSION: 3.8
    volumes:
      - kafka_1_data:/var/lib/kafka/data
    networks:
      - kafka-connector
    restart: always

  kafka-2:
    image: confluentinc/cp-kafka:7.8.0
    hostname: kafka-2
    container_name: kafka-2
    ports:
      # - 9094:9092
      - 29094:29094
      - 9998:9997
    environment:
      KAFKA_BROKER_ID: 12
      KAFKA_NODE_ID: 12
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CFG_LISTENERS: PLAINTEXT://kafka-2:9092,BROKER://192.168.18.2:29094
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092,BROKER://192.168.18.2:29094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,BROKER:SASL_PLAINTEXT
      KAFKA_CONTROLLER_QUORUM_VOTERS: *kraft-voters
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_JMX_PORT: 9997
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_SASL_ENABLED_MECHANISMS: SCRAM-SHA-256,PLAIN
      KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL: PLAIN
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-256
      KAFKA_LISTENER_NAME_BROKER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_BROKER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_OPTS: "-Djava.security.debug=gssloginconfig,configfile,configparser,logincontext"
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
      KAFKA_INTER_BROKER_PROTOCOL_VERSION: 3.8
    volumes:
      - kafka_2_data:/var/lib/kafka/data
    networks:
      - kafka-connector
    restart: always

  kafka-3:
    image: confluentinc/cp-kafka:7.8.0
    hostname: kafka-3
    container_name: kafka-3
    ports:
      # - 9096:9092
      - 29095:29095
      - 9999:9997
    environment:
      KAFKA_BROKER_ID: 13
      KAFKA_NODE_ID: 13
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CFG_LISTENERS: PLAINTEXT://kafka-3:9092,BROKER://192.168.18.2:29095
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092,BROKER://192.168.18.2:29095
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,BROKER:SASL_PLAINTEXT
      KAFKA_CONTROLLER_QUORUM_VOTERS: *kraft-voters
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_JMX_PORT: 9997
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: bI95nJEhTjesvwTu-RhnCA
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_SASL_ENABLED_MECHANISMS: SCRAM-SHA-256,PLAIN
      KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL: PLAIN
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-256
      KAFKA_LISTENER_NAME_BROKER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_BROKER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_CONTROLLER_SCRAM-SHA-256_SASL_JAAS_CONFIG: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret" user_admin="admin-secret" ;
      KAFKA_OPTS: "-Djava.security.debug=gssloginconfig,configfile,configparser,logincontext"
      KAFKA_ZOOKEEPER_METADATA_MIGRATION_ENABLE: "true"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.2.18:2181"
      KAFKA_CONFLUENT_CLUSTER_LINK_METADATA_TOPIC_ENABLE: "true"
      KAFKA_INTER_BROKER_PROTOCOL_VERSION: 3.8
    volumes:
      - kafka_3_data:/var/lib/kafka/data
    networks:
      - kafka-connector
    restart: always

Normally this cluster consists of three controllers and three brokers, but during the migration I wanted the number of controllers to match the single ZooKeeper node, so only controller-1 is listed in the quorum voters for now.
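
The CLUSTER_ID above comes from the existing ZooKeeper cluster. It can be read with something like the command below (the zookeeper-shell tool ships with the cp-kafka image as far as I know; the host and port come from the first compose file):

# Run against the old cluster; "kafka" is the broker service from the first compose file.
docker compose exec kafka zookeeper-shell zookeeper:2181 get /cluster/id
# The /cluster/id znode holds JSON like {"version":"1","id":"<cluster ID>"}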

I followed the docs step by step: I got the cluster ID (as shown above) and set the necessary environment variables. But when I start the controllers, they constantly print this log:

[2024-12-20 08:27:20,888] INFO [KRaftMigrationDriver id=1000] 1000 transitioning from INACTIVE to WAIT_FOR_CONTROLLER_QUORUM state (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:20,889] INFO [KRaftMigrationDriver id=1000] Controller Quorum is ready for Zk to KRaft migration. Now waiting for ZK brokers. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:20,898] INFO [KRaftMigrationDriver id=1000] 1000 transitioning from WAIT_FOR_CONTROLLER_QUORUM to WAIT_FOR_BROKERS state (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:20,899] INFO [KRaftMigrationDriver id=1000] Expected driver state WAIT_FOR_CONTROLLER_QUORUM but found WAIT_FOR_BROKERS. Not running this event WaitForControllerQuorumEvent. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:20,900] INFO [KRaftMigrationDriver id=1000] No brokers are known to KRaft, waiting for brokers to register. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:21,000] INFO [KRaftMigrationDriver id=1000] No brokers are known to KRaft, waiting for brokers to register. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:21,199] INFO [KRaftMigrationDriver id=1000] No brokers are known to KRaft, waiting for brokers to register. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:21,594] INFO [KRaftMigrationDriver id=1000] No brokers are known to KRaft, waiting for brokers to register. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
[2024-12-20 08:27:22,386] INFO [KRaftMigrationDriver id=1000] No brokers are known to KRaft, waiting for brokers to register. (org.apache.kafka.metadata.migration.KRaftMigrationDriver)
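
To double-check which brokers ZooKeeper itself knows about while the controllers sit in this state, the same zookeeper-shell approach can be used (again assuming it is on the PATH in the cp-kafka image; service and host names are from the first compose file):

# List the broker IDs currently registered in ZooKeeper (run against the old cluster).
docker compose exec kafka zookeeper-shell zookeeper:2181 ls /brokers/ids
# If the old broker is up, its ID (1) should appear in the returned list.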

And when I start the brokers, they print these logs:

[2024-12-20 08:38:26,334] INFO [TransactionCoordinator id=11] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2024-12-20 08:38:26,336] INFO [BrokerLifecycleManager id=11] Unable to register broker 11 because the controller returned error BROKER_ID_NOT_REGISTERED (kafka.server.BrokerLifecycleManager)
[2024-12-20 08:38:26,340] INFO [MetadataLoader id=11] InitializeNewPublishers: initializing BrokerRegistrationTracker(id=11) with a snapshot at offset 1337 (org.apache.kafka.image.loader.MetadataLoader)
[2024-12-20 08:38:26,541] INFO [BrokerLifecycleManager id=11] Unable to register broker 11 because the controller returned error BROKER_ID_NOT_REGISTERED (kafka.server.BrokerLifecycleManager)
[2024-12-20 08:38:26,937] INFO [BrokerLifecycleManager id=11] Unable to register broker 11 because the controller returned error BROKER_ID_NOT_REGISTERED (kafka.server.BrokerLifecycleManager)

while the controller starts printing these:

[2024-12-20 08:39:44,573] INFO [QuorumController id=1000] registerBroker: event failed with BrokerIdNotRegisteredException in 55 microseconds. Exception message: Controller is in pre-migration mode and cannot register KRaft brokers until the metadata migration is complete. (org.apache.kafka.controller.QuorumController)
[2024-12-20 08:39:53,575] INFO [QuorumController id=1000] registerBroker: event failed with BrokerIdNotRegisteredException in 42 microseconds. Exception message: Controller is in pre-migration mode and cannot register KRaft brokers until the metadata migration is complete. (org.apache.kafka.controller.QuorumController)
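
For reference, the state of the KRaft quorum itself can be queried from one of the controller containers with something like this (kafka-metadata-quorum ships with the cp-kafka image; I'm assuming the --bootstrap-controller flag is available in this version, pointed at the CONTROLLER listener on 9093):

# Describe the KRaft quorum from inside controller-1 (container name from the compose file above).
# Assumption: --bootstrap-controller is supported by this image version (cp-kafka 7.8.0 / Kafka 3.8).
docker exec controller-1 kafka-metadata-quorum --bootstrap-controller controller-1:9093 describe --status
# Prints the ClusterId, LeaderId, and the current voters/observers of the quorum.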

I want to know what I'm missing. Any help is much appreciated.