1. Environment: Docker, docker-compose
2. ZooKeeper cluster
/data/zookeeper/zoo1/config/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
# dataDir=/opt/zookeeper-3.4.13/data
dataDir=/data
dataLogDir=/datalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
server.1=zoo1:2888:3888
server.2=zoo2:2888:3888
server.3=zoo3:2888:3888
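The mounted host directories must exist before the containers start. A minimal preparation sketch, assuming the /data/zookeeper layout above and that the same zoo.cfg is reused for all three nodes:

# create config/data/log directories for zoo1-zoo3
for i in 1 2 3; do
  mkdir -p /data/zookeeper/zoo$i/{config,data,datalog}
done
# place the zoo.cfg shown above under zoo1, then copy it to the other nodes
cp /data/zookeeper/zoo1/config/zoo.cfg /data/zookeeper/zoo2/config/
cp /data/zookeeper/zoo1/config/zoo.cfg /data/zookeeper/zoo3/config/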
docker-compose-zookeeper.yml
version: '3.7'
services:
  zoo1:
    container_name: zoo1
    hostname: zoo1
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2181:2181
    volumes:
      # mount config and data volumes from the host
      - /data/zookeeper/zoo1/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo1/data:/data
      - /data/zookeeper/zoo1/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 1  # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888  # zookeeper node list
  zoo2:
    container_name: zoo2
    hostname: zoo2
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2182:2181
    volumes:
      # mount config and data volumes from the host
      - /data/zookeeper/zoo2/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo2/data:/data
      - /data/zookeeper/zoo2/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 2  # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888  # zookeeper node list
  zoo3:
    container_name: zoo3
    hostname: zoo3
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2183:2181
    volumes:
      # mount config and data volumes from the host
      - /data/zookeeper/zoo3/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo3/data:/data
      - /data/zookeeper/zoo3/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 3  # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888  # zookeeper node list
Start:
docker-compose -f docker-compose-zookeeper.yml up -d
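Once the containers are up, the ensemble can be verified from the host by asking each node for its mode; expect one leader and two followers. A quick check, assuming nc is installed on the host and using the mapped client ports:

for p in 2181 2182 2183; do
  echo srvr | nc 127.0.0.1 $p | grep Mode
done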
3. Kafka cluster
A Docker bridge network for the containers must be created first:
docker network create -d bridge --subnet 172.19.0.0/24 kafka_net
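To confirm the network exists with the expected subnet:

docker network inspect kafka_net | grep Subnet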
docker-compose-kafka.yml
version: '3.2'
services:
  broker1:
    container_name: broker1
    hostname: broker1
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9986:9986"
      - "9091:9091"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://:9091
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9091
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9986
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker1:/kafka/kafka-logs-broker1
    networks:
      default:
        ipv4_address: 172.19.0.11
  broker2:
    container_name: broker2
    hostname: broker2
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9987:9987"
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9092
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9987
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker2:/kafka/kafka-logs-broker2
    networks:
      default:
        ipv4_address: 172.19.0.12
  broker3:
    container_name: broker3
    hostname: broker3
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9988:9988"
      - "9093:9093"
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_LISTENERS: PLAINTEXT://:9093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9093
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9988
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker3:/kafka/kafka-logs-broker3
    networks:
      default:
        ipv4_address: 172.19.0.13
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    container_name: kafka-manager
    hostname: kafka-manager
    restart: unless-stopped
    ports:
      - 9000:9000
    links:
      # link containers created by this compose file
      - broker1
      - broker2
      - broker3
    environment:
      ZK_HOSTS: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      KAFKA_BROKERS: broker1:9091,broker2:9092,broker3:9093
      APPLICATION_SECRET: 123456
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.19.0.14
networks:
  default:
    external:
      name: kafka_net
Start:
docker-compose -f docker-compose-kafka.yml up -d
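A quick smoke test after startup is to create a replicated topic and check that its partitions are spread across all three brokers. A sketch, assuming the image ships a Kafka version that still accepts the --zookeeper flag (it was removed in Kafka 3.0; newer builds use --bootstrap-server):

# unset JMX_PORT first, otherwise the CLI tools try to bind the broker's JMX port and fail
docker exec broker1 bash -c "unset JMX_PORT; /opt/kafka/bin/kafka-topics.sh \
  --create --zookeeper 172.16.12.130:2181 --topic test --partitions 3 --replication-factor 3"
# the describe output should show leaders and replicas on broker IDs 1, 2 and 3
docker exec broker1 bash -c "unset JMX_PORT; /opt/kafka/bin/kafka-topics.sh \
  --describe --zookeeper 172.16.12.130:2181 --topic test"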
4. Problems encountered:
When the ZooKeeper cluster and the Kafka cluster are deployed on the same machine, the Kafka containers may fail with:
/usr/bin/start-kafka.sh: line 149: /opt/kafka/bin/kafka-server-start.sh: No such file or directory
Cause: most likely a port conflict kept the Docker container from starting properly, which then surfaces as this error. Check the host port mappings carefully!
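Two quick checks: see whether the container actually started, and find which process already owns a conflicting port:

docker ps -a            # an exited kafka container indicates a failed start
docker logs broker1     # the real error is usually in the container log
# list processes already listening on the ports used above (netstat -lntp also works)
ss -lntp | grep -E ':(2181|2182|2183|9091|9092|9093|9986|9987|9988) '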