- 1. The ZooKeeper ensemble runs on nodes 110, 111, and 112.
- 2. Kafka runs on the same 3 nodes: 110, 111, 112.
- Edit config/server.properties:
- broker.id=110
- host.name=192.168.1.110
- log.dirs=/usr/local/kafka_2.10-0.8.2.0/logs
- Copy the installation to the other two nodes, then adjust config/server.properties on each node; broker.id and host.name must be unique per broker.
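- For example, on 111 only these two lines would differ (assuming the same install path on every node):
- broker.id=111
- host.name=192.168.1.111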
- 3. Start the broker on each of the three nodes:
- bin/kafka-server-start.sh config/server.properties >/dev/null 2>&1 &
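- To verify that each broker came up, jps (shipped with the JDK) should list a Java process whose main class is Kafka:
- jps ----> expect a line like "<pid> Kafka" on every node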
- 4. Create a topic:
- bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 3 --topic test
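- With --replication-factor 3, every partition is mirrored on all three brokers. To confirm the topic was created, list the topics registered in ZooKeeper:
- bin/kafka-topics.sh --list --zookeeper localhost:2181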
- 5. Describe the topic:
- bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test
- Topic:test PartitionCount:3 ReplicationFactor:3 Configs:
- Topic: test Partition: 0 Leader: 110 Replicas: 110,111,112 Isr: 110,111,112
- Topic: test Partition: 1 Leader: 111 Replicas: 111,112,110 Isr: 111,112,110
- Topic: test Partition: 2 Leader: 112 Replicas: 112,110,111 Isr: 112,110,111
- Leader is the broker currently serving reads and writes for the partition, Replicas lists every broker holding a copy, and Isr (in-sync replicas) is the subset of replicas caught up with the leader.
- 6. Inspect the Kafka registrations in ZooKeeper:
- [zk: localhost:2181(CONNECTED) 5] ls /
- [admin, zookeeper, consumers, config, controller, zk-fifo, storm, brokers, controller_epoch]
- [zk: localhost:2181(CONNECTED) 6] ls /brokers ----> the znodes Kafka registers in zk
- [topics, ids]
- [zk: localhost:2181(CONNECTED) 7] ls /brokers/ids
- [112, 110, 111]
- [zk: localhost:2181(CONNECTED) 8] ls /brokers/ids/112
- []
- [zk: localhost:2181(CONNECTED) 9] ls /brokers/topics
- [test]
- [zk: localhost:2181(CONNECTED) 10] ls /brokers/topics/test
- [partitions]
- [zk: localhost:2181(CONNECTED) 11] ls /brokers/topics/test/partitions
- [2, 1, 0]
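- The broker znodes are ephemeral and store their details as znode data rather than children, which is why ls /brokers/ids/112 returned nothing. get shows the registration; the output below is an illustrative sketch of the 0.8.x format (timestamp elided):
- [zk: localhost:2181(CONNECTED) 12] get /brokers/ids/112
- {"jmx_port":-1,"timestamp":"...","host":"192.168.1.112","version":1,"port":9092}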
2 Calling Kafka from Java:
2.1 Producing from Java, consuming on the cluster:
- 1. Create a Maven project and add the following to pom.xml (kafka_2.10 is the client library built for Scala 2.10):
- <dependency>
- <groupId>org.apache.kafka</groupId>
- <artifactId>kafka_2.10</artifactId>
- <version>0.8.2.0</version>
- </dependency>
- 2. Java code: write messages into the topic test
- import java.util.Properties;
- import java.util.concurrent.TimeUnit;
- import kafka.javaapi.producer.Producer;
- import kafka.producer.KeyedMessage;
- import kafka.producer.ProducerConfig;
- import kafka.serializer.StringEncoder;
- public class kafkaProducer extends Thread{
- private String topic;
- public kafkaProducer(String topic){
- super();
- this.topic = topic;
- }
- @Override
- public void run() {
- Producer<Integer, String> producer = createProducer();
- int i=0;
- while(true){
- producer.send(new KeyedMessage<Integer, String>(topic, "message: " + i++));
- try {
- TimeUnit.SECONDS.sleep(1);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
- }
- private Producer<Integer, String> createProducer() {
- Properties properties = new Properties();
- properties.put("zookeeper.connect", "192.168.1.110:2181,192.168.1.111:2181,192.168.1.112:2181");//声明zk
- properties.put("serializer.class", StringEncoder.class.getName());
- properties.put("metadata.broker.list", "192.168.1.110:9092,192.168.1.111:9093,192.168.1.112:9094");// 声明kafka broker
- return new Producer<Integer, String>(new ProducerConfig(properties));
- }
- public static void main(String[] args) {
- new kafkaProducer("test").start();// 使用kafka集群中创建好的主题 test
- }
- }
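- Note: the two-argument KeyedMessage above sends with a null key, so the 0.8 producer scatters messages across partitions on its own. To pin messages to a partition by key, configure a key serializer and send keyed messages; a minimal sketch (key.serializer.class is a standard 0.8 producer property, the String key here is illustrative):
- // in createProducer(), additionally: properties.put("key.serializer.class", StringEncoder.class.getName());
- // then, with a Producer<String, String>, all messages sharing a key hash to the same partition:
- producer.send(new KeyedMessage<String, String>(topic, "key-" + i, "message: " + i));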
- 3. Consume the topic test on the cluster:
- [root@h2master kafka]# bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
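- (Optional) Before starting the Java producer, the pipe can be smoke-tested from another shell with the bundled console producer; the broker address assumes node 110 from the setup above:
- bin/kafka-console-producer.sh --broker-list 192.168.1.110:9092 --topic test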
- 4. Start the Java code, then the console consumer prints:
- message: 0
- message: 1
- message: 2
- message: 3
- ...
- message: 21
3 Writing the consumer in Java: run kafkaProducer first, then kafkaConsumer, and the consumer prints the producer's messages:
- import java.util.HashMap;
- import java.util.List;
- import java.util.Map;
- import java.util.Properties;
- import kafka.consumer.Consumer;
- import kafka.consumer.ConsumerConfig;
- import kafka.consumer.ConsumerIterator;
- import kafka.consumer.KafkaStream;
- import kafka.javaapi.consumer.ConsumerConnector;
- /**
- * Receives data; sample output:
- * Received: message: 10
- * Received: message: 11
- * Received: message: 12
- * Received: message: 13
- * Received: message: 14
- * @author zm
- */
- public class kafkaConsumer extends Thread{
- private String topic;
- public kafkaConsumer(String topic){
- super();
- this.topic = topic;
- }
- @Override
- public void run() {
- ConsumerConnector consumer = createConsumer();
- Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
- topicCountMap.put(topic, 1); // number of streams (consumer threads) for this topic, not a fetch size
- Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = consumer.createMessageStreams(topicCountMap);
- KafkaStream<byte[], byte[]> stream = messageStreams.get(topic).get(0);// 获取每次接收到的这个数据
- ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
- while(iterator.hasNext()){
- String message = new String(iterator.next().message());
- System.out.println("接收到: " + message);
- }
- }
- private ConsumerConnector createConsumer() {
- Properties properties = new Properties();
- properties.put("zookeeper.connect", "192.168.1.110:2181,192.168.1.111:2181,192.168.1.112:2181");//声明zk
- properties.put("group.id", "group1");// 必须要使用别的组名称, 如果生产者和消费者都在同一组,则不能访问同一组内的topic数据
- return Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));
- }
- public static void main(String[] args) {
- new kafkaConsumer("test").start();// 使用kafka集群中创建好的主题 test
- }
- }
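- topicCountMap can request more than one stream: with one stream per partition and a thread per stream, the topic is drained in parallel. A minimal sketch against the same 0.8.2 high-level consumer API, reusing createConsumer() from above (the thread-pool wiring is illustrative, not part of the original code):
- // extra imports: java.util.concurrent.ExecutorService, java.util.concurrent.Executors
- ConsumerConnector consumer = createConsumer();
- int streams = 3; // matches the 3 partitions of topic "test"
- Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
- topicCountMap.put(topic, streams);
- Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = consumer.createMessageStreams(topicCountMap);
- ExecutorService pool = Executors.newFixedThreadPool(streams);
- for (final KafkaStream<byte[], byte[]> stream : messageStreams.get(topic)) {
- pool.submit(new Runnable() {
- public void run() {
- // each thread drains one stream (one or more partitions)
- ConsumerIterator<byte[], byte[]> it = stream.iterator();
- while (it.hasNext()) {
- System.out.println(Thread.currentThread().getName() + " received: " + new String(it.next().message()));
- }
- }
- });
- }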
Full code is in the attachment.