  • Using Kafka in Java

    Kafka Producer API

    The Producer API comes in two flavors: kafka.producer.SyncProducer and kafka.producer.async.AsyncProducer. Both implement the same interface, shown here as in the older 0.7-era javaapi (the runnable examples later in this section use the 0.8 producer and KeyedMessage):

    class Producer {
      /* Send a message to the specified partition */
      public void send(kafka.javaapi.producer.ProducerData<K,V> producerData);

      /* Send a batch of messages */
      public void send(java.util.List<kafka.javaapi.producer.ProducerData<K,V>> producerData);

      /* Close the producer and release its resources */
      public void close();
    }

    The Producer API provides the following features:

      • Messages can be buffered in a local queue and sent to the broker asynchronously in batches; enable this with producer.type=async. The buffer is bounded by the queue.time and batch.size parameters. A background thread (kafka.producer.async.ProducerSendThread) dequeues the batch and hands it to kafka.producer.EventHandler, which sends it to the broker; you can plug in your own handler via the event.handler parameter. You can also register handlers for different stages of the producer pipeline, e.g. for log tracing or monitoring, by implementing the kafka.producer.async.CallbackHandler interface and configuring it via callback.handler.
      • You can write a custom Encoder to serialize messages by implementing the interface below. The default Encoder is kafka.serializer.DefaultEncoder.

        interface Encoder<T> {
          public Message toMessage(T data);
        }
      • Zookeeper-based automatic broker discovery is available through the zk.connect parameter. If you do not use Zookeeper, the broker.list parameter can supply a static list of brokers instead; each message is then sent to a randomly chosen broker, and if that broker fails, the send simply fails.
      • Messages are routed to partitions by a kafka.producer.Partitioner (a sketch of a custom partitioner follows this list):

        interface Partitioner<T> {
          int partition(T key, int numPartitions);
        }

        The partition function takes two arguments, the key and the number of available partitions, and returns the id of the chosen partition. The default strategy is hash(key) % numPartitions; if the key is null, a partition is picked at random. A custom partition function is configured with the partitioner.class parameter.
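    Both producer examples below configure partitioner.class = kafka.producer.partition.SimplePartitioner, a custom class the original post never shows. A minimal sketch, assuming the 0.8-era contract in which Kafka instantiates the class reflectively through a constructor taking VerifiableProperties; the last-octet routing scheme is purely illustrative:

    package kafka.producer.partition; // must match the partitioner.class property

    import kafka.producer.Partitioner;
    import kafka.utils.VerifiableProperties;

    public class SimplePartitioner implements Partitioner<String> {
        // Kafka creates the partitioner via reflection and requires
        // a public constructor accepting VerifiableProperties.
        public SimplePartitioner(VerifiableProperties props) {
        }

        // key is whatever KeyedMessage carries (an IP string in the examples
        // below); route on the last octet so one host maps to one partition.
        public int partition(String key, int numPartitions) {
            int partition = 0;
            int offset = key.lastIndexOf('.');
            if (offset > 0) {
                partition = Integer.parseInt(key.substring(offset + 1)) % numPartitions;
            }
            return partition;
        }
    }

    The async and sync producer examples below both reference this class.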
    import java.util.*;
    
    import kafka.javaapi.producer.Producer;
    import kafka.producer.KeyedMessage;
    import kafka.producer.ProducerConfig;
    
    
    public class ASyncProduce {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("metadata.broker.list", "192.168.1.1:19092,192.168.1.2:19092,192.168.1.3:19092");
            props.put("serializer.class", "kafka.serializer.StringEncoder"); // default: kafka.serializer.DefaultEncoder
            props.put("partitioner.class", "kafka.producer.partition.SimplePartitioner");
            // kafka.producer.DefaultPartitioner: based on the hash of the key
            //props.put("request.required.acks", "1");
            props.put("producer.type", "async"); // "async": buffer locally and send in batches; "sync" (default): send each message synchronously

            ProducerConfig config = new ProducerConfig(props);

            Producer<String, String> producer = new Producer<String, String>(config);

            for (int i = 0; i < Integer.MAX_VALUE; i++) {
                String ip = "192.168.2.1";
                String msg = "message";
                // topic, key (handed to the partitioner), payload
                KeyedMessage<String, String> data = new KeyedMessage<String, String>("topic", ip, msg);
                producer.send(data);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            producer.close();
        }
    }
    import java.util.Properties;

    import kafka.javaapi.producer.Producer;
    import kafka.producer.KeyedMessage;
    import kafka.producer.ProducerConfig;

    public class SyncProduce {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("metadata.broker.list", "192.168.1.1:19092,192.168.1.2:19092,192.168.1.3:19092");
            props.put("serializer.class", "kafka.serializer.StringEncoder"); // default: kafka.serializer.DefaultEncoder
            props.put("partitioner.class", "kafka.producer.partition.SimplePartitioner");
            // kafka.producer.DefaultPartitioner: based on the hash of the key
            props.put("request.required.acks", "1");
            // 0: never wait for an ack; 1: the leader acks once it has the message;
            // -1: the leader acks only after all replicas have the message

            ProducerConfig config = new ProducerConfig(props);

            Producer<String, String> producer = new Producer<String, String>(config);

            for (int i = 0; i < Integer.MAX_VALUE; i++) {
                String ip = "192.168.2.1";
                String msg = "message";
                // The key must be non-null and non-empty even if your partitioner
                // ignores it; otherwise the custom partitioner is never invoked.
                KeyedMessage<String, String> data = new KeyedMessage<String, String>("topic", ip, msg);
                producer.send(data);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
            producer.close();
        }
    }

    Kafka Consumer APIs

    The Consumer API comes at two levels. The low-level API keeps a connection to a single broker and closes it after the messages are received; it is stateless, so every fetch carries the offset explicitly.
    The high-level API hides the details of connecting to brokers and lets you consume without caring about the cluster topology. It maintains the consumption state for you and can subscribe to topics by criteria such as a whitelist, a blacklist, or a regular expression.
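    As a quick illustration of the filtered subscription mentioned above, a minimal sketch using the high-level connector's createMessageStreamsByFilter with a Whitelist regex (the Zookeeper address, group id, and topic pattern are placeholders):

    import java.util.List;
    import java.util.Properties;

    import kafka.consumer.Consumer;
    import kafka.consumer.ConsumerConfig;
    import kafka.consumer.KafkaStream;
    import kafka.consumer.Whitelist;
    import kafka.javaapi.consumer.ConsumerConnector;
    import kafka.message.MessageAndMetadata;

    public class WhitelistConsumeSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("zookeeper.connect", "192.168.1.1:12181"); // placeholder
            props.put("group.id", "grouptest");

            ConsumerConnector connector =
                    Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

            // Subscribe to every topic whose name matches the regex.
            List<KafkaStream<byte[], byte[]>> streams =
                    connector.createMessageStreamsByFilter(new Whitelist("topic.*"), 1);

            // Block and print payloads from the single stream.
            for (MessageAndMetadata<byte[], byte[]> mm : streams.get(0)) {
                System.out.println(new String(mm.message()));
            }
        }
    }

    The two full examples that follow use the low-level SimpleConsumer API and the high-level consumer-group API, respectively.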

    import kafka.api.FetchRequest;
    import kafka.api.FetchRequestBuilder;
    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.common.ErrorMapping;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.*;
    import kafka.javaapi.consumer.SimpleConsumer;
    import kafka.message.MessageAndOffset;
     
    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    
    public class PartitionConsumerTest {
        
        public static void main(String args[]) {
            PartitionConsumerTest example = new PartitionConsumerTest();
        long maxReads = Long.MAX_VALUE;
        // With Long.MAX_VALUE the first run() call effectively never returns,
        // so only partition 0 is consumed; lower this to rotate through partitions.
            String topic = "topic";
        if (args.length < 1) {
            System.out.println("Please assign partition number.");
            return;
        }
            
            List<String> seeds = new ArrayList<String>();
            seeds.add("192.168.2.1");
            seeds.add("192.168.2.2");
            seeds.add("192.168.2.3");
            
            int port = 19092;
             
            int partLen = Integer.parseInt(args[0]);
            for(int index=0;index < partLen;index++){
                try {
                    example.run(maxReads, topic, index/*partition*/, seeds, port);
                } catch (Exception e) {
                    System.out.println("Oops:" + e);
                     e.printStackTrace();
                }
            }
        }
        
        private List<String> m_replicaBrokers = new ArrayList<String>();
     
        public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port) throws Exception {
    
            PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
            if (metadata == null) {
                System.out.println("Can't find metadata for Topic and Partition. Exiting");
                return;
            }
            if (metadata.leader() == null) {
                System.out.println("Can't find Leader for Topic and Partition. Exiting");
                return;
            }
            String leadBroker = metadata.leader().host();
            String clientName = "Client_" + a_topic + "_" + a_partition;
     
            SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
            long readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
     
            int numErrors = 0;
            while (a_maxReads > 0) {
                if (consumer == null) {
                    consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
                }
                FetchRequest req = new FetchRequestBuilder()
                        .clientId(clientName)
                        .addFetch(a_topic, a_partition, readOffset, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
                        .build();
                FetchResponse fetchResponse = consumer.fetch(req);
     
                if (fetchResponse.hasError()) {
                    numErrors++;
                    // Something went wrong!
                    short code = fetchResponse.errorCode(a_topic, a_partition);
                    System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
                    if (numErrors > 5) break;
                    if (code == ErrorMapping.OffsetOutOfRangeCode())  {
                        // We asked for an invalid offset. For simple case ask for the last element to reset
                        readOffset = getLastOffset(consumer,a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                        continue;
                    }
                    consumer.close();
                    consumer = null;
                    leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
                    continue;
                }
                numErrors = 0;
     
                long numRead = 0;
                for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
                    long currentOffset = messageAndOffset.offset();
                    if (currentOffset < readOffset) {
                        System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                        continue;
                    }
                    readOffset = messageAndOffset.nextOffset();
                    ByteBuffer payload = messageAndOffset.message().payload();
     
                    byte[] bytes = new byte[payload.limit()];
                    payload.get(bytes);
                    System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                    numRead++;
                    a_maxReads--;
                }
     
                if (numRead == 0) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                    }
                }
            }
            if (consumer != null) consumer.close();
        }
     
        public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                         long whichTime, String clientName) {
            TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
            Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
            kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                    requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
            OffsetResponse response = consumer.getOffsetsBefore(request);
     
            if (response.hasError()) {
                System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition) );
                return 0;
            }
            long[] offsets = response.offsets(topic, partition);
            return offsets[0];
        }
     
        private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
            for (int i = 0; i < 3; i++) {
                boolean goToSleep = false;
                PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
                if (metadata == null) {
                    goToSleep = true;
                } else if (metadata.leader() == null) {
                    goToSleep = true;
                } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                    // first time through if the leader hasn't changed give ZooKeeper a second to recover
                    // second time, assume the broker did recover before failover, or it was a non-Broker issue
                    //
                    goToSleep = true;
                } else {
                    return metadata.leader().host();
                }
                if (goToSleep) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                    }
                }
            }
            System.out.println("Unable to find new leader after Broker failure. Exiting");
            throw new Exception("Unable to find new leader after Broker failure. Exiting");
        }
     
        private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
            PartitionMetadata returnMetaData = null;
            loop:
            for (String seed : a_seedBrokers) {
                SimpleConsumer consumer = null;
                try {
                    consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
                    List<String> topics = Collections.singletonList(a_topic);
                    TopicMetadataRequest req = new TopicMetadataRequest(topics);
                    kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
     
                    List<TopicMetadata> metaData = resp.topicsMetadata();
                    for (TopicMetadata item : metaData) {
                        for (PartitionMetadata part : item.partitionsMetadata()) {
                            if (part.partitionId() == a_partition) {
                                returnMetaData = part;
                                break loop;
                            }
                        }
                    }
                } catch (Exception e) {
                    System.out.println("Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic
                            + ", " + a_partition + "] Reason: " + e);
                } finally {
                    if (consumer != null) consumer.close();
                }
            }
            if (returnMetaData != null) {
                m_replicaBrokers.clear();
                for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
                    m_replicaBrokers.add(replica.host());
                }
            }
            return returnMetaData;
        }
    }
    import kafka.consumer.ConsumerConfig;
    import kafka.consumer.KafkaStream;
    import kafka.javaapi.consumer.ConsumerConnector;
     
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    
    public class GroupConsumerTest extends Thread {
        private final ConsumerConnector consumer;
        private final String topic;
        private  ExecutorService executor;
        
        public GroupConsumerTest(String zookeeper, String groupId, String topic){
            consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(zookeeper, groupId));
            this.topic = topic;
        }
        
        public void shutdown() {
            if (consumer != null) consumer.shutdown();
            if (executor != null) {
                executor.shutdown();
                try {
                    if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                        System.out.println("Timed out waiting for consumer threads to shut down, exiting uncleanly");
                    }
                } catch (InterruptedException e) {
                    System.out.println("Interrupted during shutdown, exiting uncleanly");
                }
            }
        }
     
        public void run(int a_numThreads) {
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            topicCountMap.put(topic, new Integer(a_numThreads));
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
            List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
     
            executor = Executors.newFixedThreadPool(a_numThreads);
     
            int threadNumber = 0;
            for (final KafkaStream<byte[], byte[]> stream : streams) {
                // ConsumerTest (sketched after this class) drains one stream per thread.
                executor.submit(new ConsumerTest(stream, threadNumber));
                threadNumber++;
            }
        }
        
        private static ConsumerConfig createConsumerConfig(String zookeeper, String groupId) {
            Properties props = new Properties();
            props.put("zookeeper.connect", zookeeper);
            props.put("group.id", groupId);
            props.put("zookeeper.session.timeout.ms", "40000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            return new ConsumerConfig(props);
        }
        
        public static void main(String[] args) {
            if (args.length < 1) {
                System.out.println("Please assign the number of consumer threads.");
                return;
            }

            String zooKeeper = "192.168.1.1:12181,192.168.1.2:12181,192.168.1.3:12181";
            String groupId = "grouptest";
            String topic = "topic";
            int threads = Integer.parseInt(args[0]);
     
            GroupConsumerTest example = new GroupConsumerTest(zooKeeper, groupId, topic);
            example.run(threads);
     
            try {
                Thread.sleep(Long.MAX_VALUE);
            } catch (InterruptedException ie) {
     
            }
            example.shutdown();
        }
    }
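    GroupConsumerTest submits ConsumerTest tasks that the original post never defines. A minimal sketch, assuming each task simply drains its assigned stream and prints the payloads:

    import kafka.consumer.ConsumerIterator;
    import kafka.consumer.KafkaStream;

    public class ConsumerTest implements Runnable {
        private final KafkaStream<byte[], byte[]> m_stream;
        private final int m_threadNumber;

        public ConsumerTest(KafkaStream<byte[], byte[]> a_stream, int a_threadNumber) {
            m_stream = a_stream;
            m_threadNumber = a_threadNumber;
        }

        public void run() {
            // hasNext() blocks until a message arrives and returns false
            // once the connector shuts down.
            ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
            while (it.hasNext()) {
                System.out.println("Thread " + m_threadNumber + ": " + new String(it.next().message()));
            }
            System.out.println("Shutting down Thread: " + m_threadNumber);
        }
    }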

    Reference: https://blog.csdn.net/tangdong3415/article/details/53432166
