zoukankan      html  css  js  c++  java
  • kafka 0.8.2 消息生产者 KafkaProducer

    package com.hashleaf.kafka;
    
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    
    /**
     * 消息生产者 KafkaProducer
     * @author xiaojf 294825811@qq.com
     * @since 2015-7-15 下午10:50:01
     */
    public class NewProducer {
        /** Topic all demo messages are sent to. */
        public static final String HASHLEAF_KAFKA_TOPIC = "hashleaf_topic";

        private final KafkaProducer<String, String> producer;

        /**
         * Builds a new-style (org.apache.kafka.clients) producer.
         *
         * <p>The new producer does NOT read the legacy (kafka.javaapi) config keys.
         * It requires "bootstrap.servers" instead of "metadata.broker.list", and
         * "key.serializer"/"value.serializer" classes instead of the legacy
         * "serializer.class"/"key.serializer.class" (kafka.serializer.StringEncoder);
         * without them KafkaProducer fails at construction with a ConfigException.
         */
        public NewProducer() {
            Properties props = new Properties();
            // Broker bootstrap list (host:port, comma separated).
            props.put("bootstrap.servers", "192.168.66.2:9092,192.168.66.3:9092,192.168.66.4:9092");

            // Serializers for the new producer API.
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            // acks semantics:
            //   "0"        - never wait for a broker ack (lowest latency, weakest durability)
            //   "1"        - ack after the leader replica has the data
            //   "-1"/"all" - ack after all in-sync replicas have the data (strongest durability)
            props.put("acks", "-1");

            // NOTE(review): the legacy "partitioner.class" (kafka.producer.Partitioner, see
            // MyPartitioner) and the broker-side "num.partitions" setting are not producer
            // configs for the new API; per-record partitioning can be done via the
            // ProducerRecord(topic, partition, key, value) constructor instead.

            producer = new KafkaProducer<String, String>(props);
        }

        /**
         * Sends 10000 keyed messages from a 4-thread pool, then shuts the pool
         * down and closes the producer so buffered records are flushed.
         */
        public void produce() {
            int count = 10000;
            ExecutorService executor = Executors.newFixedThreadPool(4);
            for (int i = 0; i < count; i++) {
                final String key = String.valueOf(i);
                final String message = "hashleaf-" + i;
                executor.submit(new Runnable() {

                    @Override
                    public void run() {
                        // Include the key (it was computed but unused before) so records
                        // with equal keys are routed to the same partition.
                        producer.send(new ProducerRecord<String, String>(HASHLEAF_KAFKA_TOPIC, key, message));
                    }
                });
            }
            // Without shutdown/close the JVM never exits and buffered sends may be lost.
            executor.shutdown();
            try {
                executor.awaitTermination(1, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            producer.close();
        }

        public static void main(String[] args) {
            // BUG FIX: original instantiated MyPartitioner's sibling "MyProducer",
            // which does not exist with a produce() method — this class is NewProducer.
            new NewProducer().produce();
        }
    }

    自定义分区 (custom partitioner, legacy kafka.producer.Partitioner API)

    package com.hashleaf.kafka;
    
    import kafka.producer.Partitioner;
    import kafka.utils.VerifiableProperties;
    
    /**
     * 自定义分区规则
     * @author xiaojf 294825811@qq.com
     * @since 2015-7-15 下午11:57:23
     */
    public class MyPartitioner implements Partitioner {

        /** Kafka instantiates partitioners reflectively with this signature; props unused. */
        public MyPartitioner(VerifiableProperties props) {
        }

        /**
         * Maps a record key to a partition in [0, numPartitions).
         *
         * <p>Numeric keys are distributed by value; non-numeric or null keys fall
         * back to the key's hash instead of throwing NumberFormatException and
         * failing the send. The original code returned a negative (invalid)
         * partition for negative numeric keys because Java's % can be negative.
         *
         * @param obj           the record key (may be null)
         * @param numPartitions number of partitions of the topic (> 0)
         * @return a partition index in [0, numPartitions)
         */
        @Override
        public int partition(Object obj, int numPartitions) {
            int raw;
            try {
                raw = Integer.parseInt(String.valueOf(obj)) % numPartitions;
            } catch (NumberFormatException e) {
                // Non-numeric key: hash it rather than crash. parseInt(null + "")
                // also lands here for null keys.
                raw = (obj == null ? 0 : obj.hashCode()) % numPartitions;
            }
            // Normalize a possibly-negative remainder (Java 6 compatible; no Math.floorMod).
            return raw < 0 ? raw + numPartitions : raw;
        }

    }

    Maven build file (pom.xml)

    <!-- Maven build for the hashleaf Kafka 0.8.2 producer example. -->
    <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
        <modelVersion>4.0.0</modelVersion>
        <groupId>com.hashleaf</groupId>
        <artifactId>kafka</artifactId>
        <version>0.0.1-SNAPSHOT</version>
    
        <dependencies>
            <!-- Kafka broker/client jar (Scala 2.9.2 build); provides both the legacy
                 kafka.javaapi producer and the new org.apache.kafka.clients producer. -->
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka_2.9.2</artifactId>
                <version>0.8.2.1</version>
            </dependency>
            <!-- log4j 1.2.15 declares JMX/JMS/mail artifacts that are not available in
                 Maven Central; exclude them so dependency resolution succeeds. -->
            <dependency>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
                <version>1.2.15</version>
                <exclusions>
                    <exclusion>
                        <artifactId>jmxtools</artifactId>
                        <groupId>com.sun.jdmk</groupId>
                    </exclusion>
                    <exclusion>
                        <artifactId>jmxri</artifactId>
                        <groupId>com.sun.jmx</groupId>
                    </exclusion>
                    <exclusion>
                        <artifactId>jms</artifactId>
                        <groupId>javax.jms</groupId>
                    </exclusion>
                    <exclusion>
                        <artifactId>mail</artifactId>
                        <groupId>javax.mail</groupId>
                    </exclusion>
                </exclusions>
            </dependency>
            <dependency>
                <groupId>junit</groupId>
                <artifactId>junit</artifactId>
                <version>4.11</version>
                <scope>test</scope>
            </dependency>
        </dependencies>
    
        <build>
            <plugins>
                <!-- NOTE(review): plugin version is unpinned, so the build is not
                     reproducible across Maven versions — consider pinning it. -->
                <plugin>
                    <artifactId>maven-compiler-plugin</artifactId>
                    <configuration>
                        <source>1.6</source>
                        <target>1.6</target>
                    </configuration>
                </plugin>
            </plugins>
        </build>
    </project>
  • 相关阅读:
    【大数据应用技术】作业十二|Hadoop综合大作业
    【大数据应用技术】作业十一|分布式并行计算MapReduce
    【大数据应用技术】作业十|分布式文件系统HDFS 练习
    【大数据应用技术】作业九|安装关系型数据库MySQL 安装大数据处理框架Hadoop
    【大数据应用技术】作业八|爬虫综合大作业(上)
    【大数据应用技术】作业七|爬取全部的校园新闻
    【大数据应用技术】作业六|获取一篇新闻的全部信息
    【大数据应用技术】作业五|理解爬虫原理
    【大数据应用技术】作业四|中文词频统计
    大数据应用期末总作业
  • 原文地址:https://www.cnblogs.com/xiaojf/p/6602732.html
Copyright © 2011-2022 走看看