zoukankan      html  css  js  c++  java
  • flink_初识02kafkawordcount

    1.启动zookeeper服务

    ./bin/zookeeper-server-start.sh config/zookeeper.properties

    2.开启kafka服务

    .\bin\windows\kafka-server-start.bat .\config\server.properties
    ./bin/kafka-server-start.sh config/server.properties

    3.创建topic

    .\bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink
    ./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink

    4.创建生产者

    .\bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic test_flink
    ./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test_flink

    --5.创建消费者
    --.\bin\windows\kafka-console-consumer.bat --zookeeper localhost:2181 --topic test_flink
    --./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test_flink --from-beginning

    5. 编写并运行 Flink 消费程序（Scala 代码如下）

    package flink
    
    import java.util.Properties
    
    import org.apache.flink.api.common.serialization.SimpleStringSchema
    import org.apache.flink.core.fs.FileSystem.WriteMode
    import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
    import org.apache.flink.streaming.api.windowing.time.Time
    import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
    import org.apache.flink.streaming.api.scala._
    
    object KafkaWordCount {

      /** Builds a Kafka 0.11 consumer for topic "test_flink" on a local broker.
        *
        * Reads plain UTF-8 strings (SimpleStringSchema) and joins consumer
        * group "group1". The zookeeper.connect property is only used by
        * legacy (0.8-era) consumers but is kept for compatibility with the
        * original setup instructions above.
        */
      def getFlinkKafkaConsumer() = {
        val prop = new Properties()

        prop.setProperty("zookeeper.connect", "localhost:2181") // ZOOKEEPER_HOST
        prop.setProperty("bootstrap.servers", "localhost:9092") // KAFKA_BROKER
        prop.setProperty("group.id", "group1") // TRANSACTION_GROUP
        new FlinkKafkaConsumer011[String]("test_flink", new SimpleStringSchema(), prop) // TOPIC
      }

      /** Entry point: consumes lines from Kafka, counts words per 2-second
        * tumbling window, prints results and also writes them to a local file.
        */
      def main(args: Array[String]): Unit = {

        val env = StreamExecutionEnvironment.getExecutionEnvironment

        val source = getFlinkKafkaConsumer()

        // Re-read the topic from the beginning on (re)start so the demo
        // always has data; remove for at-most-once replay semantics.
        source.setStartFromEarliest()

        val dStream = env.addSource(source)

        // BUG FIX: the original used split("\s"), which is not a valid Scala
        // string escape and does not compile. "\\s+" splits on runs of
        // whitespace; the filter drops the empty token produced when a line
        // starts with whitespace.
        val result = dStream
          .flatMap(line => line.split("\\s+").filter(_.nonEmpty))
          .map(word => (word, 1))
          .keyBy(0)
          .timeWindow(Time.seconds(2L)) // 2L: uppercase suffix, "2l" reads as "21"
          .sum(1)

        // Parallelism 1 so the console output is not interleaved across subtasks.
        result.setParallelism(1).print()

        // Also persist each window's counts; OVERWRITE replaces stale runs.
        result.writeAsText("E:\\sparkproject\\src\\test\\data\\result2.txt", WriteMode.OVERWRITE)

        env.execute("KafkaWordCount")
      }

    }
  • 相关阅读:
    更好地限制一个UITextField的输入长度
    训练集(train set) 验证集(validation set) 测试集(test set)
    主流机器学习[xgb, lgb, Keras, LR]
    python pandas (ix & iloc &loc) 的区别
    机器学习:数据预处理之独热编码(One-Hot)
    SPARK第一天
    Spark简介
    scatter
    协方差和相关关系
    SQL存储过程
  • 原文地址:https://www.cnblogs.com/yin-fei/p/11165559.html
Copyright © 2011-2022 走看看