zoukankan      html  css  js  c++  java
  • Kafka配置文件及解释

    
    

    broker.id=0
    num.network.threads=9
    num.io.threads=24
    socket.send.buffer.bytes=102400
    listeners=PLAINTEXT://:9092
    port=9092  
    host.name=
    socket.receive.buffer.bytes=102400
    socket.request.max.bytes=104857600
    log.dirs=/home/service/var/kafka
    num.partitions=12
    offsets.topic.replication.factor=2   
    transaction.state.log.min.isr=1
    log.retention.hours=72
    log.retention.check.interval.ms=300000
    zookeeper.connect=10.12.176.3:2181,10.12.172.32:2181,10.12.174.14:2181/security-kafka
    zookeeper.connection.timeout.ms=6000
    group.initial.rebalance.delay.ms=3000
    log.cleaner.enable=true
    delete.topic.enable=true

     
    1. addrep_cpd-app-down.json 文件内容:
    {"version":1, "partitions":[ 
    {"topic":"cpd-app-down","partition":0,"replicas":[1,2]}, 
    {"topic":"cpd-app-down","partition":1,"replicas":[2,3]}, 
    {"topic":"cpd-app-down","partition":2,"replicas":[3,4]}, 
    {"topic":"cpd-app-down","partition":3,"replicas":[4,5]}, 
    {"topic":"cpd-app-down","partition":4,"replicas":[5,6]}, 
    {"topic":"cpd-app-down","partition":5,"replicas":[6,0]}, 
    {"topic":"cpd-app-down","partition":6,"replicas":[0,1]}, 
    {"topic":"cpd-app-down","partition":7,"replicas":[1,2]}, 
    {"topic":"cpd-app-down","partition":8,"replicas":[2,3]}, 
    {"topic":"cpd-app-down","partition":9,"replicas":[3,4]}, 
    {"topic":"cpd-app-down","partition":10,"replicas":[4,5]}, 
    {"topic":"cpd-app-down","partition":11,"replicas":[5,6]},
     {"topic":"cpd-app-down","partition":12,"replicas":[6,0]},
     {"topic":"cpd-app-down","partition":13,"replicas":[0,1]}
     ] }
    
    2. sh kafka-reassign-partitions.sh --zookeeper 10.6.72.38:2181,10.6.72.8:2181 --reassignment-json-file ../config/addrep_cpd-app-down.json --execute
    broker.id=1
    listeners=PLAINTEXT://10.32.104.37:9092
    num.network.threads=3 
    num.io.threads=8
    socket.send.buffer.bytes=102400
    socket.receive.buffer.bytes=102400
    socket.request.max.bytes=104857600
    log.dirs=/var/data/kafka
    num.partitions=6
    num.recovery.threads.per.data.dir=1
    log.retention.hours=72
    log.segment.bytes=1073741824
    log.retention.check.interval.ms=300000
    log.cleaner.enable=true
    zookeeper.connect=10.32.106.42:2181,10.32.114.34:2181,10.32.104.37:2181
    zookeeper.connection.timeout.ms=6000
    delete.topic.enable=true
    transaction.state.log.min.isr=1
    # NOTE(review): duplicate key — log.retention.hours is already set to 72 above; in a Java properties file the last value (24) wins, making the earlier line dead. Keep only one.
    log.retention.hours=24
    default.replication.factor=3
    1. 创建topics-to-move.json,输入topic信息
    {"topics":
         [{"topic": "TestSing"}],
         "version":1
    }
    
    2. 生成topic迁移到新broker的配置文件,json格式
    sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --topics-to-move-json-file topics-to-move.json --broker-list "3,4,5" --generate
    
    3. 执行脚本,加载json文件,开始迁移操作
    sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --reassignment-json-file config/testsing.json --execute
    kafka日志按文件大小分割: log4j.properties
    #log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
    #log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
    log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.kafkaAppender.MaxFileSize=500MB
    log4j.appender.kafkaAppender.MaxBackupIndex=5

    log4j.rootLogger=INFO, stdout
    log4j.appender.stdout=org.apache.log4j.ConsoleAppender
    log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
    log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.kafkaAppender.MaxFileSize=500MB
    log4j.appender.kafkaAppender.MaxBackupIndex=5
    log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
    log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.stateChangeAppender.MaxFileSize=500MB
    log4j.appender.stateChangeAppender.MaxBackupIndex=5
    log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
    log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.requestAppender.MaxFileSize=500MB
    log4j.appender.requestAppender.MaxBackupIndex=5
    log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
    log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.cleanerAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.cleanerAppender.MaxFileSize=500MB
    log4j.appender.cleanerAppender.MaxBackupIndex=5
    log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
    log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.controllerAppender.MaxFileSize=500MB
    log4j.appender.controllerAppender.MaxBackupIndex=5
    log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
    log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.appender.authorizerAppender=org.apache.log4j.RollingFileAppender
    log4j.appender.authorizerAppender.MaxFileSize=500MB
    log4j.appender.authorizerAppender.MaxBackupIndex=5
    log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
    log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
    log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
    log4j.logger.kafka=INFO, kafkaAppender
    log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
    log4j.additivity.kafka.network.RequestChannel$=false
    log4j.logger.kafka.request.logger=WARN, requestAppender
    log4j.additivity.kafka.request.logger=false
    log4j.logger.kafka.controller=INFO, controllerAppender
    log4j.additivity.kafka.controller=false
    log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
    log4j.additivity.kafka.log.LogCleaner=false
    log4j.logger.state.change.logger=INFO, stateChangeAppender
    log4j.additivity.state.change.logger=false
    log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender
    log4j.additivity.kafka.authorizer.logger=false

     
  • 相关阅读:
    基于tiny4412的Linux内核移植 -- 设备树的展开
    基于tiny4412的Linux内核移植 -- MMA7660驱动移植(九-2)
    tiny4412的中断资源连接关系示意图
    基于tiny4412的Linux内核移植 -- MMA7660驱动移植(九)
    基于tiny4412的Linux内核移植 -- PWM子系统学习(八)
    spring cloud服务发现注解之@EnableDiscoveryClient与@EnableEurekaClient
    springcloud之eureka配置——eureka.instance
    Maven parent.relativePath
    mybatis中大于等于小于等于的写法
    Eureka服务注册过程详解之IpAddress(详解eureka.instance.prefer-ip-address = true 与 eureka.instance.prefer-ip-address)
  • 原文地址:https://www.cnblogs.com/lwhctv/p/10749921.html
Copyright © 2011-2022 走看看