  • Deploying ELK with docker-compose

    This post builds on

    https://www.cnblogs.com/lirunzhou/p/10550675.html

    and converts that ELK setup into docker-compose.yml files.

    A few things to keep in mind with docker-compose:

    1. Don't use a Docker container as a data container; always keep data outside the container via volumes.

    2. Don't hand your docker-compose files to others, since they contain your server details.

    3. Prefer docker-compose commands for operations; don't mix manual docker commands with docker-compose on the same services.

    4. Write a small script that automatically backs up the data Docker maps out to the host (a sketch follows this list).

    5. Don't put all services into a single Docker container.
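
    Note 4 is easy to automate. Here is a minimal sketch in Python; the source and backup paths are hypothetical and must be adapted to wherever your compose files actually map data (e.g. the ./data volume used by the elasticsearch service below):

    #!/usr/bin/env python3
    """Nightly backup of host directories that docker-compose maps as volumes.

    A minimal sketch; SOURCE_DIRS and BACKUP_ROOT are hypothetical paths --
    point them at the host side of your `volumes:` bind mounts.
    """
    import subprocess
    import time
    from pathlib import Path

    SOURCE_DIRS = ["/opt/elk/es/data"]      # host paths mapped via `volumes:`
    BACKUP_ROOT = Path("/backup/elk")       # where the tarballs are kept
    KEEP = 7                                # number of daily archives to retain

    def backup():
        BACKUP_ROOT.mkdir(parents=True, exist_ok=True)
        stamp = time.strftime("%Y%m%d")
        for src in SOURCE_DIRS:
            name = Path(src).name
            archive = BACKUP_ROOT / f"{name}-{stamp}.tar.gz"
            # tar straight from the host side of the bind mount
            subprocess.run(["tar", "-czf", str(archive), "-C", src, "."], check=True)
            # drop archives beyond the retention window
            for stale in sorted(BACKUP_ROOT.glob(f"{name}-*.tar.gz"))[:-KEEP]:
                stale.unlink()

    if __name__ == "__main__":
        backup()

    For a running Elasticsearch node, the snapshot API is the safer route; a file-level copy like this is best taken while the service is stopped.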

    Environment:

    Management node: 10.191.51.44

    Data nodes: 10.191.51.45/46/47

    The files:

    es docker-compose.yml

    version: '2'
    services:
      elasticsearch:
        container_name: ES
        environment:
          - ES_JAVA_OPTS=-Xms4G -Xmx4G
        image: 10.191.51.5/elk/elasticsearch:6.5.4
        volumes:
          - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
          - ./data:/usr/share/elasticsearch/data
        ports:
          - "9200:9200"
          - "9300:9300"
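
    One host-level prerequisite worth noting (not covered by the compose file itself): Elasticsearch 6.x refuses to start in production mode when the kernel's vm.max_map_count is too low, so it usually has to be raised on every node before bringing the container up:

    sysctl -w vm.max_map_count=262144    # persist it in /etc/sysctl.conf to survive reboots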

    Management node elasticsearch.yml

    cluster.name: elasticsearch-cluster
    node.name: es-node1
    network.bind_host: 0.0.0.0
    network.publish_host: 10.191.51.44
    http.port: 9200
    transport.tcp.port: 9300
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    node.master: false
    node.data: false
    node.ingest: true
    discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
    discovery.zen.minimum_master_nodes: 2

    Data node elasticsearch.yml (shown for 10.191.51.45; adjust node.name and network.publish_host on .46 and .47)

    cluster.name: elasticsearch-cluster
    node.name: es-node2
    network.bind_host: 0.0.0.0
    network.publish_host: 10.191.51.45
    http.port: 9200
    transport.tcp.port: 9300
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    node.master: true
    node.data: true
    discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
    discovery.zen.minimum_master_nodes: 2
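
    Once the coordinating node and the three data nodes are up, the cluster should report four nodes. Below is a minimal sanity check sketched with Python's standard library; the host and port come from the configs above, everything else is illustrative:

    #!/usr/bin/env python3
    """Sanity-check the ES cluster through the coordinating node."""
    import json
    from urllib.request import urlopen

    BASE = "http://10.191.51.44:9200"

    # /_cluster/health reports overall status and how many nodes joined
    health = json.load(urlopen(BASE + "/_cluster/health"))
    print(health["status"], "-", health["number_of_nodes"], "nodes")

    # /_cat/nodes marks the elected master with a '*'
    print(urlopen(BASE + "/_cat/nodes?v").read().decode())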

    kafka docker-compose.yml (the environment values below must be set per broker: broker id, advertised listener, and so on)

    version: '2'
    services:
      kafka:
        container_name: kafka0
        environment:
          - KAFKA_BROKER_ID=0
          - KAFKA_ZOOKEEPER_CONNECT=10.191.51.44:2181
          - KAFKA_DEFAULT_REPLICATION_FACTOR=3
          - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
          - KAFKA_ADVERTISED_HOST_NAME=kafka1
          - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.191.51.45:9092
          - KAFKA_DELETE_TOPIC_ENABLE=true
        image: 10.191.51.5/elk/wurstmeister/kafka:2.1.1
        ports:
          - "9092:9092"
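
    Before pointing Logstash at the brokers, it can help to confirm that the topic accepts writes. A sketch using the third-party kafka-python client, which is an assumption here (pip install kafka-python), not part of the original setup; the broker addresses and topic name come from the configs in this post:

    #!/usr/bin/env python3
    """Smoke-test the Kafka brokers with a single message."""
    from kafka import KafkaProducer

    producer = KafkaProducer(
        bootstrap_servers=["10.191.51.45:9092", "10.191.51.46:9092", "10.191.51.47:9092"]
    )
    # publish one test message to the topic Logstash will consume from
    future = producer.send("garnet_garnetAll_log", b'{"msg_info": "smoke test"}')
    record = future.get(timeout=10)          # raises if the brokers rejected it
    print(f"wrote to partition {record.partition} at offset {record.offset}")
    producer.close()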

    logstash docker-compose.yml

    version: '2'
    services:
      logstash:
        container_name: logstash
        image: 10.191.51.5/elk/logstash:6.5.4
        volumes:
          - ./config/:/usr/share/logstash/config/
          - ./pipeline/:/usr/share/logstash/pipeline/
        ports:
          - "5044:5044"
          - "9600:9600"

    pipeline/logstash.conf 

    input {
        kafka {
            bootstrap_servers => ["10.191.51.45:9092,10.191.51.46:9092,10.191.51.47:9092"]
            client_id => "logstash-garnet"
            group_id => "logstash-garnet"
            consumer_threads => 8
            decorate_events => true
            topics => ["garnet_garnetAll_log"]
            type => "garnet_all_log"
        }
    }
    filter {
        if [type] == "garnet_all_log" {
            # rename the literal @timestamp inside the raw message so the
            # parsed field doesn't collide with the event's own @timestamp
            mutate {
                gsub => ["message", "@timestamp", "sampling_time"]
            }
            json {
                source => "message"
            }
            grok {
                match => {
                    "message" => [
                        "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}parameters=%{GREEDYDATA:msg_json_info}",
                        "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}"
                    ]
                }
            }
            # a second json pass for the payload captured by the first pattern
            if [msg_json_info] {
                json {
                    source => "msg_json_info"
                }
            }
        }
    }
    output {
        if [type] == "garnet_all_log" {
            elasticsearch {
                hosts => ["10.191.51.45:9200","10.191.51.46:9200","10.191.51.47:9200"]
                index => "garnet_all-%{+YYYY.MM.dd}"
            }
        }
    }
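
    For reference, the first grok pattern above expects application lines shaped roughly like the following (a hypothetical example, not taken from the original post); the second pattern matches the same shape without the trailing parameters= JSON payload:

    2019-03-27 10:15:32,481 [http-nio-8080-exec-1] INFO [com.garnet.web.LogAspect] request handled parameters={"userId": 42}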

    config/pipelines.yml

    - pipeline.id: pipeline_1
      pipeline.batch.size: 200
      pipeline.batch.delay: 1
      path.config: /usr/share/logstash/pipeline
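
    Logstash loads every entry in pipelines.yml, so additional pipelines are just further list items. A hypothetical second entry (the id and path are invented for illustration; the extra directory would also need a volume mount in the compose file):

    - pipeline.id: pipeline_2
      path.config: /usr/share/logstash/pipeline2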

    config/logstash.yml

    log.level: warn
    xpack.license.self_generated.type: basic
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.url: "http://10.191.51.44:9200"

    config/jvm.options changes

    -Xms4g
    -Xmx4g
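
    With the files in place, each service is started from its own directory using the standard compose commands (per note 3 above, stick to docker-compose for day-to-day operations):

    docker-compose up -d      # start the service in the background
    docker-compose ps         # confirm the container is up
    docker-compose logs -f    # follow the service logs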