  • Collecting Docker Logs with ELK

    Pipeline: filebeat (collect) -> kafka (buffer) -> logstash (process) -> elasticsearch (store) -> kibana (display)

    This experiment uses two virtual machines:

    IP              Services
    172.16.16.109   elasticsearch, kibana, logstash
    172.16.16.149   kafka, filebeat

    Install elasticsearch, kibana, and logstash

    mkdir -p /data/docker-compose/elk/ && cd /data/docker-compose/elk
    mkdir elasticsearch  kibana  logstash
    
    # Configure docker-compose.yml
    cat docker-compose.yml 
    version: "3.2"
    
    services:
      elasticsearch01:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
        container_name: es01
        volumes:
          - ./elasticsearch/es01:/usr/share/elasticsearch/data:rw
        ports:
          - 9200:9200
          - 9300:9300
        environment:
          node.name: "es01"
          cluster.name: "docker-cluster"
          network.host: "0.0.0.0"
          discovery.seed_hosts: "es02,es03"
          cluster.initial_master_nodes: "es01,es02,es03"
          bootstrap.memory_lock: "true"
          xpack.license.self_generated.type: "basic"
          xpack.security.enabled: "false"
          xpack.monitoring.collection.enabled: "true"
          ES_JAVA_OPTS: "-Xmx1g -Xms1g"
        ulimits:
          memlock:
            soft: -1
            hard: -1
        networks:
          - elk
    
      elasticsearch02:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
        container_name: es02
        volumes:
          - ./elasticsearch/es02:/usr/share/elasticsearch/data:rw
        environment:
          node.name: "es02"
          cluster.name: "docker-cluster"
          network.host: "0.0.0.0"
          discovery.seed_hosts: "es01,es03"
          cluster.initial_master_nodes: "es01,es02,es03"
          bootstrap.memory_lock: "true"
          xpack.license.self_generated.type: "basic"
          xpack.security.enabled: "false"
          xpack.monitoring.collection.enabled: "true"
          ES_JAVA_OPTS: "-Xmx1g -Xms1g"
        ulimits:
          memlock:
            soft: -1
            hard: -1
        networks:
          - elk
    
      elasticsearch03:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
        container_name: es03
        volumes:
          - ./elasticsearch/es03:/usr/share/elasticsearch/data:rw
        environment:
          node.name: "es03"
          cluster.name: "docker-cluster"
          network.host: "0.0.0.0"
          discovery.seed_hosts: "es01,es02"
          cluster.initial_master_nodes: "es01,es02,es03"
          bootstrap.memory_lock: "true"
          xpack.license.self_generated.type: "basic"
          xpack.security.enabled: "false"
          xpack.monitoring.collection.enabled: "true"
          ES_JAVA_OPTS: "-Xmx1g -Xms1g"
        ulimits:
          memlock:
            soft: -1
            hard: -1
        networks:
          - elk
    
      logstash:
        image: docker.elastic.co/logstash/logstash:7.6.2
        volumes:
          - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
          - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
        ports:
          - "5000:5000/tcp"
          - "5000:5000/udp"
          - "9600:9600"
        environment:
          LS_JAVA_OPTS: "-Xmx1g -Xms1g"
        networks:
          - elk
        depends_on:
          - elasticsearch01
          - elasticsearch02
          - elasticsearch03
    
      kibana:
        image: docker.elastic.co/kibana/kibana:7.6.2
        volumes:
          - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
        ports:
          - "5601:5601"
        networks:
          - elk
        depends_on:
          - elasticsearch01
          - elasticsearch02
          - elasticsearch03
        environment:
          - ELASTICSEARCH_HOSTS=http://es01:9200  # 7.x env var; ELASTICSEARCH_URL was the 6.x name
          - XPACK_SECURITY_ENABLED=false
    
    networks:
      elk:
        driver: bridge
    
    # Create the elasticsearch data directories
    mkdir elasticsearch/{es01,es02,es03}
    
    # the elasticsearch container runs as uid:gid 1000:1000
    chown -R 1000:1000 elasticsearch
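
    Note: Elasticsearch's bootstrap checks require the kernel setting vm.max_map_count to be at least 262144 on the host, or the es containers will exit shortly after startup (the memlock ulimits in the compose file cover the memory-lock check). On the ELK host:

    sysctl -w vm.max_map_count=262144
    # persist across reboots
    echo "vm.max_map_count=262144" >> /etc/sysctl.conf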
    
    # Configure kibana
    mkdir -p /data/docker-compose/elk/kibana/config && cd /data/docker-compose/elk/kibana/config
    
    cat kibana.yml 
    ---
    ## Default Kibana configuration from Kibana base image.
    ## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.ts
    #
    server.name: kibana
    server.host: 0.0.0.0
    elasticsearch.hosts: [ "http://es01:9200" ]
    #monitoring.ui.container.elasticsearch.enabled: true
    
    ## X-Pack security credentials
    #
    elasticsearch.username: elastic
    elasticsearch.password: changeme
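
    Note: since docker-compose.yml disables security (xpack.security.enabled: "false"), the elasticsearch.username / elasticsearch.password pair above is ignored; it only takes effect once X-Pack security is enabled.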
    
    # Configure logstash
    mkdir -p /data/docker-compose/elk/logstash/{config,pipeline}
    cd /data/docker-compose/elk/logstash/config
    
    cat logstash.yml 
    ---
    ## Default Logstash configuration from Logstash base image.
    ## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
    #
    http.host: "0.0.0.0"
    xpack.monitoring.elasticsearch.hosts: [ "http://es01:9200" ]
    
    ## X-Pack security credentials
    #
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.username: elastic
    xpack.monitoring.elasticsearch.password: changeme
    
    cd /data/docker-compose/elk/logstash/pipeline
    
    cat logstash.conf
    input {
            kafka {
                    bootstrap_servers => "172.16.16.149:9092"
                    group_id => "services"
                    consumer_threads => 5
                    decorate_events => true
                    topics_pattern => "docker-.*"
                    auto_offset_reset => "latest"
                    codec => json { charset => "UTF-8" }
            }
    
    }
    
    filter {
          mutate {
            remove_field => [ "@version", "stream", "container", "agent", "log", "host", "input", "ecs" ]
          }
        }
    
    ## Add your filters / logstash plugins configuration here
    
    output {
            elasticsearch {
                    hosts => "es01:9200"
                    user => "elastic"
                    password => "changeme"
                    index =>  "%{[@metadata][kafka][topic]}-%{+YYYY-MM-dd}"
            }
    }
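
    With decorate_events enabled, the kafka input stores the source topic under [@metadata][kafka][topic], which is why the index option references that field: it yields one index per topic per day. Once logs are flowing, the resulting indices can be listed with, for example:

    curl -s 'http://172.16.16.109:9200/_cat/indices/docker-*?v'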
    
    # Start the ELK stack
    cd /data/docker-compose/elk
    docker-compose up -d
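
    A few quick checks once the containers are up (IPs from the table above):

    # all three es nodes should have joined and the cluster should be green
    curl -s http://172.16.16.109:9200/_cluster/health?pretty

    # logstash monitoring API
    curl -s http://172.16.16.109:9600/?pretty

    # kibana status (also reachable in a browser at http://172.16.16.109:5601)
    curl -s http://172.16.16.109:5601/api/status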
    

    Install kafka and filebeat

    mkdir -p /data/docker-compose/{filebeat,kafka}
    
    # Configure kafka
    cd /data/docker-compose/kafka
    
    cat docker-compose.yml 
    version: "2"
    
    services:
      zookeeper:
        image: docker.io/bitnami/zookeeper:3.7
        container_name: zookeeper
        ports:
          - "2181:2181"
        volumes:
          - "zookeeper_data:/bitnami"
        environment:
          - ALLOW_ANONYMOUS_LOGIN=yes
      kafka:
        image: docker.io/bitnami/kafka:3
        container_name: kafka
        ports:
          - "9092:9092"
        volumes:
          - "kafka_data:/bitnami"
        environment:
          - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
          - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://172.16.16.149:9092  # change to your host's IP
          - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
          - ALLOW_PLAINTEXT_LISTENER=yes
        depends_on:
          - zookeeper
    
    volumes:
      zookeeper_data:
        driver: local
      kafka_data:
        driver: local
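
    Note: KAFKA_CFG_ADVERTISED_LISTENERS must carry an address clients can reach. Kafka clients bootstrap via the address they are given, then reconnect to whatever the broker advertises, so if this stayed at the container-internal name, filebeat and logstash (which connect to 172.16.16.149:9092) would fail right after the bootstrap step.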
    
    # Start kafka
    docker-compose up -d
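
    To confirm the broker answers on the advertised address, list its topics with the kafka-topics.sh script shipped in the bitnami image (it should print nothing yet, since no topic exists until filebeat starts producing):

    docker exec -it kafka kafka-topics.sh --bootstrap-server 172.16.16.149:9092 --list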
    
    # Configure filebeat
    mkdir -p /data/docker-compose/filebeat/config
    
    cd /data/docker-compose/filebeat/config
    
    cat filebeat.yml 
    setup.ilm.enabled: false
    filebeat.inputs:
    - type: docker  # deprecated in later 7.x releases in favor of the container input
      containers.ids:
        - "*"
      containers.paths:
        - "/var/lib/docker/containers/${data.docker.container.id}/*.log"
      multiline.pattern: '^[[:space:]]+(at|\.{3})\b|^Caused by:'
      multiline.negate: false
      multiline.match: after
    
    
    processors:
      - add_docker_metadata:
          host: "unix:///var/run/docker.sock"
    
    setup.template.name: "docker"
    setup.template.pattern: "docker-*"
    setup.template.enabled: false
    # Not needed on a first install; set to true when an existing index template must be updated
    setup.template.overwrite: false
    setup.template.settings:
      index.number_of_shards: 2
      index.number_of_replicas: 0
    output.kafka:
      hosts: ["172.16.16.149:9092"]  # 注意修改 kafka的地址
      worker: 12
      # maximum number of events batched into a single Kafka request (output.kafka default: 2048)
      bulk_max_size: 400
      topic: "docker-%{[container.name]}"
    
    
    cd /data/docker-compose/filebeat
    
    cat docker-compose.yml 
    version: '3.2'
    
    services:
      filebeat:
        image: docker.elastic.co/beats/filebeat:7.3.1
        user: root
        volumes:
            - ./config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
            - /data/docker/containers:/var/lib/docker/containers:ro  # host Docker data-root here is /data/docker; use /var/lib/docker if yours is the default
            - /var/run/docker.sock:/var/run/docker.sock:ro
        privileged: true
    
    # Start filebeat
    docker-compose up -d
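
    To verify the whole chain, start any container that logs to stdout and watch its topic appear in kafka and its index in elasticsearch; the container name below (and hence the docker-logger topic) is just an example:

    # generate some log lines
    docker run -d --name logger busybox sh -c 'while true; do echo hello; sleep 5; done'

    # a docker-logger topic should appear on the kafka host
    docker exec -it kafka kafka-topics.sh --bootstrap-server 172.16.16.149:9092 --list

    # and a docker-logger-YYYY-MM-dd index on the ELK host
    curl -s 'http://172.16.16.109:9200/_cat/indices/docker-*?v'

    Finally, create an index pattern matching docker-* in Kibana (http://172.16.16.109:5601) to browse the logs.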
    

  • Original post: https://www.cnblogs.com/klvchen/p/15668099.html