Docker-compose部署Elasticsearch+Kibana+Filebeat+APM(7.13.2)
Docker-compose部署Elasticsearch
- 集群
# Three-node Elasticsearch 7.13.2 cluster.
version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # Elastic requires Xms == Xmx (was -Xmx1024m, which violates the
      # documented heap-sizing rule and defeats memory_lock sizing).
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      # Quoted so YAML cannot misparse the colon-separated mapping.
      - "9200:9200"
    networks:
      - elastic
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es02
    environment:
      # Fixed typo: was "es02s", which never matches the es02 entry in
      # cluster.initial_master_nodes and breaks cluster bootstrapping.
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic
      - elasticsearch_default
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - elastic
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
networks:
  elastic:
    driver: bridge
  # es02 joins this pre-existing network (shared with the Kibana/APM stack
  # later in this guide); it must be declared here as external, otherwise
  # docker-compose rejects the file with an undefined-network error.
  elasticsearch_default:
    external: true
- 单节点
# grep vm.max_map_count /etc/sysctl.conf
vm.max_map_count=262144
# sysctl -w vm.max_map_count=262144
# echo "elk soft memlock unlimited" >> /etc/security/limits.conf
# echo "elk hard memlock unlimited" >> /etc/security/limits.conf
mkdir -p /data/elasticsearch/{data,plugins}
chown -R Bdszjenkins.Bdszjenkins /data/elasticsearch/
$ docker run -itd --name es-test -p 9200:9200 -p 9300:9300 \
    -e "discovery.type=single-node" -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
    docker.elastic.co/elasticsearch/elasticsearch:7.13.2
$ docker cp es-test:/usr/share/elasticsearch/config /data/elasticsearch/
$ cat > /data/elasticsearch/docker-compose.yaml << EOF
version: '2'
services:
  elasticsearch:
    container_name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    ports:
      - "9200:9200"
    volumes:
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "discovery.type=single-node"
      - "COMPOSE_PROJECT_NAME=elasticsearch-server"
    restart: always
EOF
启动
$ docker-compose up -d
验证
$ curl http://127.0.0.1:9200/
curl -XPUT -H "Content-Type:application/json" http://localhost:9200/_cluster/settings -d '{"transient":{"cluster":{"max_shards_per_node":20000}}}' #增大分片
POST /_license/start_trial?acknowledge=true #免费试用30天
Docker-compose部署kibana
该es上
mkdir -p /data/elasticsearch/kibana
cd /data/elasticsearch/kibana
docker run --name kibana --net elasticsearch_default -p 5601:5601 \
    -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \
    docker.elastic.co/kibana/kibana:7.13.2
docker cp kibana:/usr/share/kibana/config ./
echo 'i18n.locale: "zh-CN"' >> ./config/kibana.yml
$ cat > /data/elasticsearch/kibana/docker-compose.yaml << EOF
version: '2'
services:
  kibana:
    container_name: kibana
    hostname: kibana
    image: docker.elastic.co/kibana/kibana:7.13.2
    ports:
      - "5601:5601"
    volumes:
      - /data/elasticsearch/kibana/config:/usr/share/kibana/config
    environment:
      # Kibana 7.x uses ELASTICSEARCH_HOSTS (ELASTICSEARCH_URL is the
      # obsolete 6.x variable and is ignored by this image).
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    restart: always
networks:
  default:
    external:
      name: elasticsearch_default
EOF
$ docker-compose up -d
验证
http://127.0.0.1:5601
Docker-compose部署APM-Server
该es上
mkdir -p /data/elasticsearch/APM/config; cd /data/elasticsearch/APM/
docker run -d -p 8200:8200 --name=apm-server --user=apm-server \
    --net=elasticsearch_default docker.elastic.co/apm/apm-server:7.13.2 \
    --strict.perms=false -e -E output.elasticsearch.hosts=["elasticsearch:9200"]
docker cp apm-server:/usr/share/apm-server/ ./config/
chown -R Bdszjenkins.Bdszjenkins /data/elasticsearch/APM
$ cat > /data/elasticsearch/APM/docker-compose.yaml << EOF
version: '2'
services:
  apm_server:
    container_name: apm_server
    image: docker.elastic.co/apm/apm-server:7.13.2
    command: --strict.perms=false
    ports:
      - "8200:8200"
    volumes:
      - /data/elasticsearch/APM/config:/usr/share/apm-server
    environment:
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    restart: always
networks:
  default:
    external:
      name: elasticsearch_default
EOF
docker-compose up -d
部署filebeat
需采集数据的ecs
mkdir -p /data/filebeat
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.13.2-linux-x86_64.tar.gz
# -C is required: without it, tar treats the path as an archive member to
# extract instead of a destination directory.
tar xzvf filebeat-7.13.2-linux-x86_64.tar.gz -C /data/filebeat/
cd /data/filebeat/filebeat-7.13.2-linux-x86_64/
cp filebeat.yml filebeat.yml.bak
cat > /data/filebeat/filebeat-7.13.2-linux-x86_64/filebeat.yml << EOF
filebeat.inputs:
- type: docker
  combine_partial: true
  containers:
    path: "/var/lib/docker/containers"
    # stream: "stdout"
    ids:
      - "*"
processors:
- decode_json_fields:
    fields: ['message']
    target: ''
    overwrite_keys: true
output.elasticsearch:
  hosts: ["xxx.xxx.xxx.xxx:9200"]
setup.kibana:
  host: "xxx.xxx.xxx.xxx:5601"
EOF
启动
./filebeat setup -e
nohup ./filebeat -c filebeat.yml -e &
filebeat加入systemd管理
cat > /usr/lib/systemd/system/filebeat.service << EOF
[Unit]
Description=filebeat server daemon
Documentation=/data/filebeat/filebeat-7.13.2-linux-x86_64/filebeat -help
Wants=network-online.target
After=network-online.target

[Service]
User=root
Group=root
ExecStart=/data/filebeat/filebeat-7.13.2-linux-x86_64/filebeat -c /data/filebeat/filebeat-7.13.2-linux-x86_64/filebeat.yml --path.logs /data/filebeat/filebeat-7.13.2-linux-x86_64/logs
Restart=always

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now filebeat
Filebeat7自定义index的一个坑
- 从Filebeat 6.4.0版本变成Filebeat 7.x版本号,默认创建的索引名无法正常使用,在ES中生成的索引,并不是我指定的log-xxx,而是filebeat-xxx,明明配置文件指定的是log-xxx。
- ElasticStack从2019年1月29日的6.6.0版本的开始,引入了索引生命周期管理的功能,新版本的Filebeat则默认的配置开启了ILM,导致索引的命名规则被ILM策略控制。
加上这个配置就好了:
# filebeat 配置关闭 ILM 即可解决 Index Pattern 不生效的问题
setup.ilm.enabled: false
即
filebeat.inputs:
- type: docker
  combine_partial: true
  containers:
    path: "/var/lib/docker/containers"
    # stream: "stdout"
    ids:
      - "*"
processors:
- decode_json_fields:
    fields: ['message']
    target: ''
    overwrite_keys: true
setup.ilm.enabled: false
setup.template.enabled: false
setup.template.name: "dev-filebeat"
setup.template.pattern: "dev-filebeat-*"
output.elasticsearch:
  hosts: ["xxx.xxx.xxx.xxx:9200"]
  index: "dev-filebeat-%{+yyyy.MM.dd}"
setup.kibana:
  host: "xxx.xxx.xxx.xxx:5601"