.NET---Exceptionless: a lightweight distributed log management platform
Official documentation: https://exceptionless.com/ and https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
-
It is recommended to deploy Elasticsearch, Redis, and Exceptionless separately rather than putting them all in a single docker-compose.yml file.
I. Environment preparation
1. Server file and kernel limits
echo "vm.max_map_count=262144" >> /etc/sysctl.conf sysctl -p echo "* soft nofile 65535 * hard nofile 65535" >> /etc/security/limits.conf
2. Install Docker and docker-compose
vim docker_install.sh
#!/bin/bash
curl http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce
[ ! -d /etc/docker ] && mkdir /etc/docker
[ ! -d /data/docker ] && mkdir -p /data/docker
cat > /etc/docker/daemon.json <<- EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "data-root": "/data/docker",
  "max-concurrent-downloads": 5,
  "registry-mirrors": [
    "https://pf5f57i3.mirror.aliyuncs.com",
    "http://harbor.xhyun.vip:81"
  ]
}
EOF
systemctl enable --now docker
bash docker_install.sh
Install docker-compose 1.29.2
curl -L https://github.com/docker/compose/releases/download/1.29.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version
II. Download files and modify configuration files
-
Elasticsearch
mkdir -p /data/elasticsearch/{data,plugins,secrets}
chown -R Bdszxhyun.Bdszxhyun /data/elasticsearch/
su - Bdszxhyun
$ echo "mypasswd" > /data/elasticsearch/secrets/bootstrapPassword.txt
$ chmod 600 /data/elasticsearch/secrets/bootstrapPassword.txt
# start a throwaway container only to copy out the default config directory
$ docker run -itd --restart=always --name es-test -p 59200:9200 -p 59300:9300 \
    -e "discovery.type=single-node" \
    -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
    -e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt \
    -v /data/elasticsearch/secrets/bootstrapPassword.txt:/run/secrets/bootstrapPassword.txt \
    docker.elastic.co/elasticsearch/elasticsearch:7.13.2
$ docker cp es-test:/usr/share/elasticsearch/config /data/elasticsearch/
$ docker rm -f es-test    # remove it so ports 59200/59300 are free for docker-compose
$ cat > /data/elasticsearch/docker-compose.yaml << EOF
version: '2'
services:
  elasticsearch:
    container_name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    ports:
      - "59200:9200"
    volumes:
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - /data/elasticsearch/secrets/bootstrapPassword.txt:/run/secrets/bootstrapPassword.txt
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "discovery.type=single-node"
      - "COMPOSE_PROJECT_NAME=elasticsearch-server"
      - "ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt"
    restart: always
EOF
cat > /data/elasticsearch/config/elasticsearch.yml << EOF
cluster.name: "docker-cluster"
network.host: 0.0.0.0
http.cors.allow-headers: Authorization
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
EOF
Start
$ cd /data/elasticsearch/
$ docker-compose up -d
$ docker exec -t elasticsearch bin/elasticsearch-plugin install mapper-size
$ docker-compose restart
Verify
$ curl -u "elastic:mypasswd" http://127.0.0.1:59200/
$ curl -XPUT -H "Content-Type:application/json" -u "elastic:mypasswd" http://localhost:59200/_cluster/settings -d '{"transient":{"cluster":{"max_shards_per_node":20000}}}'    # raise the shards-per-node limit
Deploy Kibana with docker-compose
https://www.elastic.co/guide/en/kibana/current/docker.html
$ mkdir -p /data/elasticsearch/kibana/config
$ echo 'server.host: "0"
elasticsearch.hosts: [ "http://192.168.0.250:59200" ]
elasticsearch.username: "elastic"
elasticsearch.password: "mypasswd"
monitoring.ui.container.elasticsearch.enabled: true
xpack.security.encryptionKey: "122333444455555666666777777788888888"
xpack.reporting.encryptionKey: "a_random_string"
xpack.reporting.capture.browser.chromium.disableSandbox: true
i18n.locale: "zh-CN"' > /data/elasticsearch/kibana/config/kibana.yml
$ cat > /data/elasticsearch/kibana/docker-compose.yaml << EOF
version: '2'
services:
  kibana:
    container_name: kibana
    hostname: kibana
    image: docker.elastic.co/kibana/kibana:7.13.2
    ports:
      - "5601:5601"
    volumes:
      - /data/elasticsearch/kibana/config:/usr/share/kibana/config
    environment:
      - ELASTICSEARCH_URL=http://192.168.0.250:59200
    restart: always
networks:
  default:
    external:
      name: elasticsearch_default
EOF
$ cd /data/elasticsearch/kibana;docker-compose up -d
Verify
http://192.168.0.250:5601
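If you prefer a command-line check, Kibana exposes a status API; a quick sketch (assuming basic auth with the elastic user is accepted, since X-Pack security is enabled):
curl -u "elastic:mypasswd" http://192.168.0.250:5601/api/status    # returns a JSON status report once Kibana is up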
-
Redis
[ ! -d /data/redis/data ] && mkdir -p /data/redis/data
[ ! -d /data/redis/conf ] && mkdir -p /data/redis/conf
cat > /data/redis/conf/redis.conf << EOF
bind 0.0.0.0
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel verbose
logfile stdout
databases 64
requirepass Baodian1608
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
maxmemory 2GB
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
# standalone instance with no replica attached: requiring a replica would reject every write
min-replicas-to-write 0
min-replicas-max-lag 10
EOF
docker run -itd --restart=always --privileged=true -p 56379:6379 \
    --net=elasticsearch_default \
    -v /data/redis/conf:/etc/redis \
    -v /data/redis/data:/data \
    --name redis redis:5.0.12 \
    redis-server /etc/redis/redis.conf --appendonly yes
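A quick check that Redis came up with this configuration (using the requirepass value from redis.conf above):
docker exec -it redis redis-cli -a Baodian1608 ping    # expect: PONG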
-
Exceptionless
curl -L https://github.com/exceptionless/Exceptionless/archive/refs/heads/master.zip -o Exceptionless.zip
unzip Exceptionless.zip -d /data/
cd /data/Exceptionless-master
vim docker-compose.yml    # host IP: 192.168.10.40
version: '3.7'
services:
api:
build:
context: .
target: api
image: exceptionless/api
container_name: api
environment:
EX_AppMode: Production
EX_BaseURL: http://192.168.0.250:55001/#! #URL to UI
EX_ConnectionStrings__Cache: provider=redis
EX_ConnectionStrings__Elasticsearch: server=http://192.168.0.250:59200;username=elastic;password=mypasswd
EX_ConnectionStrings__Email: smtps://1916989848%40qq.com:vvotbaaiifrweagh@smtp.qq.com:465
EX_SmtpFrom: 1916989848@qq.com
EX_ConnectionStrings__MessageBus: provider=redis
EX_ConnectionStrings__Queue: provider=redis
EX_ConnectionStrings__Redis: server=192.168.0.250:56379,password=Baodian1608,defaultDatabase=1,abortConnect=false
EX_ConnectionStrings__Storage: provider=folder;path=/app/storage
EX_RunJobsInProcess: 'true'
EX_MaximumRetentionDays: 2
ports:
- 5000:80 # This can be commented out if using reverse proxy.
volumes:
- appdata:/app/storage
shm_size: '1gb'
jobs:
depends_on:
- api
build:
context: .
target: job
image: exceptionless/job
container_name: jobs
environment:
EX_AppMode: Production
EX_BaseURL: http://192.168.0.250:55001/#! #URL to UI
EX_ConnectionStrings__Cache: provider=redis
EX_ConnectionStrings__Elasticsearch: server=http://192.168.0.250:59200;username=elastic;password=mypasswd
EX_ConnectionStrings__Email: smtps://1916989848%40qq.com:vvotbaaiifrweagh@smtp.qq.com:465
EX_SmtpFrom: 1916989848@qq.com
EX_ConnectionStrings__MessageBus: provider=redis
EX_ConnectionStrings__Queue: provider=redis
EX_ConnectionStrings__Redis: server=192.168.0.250:56379,password=Baodian1608,defaultDatabase=1,abortConnect=false
EX_ConnectionStrings__Storage: provider=folder;path=/app/storage
EX_MaximumRetentionDays: 2
volumes:
- appdata:/app/storage
shm_size: '1gb'
ui:
image: exceptionless/ui
container_name: ui
environment:
AppMode: Production
EX_ApiUrl: http://192.168.0.250:5000
ports:
- 55001:80 # This can be commented out if using reverse proxy.
shm_size: '1gb'
volumes:
appdata:
driver: local
docker-compose up -d
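Once the stack is up, it is worth confirming that all three containers stay running and that the api container can reach Elasticsearch and Redis (a suggested check, using the container names defined above):
docker-compose ps              # api, jobs and ui should all show "Up"
docker logs --tail 50 api      # watch for Elasticsearch/Redis connection errors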
III. Register, log in, and configure
1. Open http://192.168.10.40:55001/ in a browser and register an account first.
2. Create a project, choose the project type, obtain the API key, and configure it in your application code.
3. Verify
# the Exceptionless API authenticates with the project API key (Bearer token), not the Elasticsearch credentials
curl --location --request POST "http://192.168.0.250:5000/api/v2/events" \
    --header 'Authorization: Bearer 1SNg9DGOrpK1xtCMunJjvHGcGwdTPScAE8ViZwE8' \
    --header 'Content-Type: application/json' \
    --data '{ "message": "Exceptionless is amazing!" }'
IV. Email configuration
The SMTP settings are already provided in docker-compose.yml via EX_ConnectionStrings__Email and EX_SmtpFrom; adjust them there if a different mail account is needed.
V. Optimization strategy
Because the appdata volume is persisted with the local driver and Exceptionless writes a very large number of small files, inode usage grows quickly. A cleanup policy for old archive files is therefore needed to free inodes.
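Before and after cleanup you can check inode usage with df -i (the path assumes the default Docker volume location used in the script below):
df -i /var/lib/docker    # watch the IUse% column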
[ ! -d /data/docker/scripts ] && mkdir -p /data/docker/scripts
cat > /data/docker/scripts/loginode_del.sh << 'EOF'
#!/bin/bash
Year=`date +%y`
Year_1ago=`date +%y -d "-1 years"`
Month=`date +%m`
Month_1ago=`date +%m -d "-1 months"`
Day=`date +%d`
Day_1ago=`date +%d -d "-1 days"`
Hour_2ago=`date +%H -d "-10 hours"`
DIR=/var/lib/docker/volumes/exceptionless-614_appdata/_data/archive    # path of the mounted appdata volume
rm -rf $DIR/$Year/$Month/$Day/$Hour_2ago
[ -d $DIR/$Year/$Month/$Day_1ago ] && rm -rf $DIR/$Year/$Month/$Day_1ago
[ -d $DIR/$Year/$Month_1ago ] && rm -rf $DIR/$Year/$Month_1ago
[ -d $DIR/$Year_1ago ] && rm -rf $DIR/$Year_1ago
EOF
# crontab -e
0 */2 * * * /bin/bash /data/docker/scripts/loginode_del.sh
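To verify, run the script once by hand and confirm the cron entry was saved:
bash -x /data/docker/scripts/loginode_del.sh    # trace which directories get removed
crontab -l | grep loginode_del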