1、Logstash collects logs via Redis. Pipeline: logstash > redis > logstash > es
K8s log volumes are mounted via glusterfs; to unmount one: sudo umount -t glusterfs 192.168.10.81:vol_b4c5f22a21e59e4388a65f9710b0e5e3 /app/bms_data_tmp
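For reference, re-mounting the same volume would look roughly like the sketch below; the server address, volume name, and mount point are taken from the umount command above, and any extra mount options are left to the environment:

# re-attach the glusterfs log volume (sketch; add mount options as your environment requires)
sudo mount -t glusterfs 192.168.10.81:vol_b4c5f22a21e59e4388a65f9710b0e5e3 /app/bms_data_tmp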
Install Logstash on the log-collection machine:
[root@H-LAPP-V257 config]# cat k8s_logstash.conf
input {
  file {
    path => ["/var/lib/kubelet/pods/*/volumes/kubernetes.io~glusterfs/pvc-cb9dc6bb-f3b2-11e8-a65f-801844f11504/lcrm-activity/*.log"]
    type => "lcrm_activity"
    start_position => "end"
    codec => multiline {
      charset => "UTF-8"
      pattern => "^.{32}\s{1}"
      negate => true
      what => "previous"
    }
  }
  file {
    path => ["/var/lib/kubelet/pods/*/volumes/kubernetes.io~glusterfs/pvc-b54723a4-bcb6-11e8-a65f-801844f11504/*.log"]
    type => "image"
    start_position => "end"
    codec => multiline {
      charset => "UTF-8"
      pattern => "^%{TIMESTAMP_ISO8601}"
      negate => true
      what => "previous"
    }
  }
  file {
    path => ["/var/lib/kubelet/pods/*/volumes/kubernetes.io~glusterfs/pvc-aa010360-eb5e-11ea-9f48-801844f11504/*.log"]
    type => "wk-app"
    start_position => "end"
    codec => multiline {
      charset => "UTF-8"
      pattern => "^%{TIMESTAMP_ISO8601}"
      negate => true
      what => "previous"
    }
  }
}
filter {
  mutate {
    replace => ["host", "192.168.9.175"]
  }
}
output {
  redis {
    host => "192.168.9.151"   # redis address
    port => "6379"
    data_type => "list"
    key => "logstash:redis"
  }
}
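To run this shipper and confirm events are reaching Redis, something like the following works (the Logstash install path is an assumption; the Redis host and key come from the output block above):

# start the shipper in the background (install dir /usr/share/logstash is an assumption)
nohup /usr/share/logstash/bin/logstash -f k8s_logstash.conf &
# the list length should grow as new log lines are shipped
redis-cli -h 192.168.9.151 -p 6379 llen logstash:redis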
2、Redis setup
[yyapp@H-LOTH-V151 redis]$ cat redis.conf |grep -v "#" |grep -v "^$"
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 60
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/log/redis/redis.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
maxmemory 6gb
no-appendfsync-on-rewrite no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
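A quick way to bring Redis up with this config and confirm it is serving (the config path is an assumption based on the directory shown in the prompt above):

# daemonize is yes in the config, so redis-server backgrounds itself
redis-server ./redis.conf
# expect PONG
redis-cli -p 6379 ping
# logfile path from the config above
tail -f /log/redis/redis.log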
3、Logstash output to ES
[yyapp@H-LOTH-V151 config]$ cat log4j_to_es.conf
input {
  redis {
    host => "127.0.0.1"
    type => "redis-input"
    data_type => "list"
    key => "logstash:redis"
    codec => "json"
    batch_count => 125
    threads => 8
  }
}
output {
  elasticsearch {
    action => "index"
    hosts => ["192.168.10.40","192.168.10.39","192.168.9.151"]
    index => "%{type}_log_%{+YYYY.MM.dd}"
    flush_size => 5000
    idle_flush_time => 10
  }
}
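After starting this indexer, indexing can be checked against any of the ES hosts listed above, and the Redis queue should drain while the indexer keeps up, for example:

# daily indices named %{type}_log_YYYY.MM.dd should start appearing
curl -s 'http://192.168.10.40:9200/_cat/indices?v' | grep _log_
# queue length should trend toward 0 when the indexer keeps pace with the shipper
redis-cli -p 6379 llen logstash:redis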
4、ES configuration; make sure the node has 1.5 TB of disk available
[yyapp@HM-LOTH-V168 config]$ cat elasticsearch.yml |grep -v "#" |grep -v "^$"
cluster.name: yylending_elk_cluster
node.name: node_master_02
node.master: true
node.data: false
http.enabled: true
http.cors.enabled: true
http.cors.allow-origin: "*"
path.data: /app
path.logs: /log/es-logs
network.host: 0.0.0.0
http.port: 9200
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping.unicast.hosts: ["192.168.10.39","192.168.10.40","192.168.9.151","192.168.10.87","192.168.10.88"]
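Once all nodes are up, cluster state and per-node disk usage can be verified with the standard cat APIs (any node address from the unicast list above will do):

curl -s 'http://192.168.9.151:9200/_cluster/health?pretty'
curl -s 'http://192.168.9.151:9200/_cat/nodes?v'
# shows disk used/available per data node, useful for checking the 1.5 TB requirement
curl -s 'http://192.168.9.151:9200/_cat/allocation?v'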