Enterprise Log Analysis Platform (11): ELK Stack Production Cases (Part 2)

4. Introducing Redis


4.1 Lab environment

Hostname          IP address         Purpose
ES1               192.168.200.191    elasticsearch-node1
ES2               192.168.200.192    elasticsearch-node2
ES3               192.168.200.193    elasticsearch-node3
Logstash-Kibana   192.168.200.194    log visualization server
Web               192.168.200.195    client simulating the various logs to be collected
     

4.2 Install and deploy Redis on the Logstash-Kibana server

     
# Install the EPEL repository
[root@Logstash-Kibana ~]# yum -y install epel-release
# Install redis via yum
[root@Logstash-Kibana ~]# yum -y install redis
[root@Logstash-Kibana ~]# redis-server --version
Redis server v=3.2.12 sha=00000000:0 malloc=jemalloc-3.6.0 bits=64 build=3dc3425a3049d2ef
# Modify the redis configuration file
[root@Logstash-Kibana ~]# cp /etc/redis.conf{,.bak}
[root@Logstash-Kibana ~]# cat -n /etc/redis.conf.bak | sed -n '61p;480p'
    61  bind 127.0.0.1
   480  # requirepass foobared
[root@Logstash-Kibana ~]# cat -n /etc/redis.conf | sed -n '61p;480p'
    61  bind 0.0.0.0
   480  requirepass yunjisuan
# Start redis-server
[root@Logstash-Kibana ~]# systemctl start redis
[root@Logstash-Kibana ~]# netstat -antup | grep redis
tcp        0      0 0.0.0.0:6379      0.0.0.0:*      LISTEN      15822/redis-server
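Before pointing any shipper at Redis, it is worth confirming that the server accepts remote, password-authenticated connections. A minimal check from the client machine, assuming redis-cli is available there as well (for example from the same EPEL redis package):

[root@WebServer ~]# redis-cli -h 192.168.200.194 -p 6379 -a yunjisuan ping
PONG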
     

4.3 Install Logstash on the Web server

     
# Install JDK 1.8 via yum
[root@WebServer ~]# yum -y install java-1.8.0-openjdk
# Add the ELK yum repository file
[root@WebServer ~]# vim /etc/yum.repos.d/elastic.repo
[root@WebServer ~]# cat /etc/yum.repos.d/elastic.repo
[elastic-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
# Install logstash and filebeat via yum
[root@WebServer ~]# yum -y install logstash filebeat
# Create the Logstash config that collects data and writes it to Redis
[root@WebServer ~]# vim /etc/logstash/conf.d/logstash-to-redis.conf
[root@WebServer ~]# cat /etc/logstash/conf.d/logstash-to-redis.conf
input {
  file {
    path => ["/var/log/messages"]
    type => "system"
    tags => ["syslog","test"]
    start_position => "beginning"
  }
  file {
    path => ["/var/log/audit/audit.log"]
    type => "system"
    tags => ["auth","test"]
    start_position => "beginning"
  }
}
filter {
}
output {
  redis {
    host => ["192.168.200.194:6379"]
    password => "yunjisuan"
    db => "0"
    data_type => "list"
    key => "logstash"
  }
}
# Start Logstash on the WebServer
[root@WebServer ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-to-redis.conf
# Verify that Logstash has written data into Redis
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan info Keyspace
# Keyspace
db0:keys=1,expires=0,avg_ttl=0
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan scan 0
1) "0"
2) 1) "logstash"
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan lrange logstash 0 1
1) "{"host":"WebServer","message":"Jul 3 03:50:54 localhost journal: Runtime journal is using 6.0M (max allowed 48.7M, trying to leave 73.0M free of 481.1M available \xe2\x86\x92 current limit 48.7M).","type":"system","@version":"1","@timestamp":"2018-08-24T13:03:55.486Z","path":"/var/log/messages","tags":["syslog","test"]}"
2) "{"host":"WebServer","message":"type=DAEMON_START msg=audit(1530561057.301:6300): op=start ver=2.8.1 format=raw kernel=3.10.0-862.el7.x86_64 auid=4294967295 pid=625 uid=0 ses=4294967295 subj=system_u:system_r:auditd_t:s0 res=success","type":"system","@version":"1","@timestamp":"2018-08-24T13:03:55.478Z","path":"/var/log/audit/audit.log","tags":["auth","test"]}"
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen logstash
(integer) 9068
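If the pipeline does not start cleanly, the configuration can be syntax-checked first. A quick sanity step using Logstash's own flag (it only validates the config and then exits):

[root@WebServer ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-to-redis.conf --config.test_and_exit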
     

4.4 Configure Logstash on the Logstash-Kibana server to read data from Redis

     
# Perform the following on Logstash-Kibana
[root@Logstash-Kibana ~]# vim /etc/logstash/conf.d/logstash-from-redis.conf
[root@Logstash-Kibana ~]# cat /etc/logstash/conf.d/logstash-from-redis.conf
input {
  redis {
    host => "192.168.200.194"
    port => 6379
    password => "yunjisuan"
    db => "0"
    data_type => "list"
    key => "logstash"
  }
}
filter {
}
output {
  if [type] == "system" {
    if [tags][0] == "syslog" {
      elasticsearch {
        hosts => ["http://192.168.200.191:9200","http://192.168.200.192:9200","http://192.168.200.193:9200"]
        index => "logstash-mr_chen-syslog-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
    else if [tags][0] == "auth" {
      elasticsearch {
        hosts => ["http://192.168.200.191:9200","http://192.168.200.192:9200","http://192.168.200.193:9200"]
        index => "logstash-mr_chen-auth-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
  }
}
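Once this consumer pipeline has run for a while, the per-tag indices should appear in the cluster. One way to confirm from the shell, using Elasticsearch's _cat API (any of the three nodes works):

[root@Logstash-Kibana ~]# curl -s 'http://192.168.200.191:9200/_cat/indices?v' | grep mr_chen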
     

4.5 Start the graphical ES plugin (elasticsearch-head) on ES1 and clear all existing indices

     
[root@ES1 ~]# cd elasticsearch-head/
[root@ES1 elasticsearch-head]# npm run start
> elasticsearch-head@0.0.0 start /root/elasticsearch-head
> grunt server
>> Local Npm module "grunt-contrib-jasmine" not found. Is it installed?
Running "connect:server" (connect) task
Waiting forever...
Started connect web server on http://localhost:9100
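The screenshots below clear the old indices through the head UI. If you prefer the command line, the same thing can be done with the delete-index API; a hedged sketch, assuming the cluster still allows wildcard destructive actions (the 6.x default):

[root@ES1 ~]# curl -XDELETE 'http://192.168.200.191:9200/logstash-*'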

(screenshots: clearing the existing indices in the elasticsearch-head web UI)

     

4.6 Start Logstash on the Logstash-Kibana server and check Kibana

     
# Start Logstash
[root@Logstash-Kibana ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-from-redis.conf
# Check the keys in Redis
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan info Keyspace
# Keyspace
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen logstash
(integer) 0

We find that the key in Redis has disappeared entirely. That is because Redis acts here as a lightweight message queue: the Logstash instance writing into Redis is the producer, and the Logstash instance reading from Redis is the consumer.
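The queue is just a Redis list: the producer pushes JSON events onto one end and the consumer pops them off the other, so the key vanishes once the list is empty (the Logstash redis plugins do essentially the same push/pop). A minimal illustration with redis-cli, using a throwaway key name (demo-queue is made up for this example) so the real pipeline is untouched:

[root@Logstash-Kibana ~]# redis-cli -a yunjisuan rpush demo-queue '{"msg":"event-1"}' '{"msg":"event-2"}'
(integer) 2
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen demo-queue
(integer) 2
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan lpop demo-queue      # consumer side
"{\"msg\":\"event-1\"}"
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan del demo-queue       # clean up the demo key
(integer) 1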

After the indices have been re-created, they look as shown below.

(screenshots: the re-created indices and the collected data in Kibana)

Check the size of the index data in Elasticsearch:

(screenshot: index data sizes shown in elasticsearch-head)

     

5. Introducing Filebeat

Filebeat: lightweight, but it cannot do regex-based field extraction.
Logstash: supports regex extraction (grok), but it is heavyweight and depends on Java.

     

5.1 Install Filebeat on the WebServer via yum

     
# Install filebeat
[root@WebServer ~]# yum -y install filebeat
# Modify the filebeat configuration file
[root@WebServer ~]# cp /etc/filebeat/filebeat.yml{,.bak}
[root@WebServer ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml.bak > /etc/filebeat/filebeat.yml
# Edit the configuration file so it looks like this
[root@WebServer ~]# vim /etc/filebeat/filebeat.yml
[root@WebServer ~]# cat /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /var/log/messages
  tags: ["syslog","test"]
  fields:
    type: system
  fields_under_root: true
- type: log
  paths:
    - /var/log/audit/audit.log
  tags: ["auth","test"]
  fields:
    type: system
  fields_under_root: true
output.redis:
  hosts: ["192.168.200.194"]
  password: "yunjisuan"
  key: "filebeat"
  db: 0
  datatype: list
# Start filebeat to test data collection
[root@WebServer ~]# systemctl start filebeat
# Check whether Redis on the Logstash-Kibana server has received data
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 9109
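Before relying on systemd to keep Filebeat running, the configuration syntax can be validated with Filebeat's built-in check (available in the 6.x packages; a small optional sanity step):

[root@WebServer ~]# filebeat test config -c /etc/filebeat/filebeat.yml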

Use the graphical tool (elasticsearch-head) to clear the indices in ES, then start Logstash again to read the Redis data and write it into ES.

     
# Modify the Logstash configuration file
[root@Logstash-Kibana ~]# vim /etc/logstash/conf.d/logstash-from-redis.conf
[root@Logstash-Kibana ~]# cat /etc/logstash/conf.d/logstash-from-redis.conf
input {
  redis {
    host => "192.168.200.194"
    port => 6379
    password => "yunjisuan"
    db => "0"
    data_type => "list"
    key => "filebeat"        # only this line changes: read the filebeat key from Redis
  }
}
filter {
}
output {
  if [type] == "system" {
    if [tags][0] == "syslog" {
      elasticsearch {
        hosts => ["http://192.168.200.191:9200","http://192.168.200.192:9200","http://192.168.200.193:9200"]
        index => "logstash-mr_chen-syslog-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
    else if [tags][0] == "auth" {
      elasticsearch {
        hosts => ["http://192.168.200.191:9200","http://192.168.200.192:9200","http://192.168.200.193:9200"]
        index => "logstash-mr_chen-auth-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
  }
}
# After clearing the ES data, start Logstash to consume the Redis data
[root@Logstash-Kibana ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-from-redis.conf
# Watch the filebeat key being consumed
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 8359
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 8109
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 7984
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 7859
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 7359
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 5234
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 4484
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 3734
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 2984
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 2484
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 1984
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 1609
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 984
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 0
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 0
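Rather than re-running llen by hand, the drain can be observed continuously; a small convenience, assuming the watch utility is installed:

[root@Logstash-Kibana ~]# watch -n 2 "redis-cli -a yunjisuan llen filebeat"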
     

6. Production application case (Filebeat + Redis + ELK)

Hostname          IP address         Purpose
ES1               192.168.200.191    elasticsearch-node1
ES2               192.168.200.192    elasticsearch-node2
ES3               192.168.200.193    elasticsearch-node3
Logstash-Kibana   192.168.200.194    log visualization server
WebServer         192.168.200.195    client simulating the various logs to be collected
     

6.1 Collecting Nginx logs

     

6.1.1 Deploy the Nginx web server

     
[root@WebServer ~]# yum -y install pcre-devel openssl-devel
[root@WebServer ~]# tar xf nginx-1.10.2.tar.gz -C /usr/src/
[root@WebServer ~]# cd /usr/src/nginx-1.10.2/
[root@WebServer nginx-1.10.2]# useradd -s /sbin/nologin -M nginx
[root@WebServer nginx-1.10.2]# ./configure --user=nginx --group=nginx --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
[root@WebServer nginx-1.10.2]# make && make install
[root@WebServer nginx-1.10.2]# ln -s /usr/local/nginx/sbin/* /usr/local/sbin/
[root@WebServer nginx-1.10.2]# which nginx
/usr/local/sbin/nginx
[root@WebServer nginx-1.10.2]# nginx -v
nginx version: nginx/1.10.2
[root@WebServer ~]# cd /usr/local/nginx/
[root@WebServer nginx]# egrep -v "#|^$" conf/nginx.conf.default > conf/nginx.conf
[root@WebServer nginx]# vim conf/nginx.conf
[root@WebServer nginx]# cat conf/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    log_format json '{ "@timestamp":"$time_iso8601", '
                    '"remote_addr":"$remote_addr",'
                    '"remote_user":"$remote_user",'
                    '"body_bytes_sent":"$body_bytes_sent",'
                    '"request_time":"$request_time",'
                    '"status":"$status",'
                    '"request_uri":"$request_uri",'
                    '"request_method":"$request_method",'
                    '"http_referer":"$http_referer",'
                    '"body_bytes_sent":"$body_bytes_sent",'
                    '"http_x_forwarded_for":"$http_x_forwarded_for",'
                    '"http_user_agent":"$http_user_agent"}';
    access_log logs/access_main.log main;   # write access logs in the main format
    access_log logs/access_json.log json;   # write access logs in the json format
    sendfile        on;
    keepalive_timeout  65;
    server {
        listen       80;
        server_name  www.yunjisuan.com;
        location / {
            root   html/www;
            index  index.html index.htm;
        }
    }
}
[root@WebServer nginx]# mkdir -p html/www
[root@WebServer nginx]# echo "welcome to yunjisuan" > html/www/index.html
[root@WebServer nginx]# cat html/www/index.html
welcome to yunjisuan
[root@WebServer nginx]# /usr/local/nginx/sbin/nginx
[root@WebServer nginx]# netstat -antup | grep nginx
tcp        0      0 0.0.0.0:80      0.0.0.0:*      LISTEN      14716/nginx: master
[root@WebServer nginx]# curl 192.168.200.195
welcome to yunjisuan
[root@WebServer nginx]# curl 192.168.200.195
welcome to yunjisuan
[root@WebServer nginx]# cat logs/access_main.log    # view the main-format access log
192.168.200.195 - - [25/Aug/2018:23:42:44 +0800] "GET / HTTP/1.1" 200 21 "-" "curl/7.29.0" "-"
192.168.200.195 - - [25/Aug/2018:23:42:45 +0800] "GET / HTTP/1.1" 200 21 "-" "curl/7.29.0" "-"
[root@WebServer nginx]# cat logs/access_json.log    # view the json-format access log
{ "@timestamp":"2018-08-25T23:42:44+08:00", "remote_addr":"192.168.200.195","remote_user":"-","body_bytes_sent":"21","request_time":"0.000","status":"200","request":"GET / HTTP/1.1","request_method":"GET","http_referer":"-","body_bytes_sent":"21","http_x_forwarded_for":"-","http_user_agent":"curl/7.29.0"}
{ "@timestamp":"2018-08-25T23:42:45+08:00", "remote_addr":"192.168.200.195","remote_user":"-","body_bytes_sent":"21","request_time":"0.000","status":"200","request":"GET / HTTP/1.1","request_method":"GET","http_referer":"-","body_bytes_sent":"21","http_x_forwarded_for":"-","http_user_agent":"curl/7.29.0"}
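Since the Logstash json filter later relies on each line of access_json.log being valid JSON, it can be worth spot-checking one entry. A quick check, assuming Python is present on the host:

[root@WebServer nginx]# tail -n 1 logs/access_json.log | python -m json.tool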
     

6.1.2 Modify the Filebeat configuration on the WebServer

     
# Edit the filebeat configuration so it looks like this
[root@WebServer nginx]# cat /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /usr/local/nginx/logs/access_json.log    # collect the json-format access log
  tags: ["access"]
  fields:
    app: www
    type: nginx-access-json
  fields_under_root: true
- type: log
  paths:
    - /usr/local/nginx/logs/access_main.log    # collect the main-format access log
  tags: ["access"]
  fields:
    app: www
    type: nginx-access
  fields_under_root: true
- type: log
  paths:
    - /usr/local/nginx/logs/error.log          # collect the error log
  tags: ["error"]
  fields:
    app: www
    type: nginx-error
  fields_under_root: true
output.redis:                                  # output to Redis
  hosts: ["192.168.200.194"]
  password: "yunjisuan"
  key: "filebeat"
  db: 0
  datatype: list
# Start filebeat
[root@WebServer nginx]# systemctl start filebeat
# Check the key stored in Redis on the Logstash-Kibana server
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 7
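To see what Filebeat actually queued, and which type each event carries before Logstash routes it, one event can be inspected without consuming it:

[root@Logstash-Kibana ~]# redis-cli -a yunjisuan lrange filebeat 0 0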
     

6.1.3 Modify the Logstash configuration on the Logstash-Kibana server

     
# Edit the Logstash configuration so it looks like this
[root@Logstash-Kibana ~]# vim /etc/logstash/conf.d/logstash-from-redis.conf
[root@Logstash-Kibana ~]# cat /etc/logstash/conf.d/logstash-from-redis.conf
input {
  redis {
    host => "192.168.200.194"
    port => 6379
    password => "yunjisuan"
    db => "0"
    data_type => "list"
    key => "filebeat"
  }
}
filter {
  if [app] == "www" {                              # events from the www project
    if [type] == "nginx-access-json" {             # json-format access log events
      json {
        source => "message"                        # parse the JSON held in the message field
        remove_field => ["message"]                # drop the raw message field afterwards
      }
      geoip {
        source => "remote_addr"                    # geo-locate the remote_addr value
        target => "geoip"                          # write the lookup result into the geoip field
        database => "/opt/GeoLite2-City.mmdb"      # path to the GeoIP database file
        add_field => ["[geoip][coordinates]","%{[geoip][longitude]}"]   # append longitude to the coordinates array
        add_field => ["[geoip][coordinates]","%{[geoip][latitude]}"]    # append latitude to the coordinates array
      }
      mutate {
        convert => ["[geoip][coordinates]","float"]   # convert the coordinate values to float
      }
    }
    if [type] == "nginx-access" {                  # main-format access log events
      grok {
        match => {
          "message" => '(?<client>[0-9.]+).*'      # capture the leading client IP from message into the client field
        }
      }
      geoip {
        source => "client"                         # geo-locate the client field
        target => "geoip"
        database => "/opt/GeoLite2-City.mmdb"
        add_field => ["[geoip][coordinates]","%{[geoip][longitude]}"]
        add_field => ["[geoip][coordinates]","%{[geoip][latitude]}"]
      }
      mutate {
        convert => ["[geoip][coordinates]","float"]
      }
    }
  }
}
output {
  elasticsearch {
    hosts => ["http://192.168.200.191:9200","http://192.168.200.192:9200","http://192.168.200.193:9200"]
    index => "logstash-mr_chen-%{type}-%{+YYYY.MM.dd}"   # route to a different index per type value
  }
  stdout { codec => rubydebug }
}
# Start the Logstash process
[root@Logstash-Kibana ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-from-redis.conf
# Check how the filebeat key is being consumed
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 0
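The grok pattern for the main-format log can be exercised in isolation before wiring it into the pipeline. A throwaway sketch using Logstash's -e inline config: paste a sample access_main.log line on stdin and check the extracted client field in the rubydebug output.

[root@Logstash-Kibana ~]# /usr/share/logstash/bin/logstash -e '
input { stdin { } }
filter { grok { match => { "message" => "(?<client>[0-9.]+).*" } } }
output { stdout { codec => rubydebug } }
'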
     

6.1.4 Create the Kibana index patterns

Associate the indices in Kibana to display the collected data.

(screenshot: creating the index pattern in Kibana)

     

6.2 Collecting Java stack-trace logs

     

6.2.1 Deploy Tomcat

     
[root@WebServer ~]# wget http://mirror.bit.edu.cn/apache/tomcat/tomcat-8/v8.5.33/bin/apache-tomcat-8.5.33.tar.gz
[root@WebServer ~]# tar xf apache-tomcat-8.5.33.tar.gz -C /usr/local/
[root@WebServer ~]# mv /usr/local/apache-tomcat-8.5.33 /usr/local/tomcat
[root@WebServer ~]# /usr/local/tomcat/bin/startup.sh
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Tomcat started.
[root@WebServer ~]# tail -f /usr/local/tomcat/logs/catalina.out    # watch the log
26-Aug-2018 11:53:53.432 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deployment of web application directory [/usr/local/tomcat/webapps/docs] has finished in [22] ms
26-Aug-2018 11:53:53.432 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deploying web application directory [/usr/local/tomcat/webapps/examples]
26-Aug-2018 11:53:53.717 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deployment of web application directory [/usr/local/tomcat/webapps/examples] has finished in [285] ms
26-Aug-2018 11:53:53.718 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deploying web application directory [/usr/local/tomcat/webapps/host-manager]
26-Aug-2018 11:53:53.742 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deployment of web application directory [/usr/local/tomcat/webapps/host-manager] has finished in [24] ms
26-Aug-2018 11:53:53.742 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deploying web application directory [/usr/local/tomcat/webapps/manager]
26-Aug-2018 11:53:53.764 信息 [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployDirectory Deployment of web application directory [/usr/local/tomcat/webapps/manager] has finished in [22] ms
26-Aug-2018 11:53:53.778 信息 [main] org.apache.coyote.AbstractProtocol.start Starting ProtocolHandler ["http-nio-8080"]
26-Aug-2018 11:53:53.796 信息 [main] org.apache.coyote.AbstractProtocol.start Starting ProtocolHandler ["ajp-nio-8009"]
26-Aug-2018 11:53:53.800 信息 [main] org.apache.catalina.startup.Catalina.start Server startup in 903 ms
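Besides opening it in a browser, the listener on port 8080 (visible in the startup log above) can be checked from the shell:

[root@WebServer ~]# curl -I http://192.168.200.195:8080/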

Access Tomcat with a browser:

(screenshot: Tomcat default page in the browser)

     

6.2.2 Configure Filebeat to collect the log

catalina.out is the Tomcat log that contains the Java stack traces.

     
# Example of a stack-trace error in catalina.out
2018-08-26 13:20:08
[ERROR]-[Thread: Druid-ConnectionPool-Create-1090484466]-[com.alibaba.druid.pool.DruidDataSource$CreateConnectionThread.run()]: create connection error, url: jdbc:mysql://localhost:3306/jpress?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull, errorCode 0, state 08S01
com.mysql.jdbc.exceptions.jdbc4.CommunicationsException: Communications link failure
The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
    at sun.reflect.GeneratedConstructorAccessor25.newInstance(Unknown Source)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
    at com.mysql.jdbc.Util.handleNewInstance(Util.java:411)
    at com.mysql.jdbc.SQLError.createCommunicationsException(SQLError.java:1117)
    at com.mysql.jdbc.MysqlIO.<init>(MysqlIO.java:350)
    at com.mysql.jdbc.ConnectionImpl.coreConnect(ConnectionImpl.java:2393)
    at com.mysql.jdbc.ConnectionImpl.connectOneTryOnly(ConnectionImpl.java:2430)
    at com.mysql.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:2215)
    at com.mysql.jdbc.ConnectionImpl.<init>(ConnectionImpl.java:813)
    at com.mysql.jdbc.JDBC4Connection.<init>(JDBC4Connection.java:47)
    at sun.reflect.GeneratedConstructorAccessor22.newInstance(Unknown Source)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
    at com.mysql.jdbc.Util.handleNewInstance(Util.java:411)
    at com.mysql.jdbc.ConnectionImpl.getInstance(ConnectionImpl.java:399)
    at com.mysql.jdbc.NonRegisteringDriver.connect(NonRegisteringDriver.java:334)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:148)
    at com.alibaba.druid.filter.stat.StatFilter.connection_connect(StatFilter.java:211)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:142)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1423)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1477)
    at com.alibaba.druid.pool.DruidDataSource$CreateConnectionThread.run(DruidDataSource.java:2001)
Caused by: java.net.ConnectException: 拒绝连接 (Connection refused)
    at java.net.PlainSocketImpl.socketConnect(Native Method)
    at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
    at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
    at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
    at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
    at java.net.Socket.connect(Socket.java:589)
    at java.net.Socket.connect(Socket.java:538)
    at java.net.Socket.<init>(Socket.java:434)
    at java.net.Socket.<init>(Socket.java:244)
    at com.mysql.jdbc.StandardSocketFactory.connect(StandardSocketFactory.java:257)
    at com.mysql.jdbc.MysqlIO.<init>(MysqlIO.java:300)
    ... 17 more
# Modify the filebeat configuration to also collect the Tomcat stack-trace log
[root@WebServer ~]# cat /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /usr/local/nginx/logs/access_json.log
  tags: ["access"]
  fields:
    app: www
    type: nginx-access-json
  fields_under_root: true
- type: log
  paths:
    - /usr/local/nginx/logs/access_main.log
  tags: ["access"]
  fields:
    app: www
    type: nginx-access
  fields_under_root: true
- type: log
  paths:
    - /usr/local/nginx/logs/error.log
  tags: ["error"]
  fields:
    app: www
    type: nginx-error
  fields_under_root: true
- type: log
  paths:
    - /usr/local/tomcat/logs/catalina.out
  tags: ["tomcat"]
  fields:
    app: www
    type: tomcat-catalina
  fields_under_root: true
  multiline:
    pattern: '^\['        # note the escaped "[": lines not starting with "[" are appended to the previous matching line
    negate: true
    match: after
output.redis:
  hosts: ["192.168.200.194"]
  password: "yunjisuan"
  key: "filebeat"
  db: 0
  datatype: list
# Start filebeat
[root@WebServer ~]# systemctl start filebeat
# Check the queue length in Redis
[root@Logstash-Kibana ~]# redis-cli -a yunjisuan llen filebeat
(integer) 11
# Start the Logstash process on the Logstash-Kibana server
[root@Logstash-Kibana ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-from-redis.conf
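To confirm that the multiline settings really merged each stack trace into a single event (rather than one event per "at ..." line), a queued catalina.out event can be inspected before Logstash consumes it; the whole stack trace should appear inside one message field:

[root@Logstash-Kibana ~]# redis-cli -a yunjisuan lrange filebeat 0 -1 | grep catalina.out | head -n 1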
     

6.2.3 Create the Kibana index and view the data

(screenshot: the Tomcat log data in Kibana)

     

6.3 Kibana visualizations and dashboards

Simulate some different client IPs in the main-format Nginx access log:

     
113.108.182.52
123.150.187.130
203.186.145.250
114.80.166.240
119.147.146.189
58.89.67.152
[root@WebServer nginx]# a='58.89.67.152 - - [26/Aug/2018:14:17:33 +0800] "GET / HTTP/1.1" 200 21 "-" "curl/7.29.0" "-"'
[root@WebServer nginx]# for i in `seq 50`;do echo $a >> /usr/local/nginx/logs/access_main.log ;done
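The command above only replays one of the listed IPs. A hedged sketch of doing the same for every IP in the list (same fake request line, just a different client address each time):

for ip in 113.108.182.52 123.150.187.130 203.186.145.250 114.80.166.240 119.147.146.189 58.89.67.152; do
  for i in $(seq 20); do
    echo "$ip - - [26/Aug/2018:14:17:33 +0800] \"GET / HTTP/1.1\" 200 21 \"-\" \"curl/7.29.0\" \"-\"" >> /usr/local/nginx/logs/access_main.log
  done
done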
     

    6.3.1 PV/IP

Counting PV (page views) is simply counting the number of requests per unit of time.

(screenshots: building the PV count visualization in Kibana)

Counting IP is simply counting the number of distinct client IPs.
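The same distinct-IP number Kibana computes can also be pulled straight from Elasticsearch with a cardinality aggregation; a hedged sketch, assuming default dynamic mapping gave the grok-extracted client field a client.keyword sub-field:

[root@Logstash-Kibana ~]# curl -s -H 'Content-Type: application/json' \
    'http://192.168.200.191:9200/logstash-mr_chen-nginx-access-*/_search' -d '{
      "size": 0,
      "aggs": { "unique_ips": { "cardinality": { "field": "client.keyword" } } }
    }'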

(screenshot: unique client IP count visualization in Kibana)

     

6.3.2 Geographic distribution of visitors

(screenshots: visitor geo-location visualization in Kibana, built from the geoip fields)

Original article: https://www.cnblogs.com/linyaonie/p/11231186.html