zoukankan      html  css  js  c++  java
  • logstash redis kafka传输 haproxy日志

    logstash 客户端收集 haproxy 的 http/tcp 日志

    # Client-side inputs: tail the two HAProxy log files on the HAProxy host.
    input {
    file {
    # HAProxy HTTP-mode access log.
    path => "/data/haproxy/logs/haproxy_http.log"
    # Read from the start of the file the first time it is seen
    # (afterwards the sincedb position is used to resume).
    start_position => "beginning"
    # Tag events so downstream filter/output conditionals can route them.
    type => "haproxy_http"
    }
    file {
    # HAProxy TCP-mode log.
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
    }
    }

    # Parse the raw HAProxy log lines into structured fields.
    # Fixes vs. the original:
    #   * `[` and `]` around %{HAPROXYDATE} are regex metacharacters and must be
    #     escaped as \[ ... \], otherwise the grok pattern fails to match/compile.
    #   * The double quotes around the HTTP request portion were raw `"` inside a
    #     double-quoted config string, which breaks config parsing; they must be
    #     escaped as \".
    filter {
    if [type] == "haproxy_http" {
    grok {
    # Directory holding custom patterns (the FENG pattern used below must be
    # defined there — e.g. for HAProxy's captured-cookie "-" placeholder).
    patterns_dir => "/data/logstash/patterns"
    # NOTE(review): `http_socke` looks like a typo (likely meant http_proto);
    # kept as-is so downstream consumers of the field keep working.
    match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    # Enrich HTTP events with GeoIP data derived from the client address.
    geoip {
    source => "client_ip"
    fields => ["ip","city_name","country_name","location"]
    add_tag => [ "geoip" ]
    }
    } else if [type] == "haproxy_tcp" {
    grok {
    # TCP-mode log format: fewer timing fields, no HTTP request line.
    match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
    }
    }

    # Ship parsed events to Redis lists (acting as a broker between the
    # client shipper and the central Logstash indexer).
    output {
    if [type] == "haproxy_http" {
    redis {
    host => "192.168.20.166"
    port => "6379"
    # HTTP events go to Redis DB 5.
    db => "5"
    # Push onto a Redis list (RPUSH) that the indexer BLPOPs from.
    data_type => "list"
    key => "haproxy_http.log"
    }
    } else if [type] == "haproxy_tcp" {
    redis {
    host => "192.168.20.166"
    port => "6379"
    # TCP events go to Redis DB 4.
    db => "4"
    data_type => "list"
    key => "haproxy_tcp.log"
    }
    }
    }

    logstash 服务器端把 haproxy 的 http/tcp 日志写入到elasticsearch中

    [root@logstashserver etc]# cat logstash.conf

    # Server-side (indexer) inputs: consume events from the Redis lists.
    #
    # FIX: Logstash conditionals (`if [type] == ...`) are only valid inside
    # `filter` and `output` sections — they are a configuration error inside
    # `input`, so the original config would not load. Conditionals are also
    # unnecessary here: each redis input already selects its stream via
    # db/key, and the `type` field set by the shipper is preserved on the
    # events, so the output section can still route on it.
    input {
    redis {
    host => "192.168.20.166"
    port => "6379"
    # HTTP events queue.
    db => "5"
    data_type => "list"
    key => "haproxy_http.log"
    }
    redis {
    host => "192.168.20.166"
    port => "6379"
    # TCP events queue.
    db => "4"
    data_type => "list"
    key => "haproxy_tcp.log"
    }
    }

    # Index events into the Elasticsearch cluster, one daily index per stream.
    output {
    if [type] == "haproxy_http" {
    elasticsearch {
    hosts => ["es1:9200","es2:9200","es3:9200"]
    # Let Logstash install/manage its index template.
    manage_template => true
    # Daily index, e.g. logstash-haproxy-http.log-2016-09-30.
    index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
    }
    if [type] == "haproxy_tcp" {
    elasticsearch {
    hosts => ["es1:9200","es2:9200","es3:9200"]
    manage_template => true
    index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
    }
    }

    #########################################kafka###############################################

    客户端

    # Client-side inputs (Kafka variant): tail the two HAProxy log files.
    input {
    file {
    # HAProxy HTTP-mode access log.
    path => "/data/haproxy/logs/haproxy_http.log"
    # Read from the start of the file on first run; sincedb resumes after.
    start_position => "beginning"
    # Tag events for routing in filter/output conditionals.
    type => "haproxy_http"
    }
    file {
    # HAProxy TCP-mode log.
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
    }
    }

    # Parse the raw HAProxy log lines into structured fields (Kafka variant —
    # identical parsing to the Redis variant).
    # Fixes vs. the original:
    #   * `[` and `]` around %{HAPROXYDATE} must be escaped as \[ ... \];
    #     unescaped they are regex character-class delimiters and the pattern
    #     fails to match/compile.
    #   * The quotes around the HTTP request portion were raw `"` inside a
    #     double-quoted config string (config parse error); escaped as \".
    filter {
    if [type] == "haproxy_http" {
    grok {
    # Custom patterns directory; must define the FENG pattern used below.
    patterns_dir => "/data/logstash/patterns"
    # NOTE(review): `http_socke` looks like a typo (likely http_proto);
    # kept as-is so downstream consumers of the field keep working.
    match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    # Enrich HTTP events with GeoIP data from the client address.
    geoip {
    source => "client_ip"
    fields => ["ip","city_name","country_name","location"]
    add_tag => [ "geoip" ]
    }
    } else if [type] == "haproxy_tcp" {
    grok {
    # TCP-mode format: fewer timing fields, no HTTP request line.
    match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
    }
    }

    # Publish parsed events to Kafka (this Logstash acts as the producer).
    output {
    if [type] == "haproxy_http" {
    kafka { # produce to Kafka
    bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092" # broker list; this pipeline is the producer
    topic_id => "haproxy_http.log" # topic name; auto-created if it does not exist
    compression_type => "snappy" # message compression codec
    }
    } else if [type] == "haproxy_tcp" {
    kafka { # produce to Kafka
    bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092" # broker list; this pipeline is the producer
    topic_id => "haproxy_tcp.log" # topic name; auto-created if it does not exist
    compression_type => "snappy" # message compression codec
    }
    }
    }

    服务器端

    # Server-side (indexer) inputs: consume the two Kafka topics.
    #
    # FIX: Logstash conditionals (`if [type] == ...`) are only valid inside
    # `filter` and `output` sections — inside `input` they are a configuration
    # error and the original config would not load. They are also unnecessary:
    # each kafka input already selects its stream via topic_id, and the `type`
    # field set by the shipper travels with the event, so the output section
    # can still route on it.
    # NOTE(review): zk_connect/reset_beginning are options of the legacy
    # (Kafka 0.8-era) logstash-input-kafka plugin; newer plugin versions use
    # bootstrap_servers/auto_offset_reset instead — verify plugin version.
    input {
    kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_http.log"
    # Do not rewind to the beginning of the topic on restart.
    reset_beginning => false
    # Parallel consumer threads for this topic.
    consumer_threads => 5
    # Add kafka metadata (topic, consumer group) to each event.
    decorate_events => true
    }
    kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_tcp.log"
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
    }
    }


    # Index events into the Elasticsearch cluster, one daily index per stream.
    output {
    if [type] == "haproxy_http" {
    elasticsearch {
    hosts => ["es1:9200","es2:9200","es3:9200"]
    # Let Logstash install/manage its index template.
    manage_template => true
    # Daily index, e.g. logstash-haproxy-http.log-2016-09-30.
    index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
    }
    if [type] == "haproxy_tcp" {
    elasticsearch {
    hosts => ["es1:9200","es2:9200","es3:9200"]
    manage_template => true
    index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
    }
    }

  • 相关阅读:
    [Baltic2013]ballmachine BZOJ3133
    [Jxoi2012]奇怪的道路 BZOJ3195 状压DP
    [Baltic 2011]Lamp BZOJ2346
    可并堆
    [Jsoi2016]最佳团体 BZOJ4753 01分数规划+树形背包/dfs序
    点分治
    J2EE WEB应用架构分析
    {经典}springmvc+mybatis+restful+webservice Jeesz分布式架构
    深入Spring Boot:那些注入不了的 Spring 占位符 ( ${} 表达式 )
    G1 垃圾收集器之对象分配过程
  • 原文地址:https://www.cnblogs.com/fengjian2016/p/5919702.html
Copyright © 2011-2022 走看看