  • Configuring logback-spring integration with ELK and Kafka

    pom.xml

    <dependency>
        <groupId>com.github.danielwegener</groupId>
        <artifactId>logback-kafka-appender</artifactId>
        <version>0.2.0-RC2</version>
    </dependency>

    <dependency>
        <groupId>net.logstash.logback</groupId>
        <artifactId>logstash-logback-encoder</artifactId>
        <version>6.4</version>
    </dependency>

    <dependency>
        <groupId>ch.qos.logback</groupId>
        <artifactId>logback-classic</artifactId>
        <version>1.2.3</version>
    </dependency>

    logback-spring.xml

    <?xml version="1.0" encoding="UTF-8"?>
    <configuration  scan="true" scanPeriod="10 seconds" debug="false">
    
    
        <!-- Logback context name -->
        <contextName>logback</contextName>
    
        <!-- Log root directory -->
        <property name="log.path" value="C:/logs" />
        <springProperty scope="context" name="servicename" source="spring.application.name" defaultValue="UnknownService"/>
        <springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="dev"/>
        <springProperty scope="context" name="bootstrapServers" source="spring.kafka.bootstrap-servers" defaultValue="localhost:9092"/>
        <springProperty scope="context" name="serviceport" source="server.port" defaultValue="80"/>
        <!-- Obtain the server IP and hostname -->
        <conversionRule conversionWord="serviceip" converterClass="com.icar.web.makedata.utils.LogIpConfigUtil" />
    
        <!-- The springProperty entries above must match the corresponding keys in the yml configuration -->
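        <!-- For reference, a minimal application.yml that supplies the keys read by the
             springProperty entries above might look like this (service name, port and broker
             address are illustrative placeholders, not values taken from this project):
             spring:
               application:
                 name: makedata-service
               profiles:
                 active: dev
               kafka:
                 bootstrap-servers: localhost:9092
             server:
               port: 8080
        -->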
    
        <!-- Output logs to the console -->
        <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
            <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
                <level>INFO</level>
            </filter>
            <encoder>
                <pattern>%yellow(%date{yyyy-MM-dd HH:mm:ss}) |%highlight(%-5level) |%blue(%thread) |%green(%file:%line) |%magenta(%logger) |%cyan(%msg%n)</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>
    
        <!-- ① Log file for level=INFO -->
        <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
            <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
                <fileNamePattern>${log.path}/info/infolevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
                <maxFileSize>100MB</maxFileSize>
                <maxHistory>15</maxHistory>
                <totalSizeCap>2GB</totalSizeCap>
            </rollingPolicy>
            <!-- Log output level -->
            <filter class="ch.qos.logback.classic.filter.LevelFilter">
                <level>INFO</level>
                <onMatch>ACCEPT</onMatch>
                <onMismatch>DENY</onMismatch>
            </filter>
            <!-- Log file output format -->
            <encoder>
                <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} --- %msg%n</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>
    
        <!-- ② Log file for level=WARN -->
        <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
            <!-- Basic settings -->
            <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
                <fileNamePattern>${log.path}/warn/warnlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
                <maxFileSize>100MB</maxFileSize>
                <maxHistory>15</maxHistory>
                <totalSizeCap>2GB</totalSizeCap>
            </rollingPolicy>
            <!-- Log output level -->
            <filter class="ch.qos.logback.classic.filter.LevelFilter">
                <level>WARN</level>
                <onMatch>ACCEPT</onMatch>
                <onMismatch>DENY</onMismatch>
            </filter>
            <!-- Log file output format -->
            <encoder>
                <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>
    
        <!-- ③ Log file for level=ERROR -->
        <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
            <!-- Basic settings -->
            <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
                <fileNamePattern>${log.path}/error/errorlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
                <maxFileSize>100MB</maxFileSize>
                <maxHistory>15</maxHistory>
                <totalSizeCap>2GB</totalSizeCap>
            </rollingPolicy>
            <!-- Log output level -->
            <filter class="ch.qos.logback.classic.filter.LevelFilter">
                <level>ERROR</level>
                <onMatch>ACCEPT</onMatch>
                <onMismatch>DENY</onMismatch>
            </filter>
            <!-- Log file output format -->
            <encoder>
                <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>
        <!-- Alternative KafkaAppender kept for reference: it uses a LayoutKafkaMessageEncoder with
             LogstashLayout instead of the composite JSON encoder configured below.
        <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
            <encoder class="com.github.danielwegener.logback.kafka.encoding.LayoutKafkaMessageEncoder">
                <layout class="net.logstash.logback.layout.LogstashLayout" >
                    Whether to include the logging context
                    <includeContext>true</includeContext>
                    Whether to include caller data (the source of the log statement)
                    <includeCallerData>true</includeCallerData>
                    Custom additional fields
                    <customFields>{"system":"test"}</customFields>
                    Shortened names for the standard fields
                    <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames"/>
                </layout>
                <charset>UTF-8</charset>
            </encoder>
            The Kafka topic; it must match the topic in the consumer configuration, otherwise Kafka will silently despise you
            <topic>applog_dev</topic>
            <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
            <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
            <producerConfig>bootstrap.servers=124.71.59.186:9092,139.159.249.142:9092,124.71.85.73:9092</producerConfig>
            don't wait for a broker to ack the reception of a batch
            <producerConfig>acks=0</producerConfig>
            wait up to 1000ms and collect log messages before sending them as a batch
            <producerConfig>linger.ms=1000</producerConfig>
            even if the producer buffer runs full, do not block the application but start to drop messages
            <producerConfig>max.block.ms=0</producerConfig>
            define a client-id that you use to identify yourself against the kafka broker
            <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

            this is the fallback appender if kafka is not available.
            <appender-ref ref="CONSOLE" />
        </appender>-->
    
        <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
            <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
                <providers class="net.logstash.logback.composite.loggingevent.LoggingEventJsonProviders">
                    <pattern>
                <!--        <pattern>
                            {
                            "env": "${env}",
                            "servicename":"${servicename}",
                            "type":"${servicename}",
                            "serviceinfo":"%serviceip:${serviceport}",
                            "date":"%d{yyyy-MM-dd HH:mm:ss.SSS}",
                            "level":"%level",
                            "thread": "%thread",
                            "logger": "%logger{36}",
                            "msg":"%msg",
                            "exception":"%exception"
                            }
                        </pattern>-->
                        <pattern>
                            {
                            "env": "${env}",
                            "servicename":"${servicename}",
                            "type":"${servicename}",
                            "serviceinfo":"%serviceip:${serviceport}",
                            "date":"%d{yyyy-MM-dd HH:mm:ss.SSS}",
                            "level":"%level",
                            "thread": "%thread",
                            "msg":"%msg",
                            "exception":"%exception"
                            }
                        </pattern>
                    </pattern>
                </providers>
            </encoder>
            <topic>appdev</topic>
            <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
            <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
            <producerConfig>acks=0</producerConfig>
            <producerConfig>linger.ms=1000</producerConfig>
            <producerConfig>max.block.ms=0</producerConfig>
            <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
        </appender>
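        <!-- With the pattern above, each record published to the "appdev" topic is one JSON
             document per log event, shaped roughly like the following (field values here are
             purely illustrative):
             {"env":"dev","servicename":"makedata-service","type":"makedata-service",
              "serviceinfo":"myhost/10.0.0.1:8080","date":"2021-08-17 10:00:00.000",
              "level":"INFO","thread":"main","msg":"started","exception":""}
        -->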
    
        <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
            <appender-ref ref="KafkaAppender"/>
        </appender>
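        <!-- The AsyncAppender uses its defaults here; it also supports tuning elements such as
             <queueSize> (default 256), <discardingThreshold> and <neverBlock> if the Kafka
             appender ever becomes a bottleneck.
        -->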
    
        <root level="INFO">
            <appender-ref ref="ASYNC"/>
        </root>
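        <!-- Note: only the ASYNC (Kafka) appender is attached to the root logger, so nothing is
             written to the console or the rolling files unless the commented-out root below is
             used or additional appender-ref entries are added here.
        -->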
    <!--    <root>
            <appender-ref ref="CONSOLE"/>
            <appender-ref ref="INFO_FILE"/>
            <appender-ref ref="WARN_FILE"/>
            <appender-ref ref="ERROR_FILE"/>
        </root>-->
         <!-- springProfile: the logger below is only active under the dev profile -->
    <!--     <springProfile name="dev">
             <logger name="com.nick" level="INFO" additivity="true">
                 <appender-ref ref="KafkaAppender" />
             </logger>
         </springProfile>-->
    
    </configuration>
    LogIpConfigUtil.java (the custom converter referenced by the conversionRule above):

    import ch.qos.logback.classic.pattern.ClassicConverter;
    import ch.qos.logback.classic.spi.ILoggingEvent;
    import lombok.extern.slf4j.Slf4j;
    
    import java.net.Inet4Address;
    import java.net.InetAddress;
    import java.net.NetworkInterface;
    import java.net.UnknownHostException;
    import java.util.Enumeration;
    
    /**
     * Resolves the server hostname and IP address, exposed via the %serviceip conversion word.
     */
    //@Slf4j
    public class LogIpConfigUtil extends ClassicConverter {
    
        public static String serviceIp;
    
        static {
            try {
               /* Enumeration<NetworkInterface> allNetInterfaces = NetworkInterface.getNetworkInterfaces();
                InetAddress ip = null;
                while (allNetInterfaces.hasMoreElements()) {
                    NetworkInterface netInterface = (NetworkInterface) allNetInterfaces.nextElement();
                    if (netInterface.isLoopback() || netInterface.isVirtual() || !netInterface.isUp()) {
                        continue;
                    } else {
                        Enumeration<InetAddress> addresses = netInterface.getInetAddresses();
                        while (addresses.hasMoreElements()) {
                            ip = addresses.nextElement();
                            if (ip != null && ip instanceof Inet4Address) {
                                return ip.getHostAddress();
                            }
                        }
                    }
                }*/
    
                InetAddress addr = InetAddress.getLocalHost();
                serviceIp =addr.getHostName()+"/" +addr.getHostAddress();
    
            } catch (Exception e) {
            //log.error("Failed to obtain the server IP address: " + e.toString());
            }
        }
    
        // Called by logback whenever the %serviceip conversion word appears in a pattern;
        // returns the hostname/IP string cached by the static initializer above.
        @Override
        public String convert(ILoggingEvent iLoggingEvent) {
            return serviceIp;
        }
    }

    logstash-es.conf

    input {
      kafka {
        group_id => "test-consumer-group"
        topics => ["appdev"]   
        bootstrap_servers => "localhost:9092"
        codec => "json"
      }
    }
    filter {
    }
    output {
      stdout {  codec => rubydebug }
      if [type] == "xxxx" {
        elasticsearch {
          hosts => [ "localhost:9200" ]
          index => "xxx"
        }
      }
    }
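
    The [type] condition above refers to the "type" field that the logback JSON pattern sets to ${servicename}, so "xxxx" should be the actual service name and "xxx" the target index. A filled-in output block might look like the following sketch (the service name and index name are placeholders of my own, not values from the original configuration):

    output {
      stdout {  codec => rubydebug }
      if [type] == "makedata-service" {
        elasticsearch {
          hosts => [ "localhost:9200" ]
          index => "applog-makedata-%{+YYYY.MM.dd}"
        }
      }
    }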
  • Original post: https://www.cnblogs.com/zhian/p/15149391.html