Dynamically switching logback log levels

Overview

In production there is often a need to change log levels on the fly; this post records the solution I use. Since our production environment uses the Nacos configuration center, the solution is built on Nacos.

Solution

logback-custom.xml

<?xml version="1.0" encoding="utf-8"?>
<configuration debug="true" scan="true" scanPeriod="3 seconds">

<!-- Log output patterns -->
<property name="log.pattern" value="[eachbot-nlp-kb-proxy] [%X{ip}] [%d{yyyy-MM-dd HH:mm:ss.SSS}] [%thread] [%-5level] [%logger{80}] [%method,%line] [%msg]%n"/>
<property name="log.pattern.color" value="%yellow([eachbot-nlp-kb-proxy]) [%X{ip}] [%d{yyyy-MM-dd HH:mm:ss.SSS}] [%thread] %highlight([%-5level]) %green([%logger{80}]) [%method,%line] %highlight([%msg])%n"/>

<!-- Properties resolved from the Spring Environment (populated from Nacos) -->
<springProperty scope="context" name="LOG_PATH" source="log.path"/>
<springProperty scope="context" name="topic" source="elk.kafka.topic"/>
<springProperty scope="context" name="kafkaServers" source="elk.kafka.servers"/>
<springProperty scope="context" name="LOG_LEVEL" source="log.level"/>


<!-- Console output -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${log.pattern}</pattern>
</encoder>
</appender>

<!-- Console output with colors -->
<appender name="console-with-color" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${log.pattern.color}</pattern>
</encoder>
</appender>

<!-- File output -->
<appender name="file_info" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${log.pattern}</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/eachbot-nlp-kb-proxy.info.%d.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>30</maxHistory>
</rollingPolicy>
</appender>

<appender name="file_error" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${log.pattern}</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/eachbot-nlp-kb-proxy.error.%d.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>30</maxHistory>
</rollingPolicy>
</appender>


<!-- Kafka appender -->
<appender name="kafka_appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>
{
"app_name":"eachbot-nlp-kb-proxy",
"ip":"%X{ip}",
"company_ip": "%X{companyId}",
"date_time": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
"thread": "%thread",
"level": "%level",
"class":"%logger{80}",
"method":"%method,%line",
"message": "%msg"
}
</pattern>
</pattern>
</providers>
</encoder>

<!-- The Kafka topic must match the one in the application config, otherwise Kafka will go silent and despise you -->
<topic>${topic}</topic>
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
<producerConfig>bootstrap.servers=${kafkaServers}</producerConfig>
<producerConfig>acks=1</producerConfig>
<producerConfig>linger.ms=1000</producerConfig>
<producerConfig>max.block.ms=0</producerConfig>

</appender>

<!-- Third-party logger levels -->
<logger name="org.mongodb" level="WARN"/>
<logger name="org.apache.kafka" level="INFO"/>
<logger name="org.springframework" level="INFO"/>

<root level="${LOG_LEVEL:-INFO}">
<appender-ref ref="console-with-color"/>

<appender-ref ref="file_info"/>
<appender-ref ref="file_error"/>

<!-- <appender-ref ref="kafka_appender"/>-->
</root>

</configuration>

With this in place, we only need to set log.level=debug in the Nacos configuration file to switch the log level.
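
For reference, the matching entries in the Nacos configuration might look like the sketch below; the key names follow the springProperty sources above, while the path, topic, and broker address are placeholder values. Also note that a non-standard file name like logback-custom.xml is not picked up automatically, so Spring Boot has to be pointed at it, e.g. with logging.config: classpath:logback-custom.xml.

log:
  path: /data/logs/eachbot-nlp-kb-proxy
  level: debug

elk:
  kafka:
    topic: eachbot-nlp-kb-proxy-log
    servers: 127.0.0.1:9092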

pom.xml

<profiles>
<profile>
<id>local</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<properties>
<profileActive>local</profileActive>
<nacos.servers>127.0.0.1:8848</nacos.servers>
<nacos.namespace>d4009480-1234-4000-1234-15cdef527111</nacos.namespace>
<nacos.username>nacos</nacos.username>
<nacos.password>nacos</nacos.password>
</properties>
</profile>
</profiles>
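
One caveat: the @...@ placeholders in bootstrap.yml are substituted by Maven resource filtering at build time, and with spring-boot-starter-parent only application* files are filtered by default. If the placeholders come through unreplaced, filtering for bootstrap.yml may need to be enabled explicitly; a minimal sketch for the same pom.xml:

<build>
  <resources>
    <!-- filter only bootstrap.yml so binary resources are left untouched -->
    <resource>
      <directory>src/main/resources</directory>
      <filtering>true</filtering>
      <includes>
        <include>bootstrap.yml</include>
      </includes>
    </resource>
    <!-- copy everything else verbatim -->
    <resource>
      <directory>src/main/resources</directory>
      <excludes>
        <exclude>bootstrap.yml</exclude>
      </excludes>
    </resource>
  </resources>
</build>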

bootstrap.yml

spring:
  application:
    name: eachbot-nlp-kb-proxy
  profiles:
    active: @profileActive@
  cloud:
    nacos:
      config:
        server-addr: @nacos.servers@
        namespace: @nacos.namespace@
        group: EACHBOT
        file-extension: yaml
        username: @nacos.username@
        password: @nacos.password@
      discovery:
        server-addr: @nacos.servers@
        namespace: @nacos.namespace@
        group: EACHBOT
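
As a side note, even without the LOG_LEVEL wiring in logback-custom.xml, spring-cloud-context's LoggingRebinder re-applies standard logging.level.* properties whenever the environment is refreshed, so per-logger levels can also be toggled straight from the Nacos configuration (the package name below is only an example):

logging:
  level:
    root: info
    com.eachbot.kb: debug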

Finally

That's all for this post, and thanks for reading. If you found it useful, please follow the official account 【当我遇上你】; your support is my biggest motivation to keep writing.
