Add logging; add dictionary query

master
LAPTOP-S9HJSOEB\昊天 8 months ago
parent 9496586193
commit befae82898

@ -49,6 +49,8 @@
<dependencyManagement>
<dependencies>
<dependency>
<groupId>cn.iocoder.boot</groupId>
<artifactId>yudao-dependencies</artifactId>

@ -16,6 +16,17 @@
system 模块 API暴露给其它模块调用
</description>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.3.11</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.3.11</version>
</dependency>
<dependency>
<groupId>cn.iocoder.boot</groupId>
<artifactId>yudao-module-system-api</artifactId>

@ -80,6 +80,10 @@ public class CheckLogController {
public CommonResult<List<DictDataDO>> getColumns() {
Map<String, DictDataDO> dictDataList = dictDataService.getDictDataList("scan_conf");
Map<String, DictDataDO> dictDataNameList = dictDataService.getDictDataList("scan_conf_name");
for (DictDataDO dictDataDO : dictDataList.values()){
dictDataDO.setRemark(dictDataNameList.get(dictDataDO.getLabel()).getValue());
}
List<DictDataDO> dictDataDOS = dictDataList.values().stream().filter(dictDataDO -> !dictDataDO.getValue().equals("0")).toList();
return success(dictDataDOS);
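The loop above assumes every label in scan_conf also has an entry in scan_conf_name; if one is missing, dictDataNameList.get(...).getValue() throws a NullPointerException. A hedged, null-safe sketch of the same remark merge (DictDataDO and the two dictionary maps come from the hunk; the guard is an addition):

// Null-safe variant of the remark merge: skip labels without a matching scan_conf_name entry
for (DictDataDO dictDataDO : dictDataList.values()) {
    DictDataDO nameEntry = dictDataNameList.get(dictDataDO.getLabel());
    if (nameEntry != null) {
        dictDataDO.setRemark(nameEntry.getValue());
    }
}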

@ -136,7 +136,9 @@ public class StockController {
ScanData scan = null;
Map<String, DictDataDO> dictValueMap = dictDataService.getDictValueMap("check_status");
Map<String, DictDataDO> scanConf = dictDataService.getDictDataList("scan_conf");
Map<String, DictDataDO> scanConfName = dictDataService.getDictDataList("scan_conf_name");
if (scanData.getCheckLogId() != null) {
CheckLogDO checkLogDO = checkLogService.getById(scanData.getCheckLogId());
@ -161,6 +163,7 @@ public class StockController {
if (!v.getValue().equals("0")){
ScanStatus scanStatus = BeanUtils.toBean(v, ScanStatus.class);
scanStatus.setRemark(scanConfName.get(v.getLabel()).getValue());
try {
String wmsType = "wms" + PLCServiceImpl.capitalize(v.getLabel());
@ -203,6 +206,26 @@ public class StockController {
return success(scan);
}
@PostMapping("/artificial")
@Operation(summary = "人工盘点")
//@Parameter(name = "id", description = "巷道id", required = true, example = "1024")
@PreAuthorize("@ss.hasPermission('logistics:stock:getStreetList')")
public CommonResult<String> artificial(@RequestBody ScanData scanData) {
if (scanData.getCheckLogId() != null) {
CheckLogDO checkLogDO = checkLogService.getById(scanData.getCheckLogId());
if (checkLogDO != null) {
checkLogDO.setStatus(3);
checkLogService.updateById(checkLogDO);
}
} else if (scanData.getStockId() != null) {
StockDO stockDO = stockService.getById(scanData.getStockId());
if (stockDO != null) {
stockDO.setStatus("3");
stockService.updateById(stockDO);
}
}
return success("人工盘点成功");
}
public static Field getFieldFromHierarchy(Class<?> clazz, String fieldName) {
while (clazz != null) {
try {

@ -1,6 +1,5 @@
package cn.iocoder.yudao.module.camera.framework.netty.ksec;
import cn.iocoder.yudao.module.camera.controller.admin.tcpclientlog.vo.TcpClientLogSaveReqVO;
import cn.iocoder.yudao.module.camera.service.tcpclientlog.TcpClientLogService;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
@ -30,15 +29,15 @@ public class KescEncoder extends MessageToByteEncoder<String> {
String body = new StringBuilder().append("<").append(s).append(">").toString();
byteBuf.writeBytes(body.getBytes(StandardCharsets.UTF_8));
InetSocketAddress remoteAddress = (InetSocketAddress) channelHandlerContext.channel().remoteAddress();
String remoteIp = remoteAddress.getAddress().getHostAddress();
int remotePort = remoteAddress.getPort();
TcpClientLogSaveReqVO tcpClientLogDO = new TcpClientLogSaveReqVO();
tcpClientLogDO.setTcpType("KESC");
tcpClientLogDO.setPort(remotePort);
tcpClientLogDO.setIp(remoteIp);
tcpClientLogDO.setType(2);
tcpClientLogDO.setInfo(body);
tcpClientLogService.createTcpClientLog(tcpClientLogDO);
// String remoteIp = remoteAddress.getAddress().getHostAddress();
// int remotePort = remoteAddress.getPort();
// TcpClientLogSaveReqVO tcpClientLogDO = new TcpClientLogSaveReqVO();
// tcpClientLogDO.setTcpType("KESC");
// tcpClientLogDO.setPort(remotePort);
// tcpClientLogDO.setIp(remoteIp);
// tcpClientLogDO.setType(2);
// tcpClientLogDO.setInfo(body);
// tcpClientLogService.createTcpClientLog(tcpClientLogDO);
tcpLogger.info("发送数据:"+body);
}
}

@ -73,12 +73,19 @@ public class KsecDecoder extends DelimiterBasedFrameDecoder {
@Override
public void run() {
String body = in.toString(StandardCharsets.UTF_8);
tcpLogger.info("接收数据:"+body);
log.info("接收数据:"+body);
String body = in.toString(StandardCharsets.ISO_8859_1); // 1:1 byte mapping; US_ASCII would corrupt the UTF-8 bytes re-decoded below
// log.info("接收数据:"+body);
if (body.startsWith("<")){
// strip the leading/trailing frame marker
body = body.substring(1, body.length());
body = body.substring(1);
byte[] bytes = body.getBytes(StandardCharsets.ISO_8859_1);
body = new String(bytes, StandardCharsets.UTF_8);
// re-decode as UTF-8 so Chinese characters come through
tcpLogger.info("接收数据:"+body);
KsecInfo ksecInfo = JSONObject.parseObject(body, KsecInfo.class);
KsecDataInfo dataInfo = ksecInfo.getData();
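The decode path above only recovers Chinese text if the initial ByteBuf.toString(...) keeps every byte intact: ISO_8859_1 maps bytes to chars one-to-one and round-trips through getBytes(ISO_8859_1), whereas US_ASCII replaces every byte above 0x7F and destroys the UTF-8 sequences before the re-decode. A minimal, self-contained sketch of that round trip (plain JDK, hypothetical payload):

import java.nio.charset.StandardCharsets;

public class CharsetRoundTripSketch {
    public static void main(String[] args) {
        // Simulated wire frame: '<' + UTF-8 JSON body + '>' (payload is hypothetical)
        byte[] wire = "<{\"type\":\"E\",\"count\":\"正常\"}>".getBytes(StandardCharsets.UTF_8);

        // ISO_8859_1 keeps all 256 byte values, so the original bytes survive the String detour
        String raw = new String(wire, StandardCharsets.ISO_8859_1);
        String body = raw.substring(1, raw.length() - 1);          // strip '<' and '>'
        String utf8 = new String(body.getBytes(StandardCharsets.ISO_8859_1), StandardCharsets.UTF_8);
        System.out.println(utf8);                                   // {"type":"E","count":"正常"}

        // Decoding the same bytes as US_ASCII first would turn the multi-byte
        // sequences into replacement characters, and "正常" could not be recovered.
    }
}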
@ -106,7 +113,7 @@ public class KsecDecoder extends DelimiterBasedFrameDecoder {
} else if (Cmd.E.name().equals(ksecInfo.getType())) {
// smart stocktaking (Cmd.E)
dataInfo.setCount("正常");
plcService.check(dataInfo);

@ -97,7 +97,8 @@ public class HikFlaskApiService implements ScanService {
.build();
try (Response response = client.newCall(request).execute()) {
if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
if (!response.isSuccessful()) {
log.warn("Unexpected code " + response);
}
String responseBody = Objects.requireNonNull(response.body()).string();
return gson.fromJson(responseBody, HikPythonEntity.class);
@ -144,6 +145,7 @@ public class HikFlaskApiService implements ScanService {
scanData.setCode("缺件");
HikPythonEntity hikPythonEntity = picComputeAll(dataInfo.getCategory(),urlPath);
log.info("picComputeAll result: {}", hikPythonEntity);
if (hikPythonEntity != null){
if (hikPythonEntity.getLack()!= null && hikPythonEntity.getLack()) {
}else {
@ -161,6 +163,9 @@ public class HikFlaskApiService implements ScanService {
}
return scanData;
} catch (IOException e) {
log.error("识别异常",e);
System.out.println("识别异常");
e.printStackTrace();
throw new RuntimeException(e);
}
// return null;

@ -192,7 +192,7 @@ public class PLCServiceImpl implements PLCService {
return;
}
String saveApiPath = dictDataService.parseDictData("base_conf", "data_api_path").getValue();
KescEntity kescEntity = new KescEntity();
kescEntity.setData(dataInfo);
// Determine how many cameras the aisle has: if only one, use it; if two, first check the paired-shot setting to decide which camera takes the picture
@ -209,7 +209,7 @@ public class PLCServiceImpl implements PLCService {
String pathSrc = PathUtil.createFileName(dataInfo, street, picCmd, ".jpg");
pathSrc = cameraCapture(camera, false, pathSrc, dictDataService.getDictDataList("camera_conf"));
order.setPics(Strings.hasText(order.getPics()) ? order.getPics() + ";" + pathSrc : pathSrc);
order.setPics(Strings.hasText(order.getPics()) ? order.getPics() + ";" +saveApiPath+ pathSrc : saveApiPath+pathSrc);
orderMapper.updateById(order);
}
//转向原点位
@ -229,6 +229,8 @@ public class PLCServiceImpl implements PLCService {
@Override
public void check(KsecDataInfo dataInfo) {
String saveApiPath = dictDataService.parseDictData("base_conf", "data_api_path").getValue();
long startTime = System.currentTimeMillis();
String uuid = UUID.randomUUID().toString();
@ -277,7 +279,7 @@ public class PLCServiceImpl implements PLCService {
String pathSrc = PathUtil.createFileName(dataInfo, street, "E1", ".jpg");
CameraDO camera = cameraService.getById(dataInfo.getFromDirection() == 1 ? street.getCamera1Id() : street.getCamera2Id());
pathSrc = cameraCapture(camera, false, pathSrc, dictDataService.getDictDataList("camera_conf"));
urlResourcesService.save(URLResourcesDo.builder().url(pathSrc).uuid(uuid).type("1").little("球机拍照").build());
urlResourcesService.save(URLResourcesDo.builder().url(saveApiPath+pathSrc).uuid(uuid).type("1").little("球机拍照").build());
// record the normal, not-yet-counted state first
stockService.saveOrUpdate(stock);
@ -499,8 +501,20 @@ public class PLCServiceImpl implements PLCService {
// TODO: in the 昆船 (KSEC) project, pick-up and put-away are independent operations
// at pick-up time the put-away location is unknown, so only position 1 is written when the order starts
// position 2 is written when the order ends
if (street.getCamera1Id() != null) {
String path = zLMediaKitService.startRecord("live", street.getCamera1Id().toString());
// String path = cameraVideo(street.getCamera1Id(), pathSrc, order.getCreateTime(), endDownLoadTime, dictDataList);
order.setVideoPath1(path);
}
if (street.getCamera2Id() != null) {
String path = zLMediaKitService.startRecord("live", street.getCamera2Id().toString());
order.setVideoPath2(path);
}
orderMapper.insert(order);
//OrderRealtime.startOrder(street.getId(), plcCmdInfo.getOrderNum());
// OrderRealtime.startOrder(street.getId(), plcCmdInfo.getOrderNum());
}
}
@Resource
@ -534,17 +548,16 @@ public class PLCServiceImpl implements PLCService {
if (duration.toMinutes() > 50) {
endDownLoadTime = order.getCreateTime().plusMinutes(50);
}
if (street.getCamera1Id() != null) {
String pathSrc = PathUtil.createFileName(ksecDataInfo, street, "B2-1", ".mp4");
String path = zLMediaKitService.startRecord("live", street.getCamera1Id().toString());
zLMediaKitService.stopRecord("live", street.getCamera1Id().toString());
// String path = cameraVideo(street.getCamera1Id(), pathSrc, order.getCreateTime(), endDownLoadTime, dictDataList);
update.setVideoPath1(path);
}
if (street.getCamera2Id() != null) {
String pathSrc = PathUtil.createFileName(ksecDataInfo, street, "B2-2", ".mp4");
String path = cameraVideo(street.getCamera2Id(), pathSrc, order.getCreateTime(), endDownLoadTime, dictDataList);
update.setVideoPath2(path);
zLMediaKitService.stopRecord("live", street.getCamera2Id().toString());
}
orderMapper.updateById(update);

@ -26,6 +26,8 @@ public class CognexSocket {
while ("NOREAD".equals(code) && i <= 4){
writeCmd(os);
code = read(is);
System.out.println("count:"+i+",ip:"+ip+",code:{}"+code);
tcpLogger.info("count:{},ip:{},code:{}",i,ip,code);
if(code!= null){
code = code.replace("\\n","");

@ -41,7 +41,8 @@ public class ZLMediaKitServiceImpl implements ZLMediaKitService{
private DictDataService dictDataService;
@Override
public String startRecord(String app, String cameraId) {
String zlmApiSecret = dictDataService.parseDictData("ZLMediaKit_conf", "secret").getValue();
Map<String, Object> addParams = new HashMap<>();
@ -71,6 +72,8 @@ public class ZLMediaKitServiceImpl implements ZLMediaKitService{
public String checkHiddenFilesInDirectory(String app, String cameraId) {
String mp4SavePath = dictDataService.parseDictData("ZLMediaKit_conf", "mp4SavePath").getValue();
// mp4SaveApi
String mp4SaveApi = dictDataService.parseDictData("ZLMediaKit_conf", "mp4SaveApi").getValue();
// get the current date
LocalDate currentDate = LocalDate.now();
@ -93,7 +96,7 @@ public class ZLMediaKitServiceImpl implements ZLMediaKitService{
.map(Path::toString)
.filter(name -> name.startsWith("."))
.collect(Collectors.toList());
return hiddenFiles.isEmpty() ? "" : "record/"+app+"/camera"+cameraId+"/"+formattedDate+"/"+removeLeadingDot(hiddenFiles.get(0));
return hiddenFiles.isEmpty() ? "" : mp4SaveApi+"record/"+app+"/camera"+cameraId+"/"+formattedDate+"/"+removeLeadingDot(hiddenFiles.get(0));
} catch (IOException e) {
e.printStackTrace();
hiddenFiles = List.of(); // return an empty list
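For context on what startRecord/stopRecord wrap here: ZLMediaKit's HTTP API exposes /index/api/startRecord and /index/api/stopRecord, which take the API secret plus type/vhost/app/stream parameters, and recorded MP4s land under a per-stream, per-date folder, which is what checkHiddenFilesInDirectory scans. A hedged sketch of the start call with the JDK HttpClient, assuming a ZLMediaKit server on 127.0.0.1 and a stream named after the camera id as in the hunk (the base URL and stream naming are assumptions; the parameter names follow ZLMediaKit's public API):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ZlmStartRecordSketch {
    public static void main(String[] args) throws Exception {
        String secret = "zlm-api-secret";     // normally parseDictData("ZLMediaKit_conf", "secret").getValue()
        String app = "live";
        String stream = "camera1024";         // assumption: stream name derived from the camera id
        // type=1 requests an MP4 recording (type=0 would be HLS)
        String url = "http://127.0.0.1/index/api/startRecord"
                + "?secret=" + secret
                + "&type=1&vhost=__defaultVhost__"
                + "&app=" + app
                + "&stream=" + stream;
        HttpResponse<String> resp = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(URI.create(url)).GET().build(),
                        HttpResponse.BodyHandlers.ofString());
        System.out.println(resp.body());      // e.g. {"code":0,"result":true} when recording starts
    }
}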

@ -33,7 +33,7 @@ public class PathUtil {
public static String createFileName(KsecDataInfo dataInfo, StreetDO street, String picCmd, String fileType) {
String pathSrc = "";
if (dataInfo.getToColumn() == null || dataInfo.getToRow() == null) {
if (dataInfo.getToColumn() != null && dataInfo.getToColumn()!=0) {
pathSrc =fileType+"/"+ street.getPlcId() + "/row" + dataInfo.getToRow() + "/column" + dataInfo.getToColumn() + "/" + dataInfo.getTaskId() + "/";
} else {
pathSrc =fileType+"/"+ street.getPlcId() + "/row" + dataInfo.getFromRow() + "/column" + dataInfo.getFromColumn() + "/" + dataInfo.getTaskId() + "/";

@ -11,6 +11,7 @@ import cn.iocoder.yudao.module.system.controller.admin.dict.vo.data.DictDataPage
import cn.iocoder.yudao.module.system.controller.admin.dict.vo.data.DictDataRespVO;
import cn.iocoder.yudao.module.system.controller.admin.dict.vo.data.DictDataSaveReqVO;
import cn.iocoder.yudao.module.system.controller.admin.dict.vo.data.DictDataSimpleRespVO;
import cn.iocoder.yudao.module.system.controller.app.dict.vo.AppDictDataRespVO;
import cn.iocoder.yudao.module.system.dal.dataobject.dict.DictDataDO;
import cn.iocoder.yudao.module.system.service.dict.DictDataService;
import io.swagger.v3.oas.annotations.Operation;
@ -38,6 +39,15 @@ public class DictDataController {
@Resource
private DictDataService dictDataService;
@GetMapping("/type")
@Operation(summary = "根据字典类型查询字典数据信息")
@Parameter(name = "type", description = "字典类型", required = true, example = "common_status")
public CommonResult<List<AppDictDataRespVO>> getDictDataListByType(@RequestParam("type") String type) {
List<DictDataDO> list = dictDataService.getDictDataList(
CommonStatusEnum.ENABLE.getStatus(), type);
return success(BeanUtils.toBean(list, AppDictDataRespVO.class));
}
@PostMapping("/create")
@Operation(summary = "新增字典数据")
@PreAuthorize("@ss.hasPermission('system:dict:create')")

@ -144,11 +144,11 @@ public class DictDataServiceImpl extends ServiceImpl<DictDataMapper,DictDataDO>
// validate that the dict data exists
validateDictDataExists(id);
// load before deleting, so the redis cleanup below still has the type and label
DictDataDO updateReqVO = dictDataMapper.selectById(id);
// delete the dict data
dictDataMapper.deleteById(id);
DictDataDO updateReqVO = dictDataMapper.selectById(id);
// redis
redisUtil.hdel("dict:"+updateReqVO.getDictType(), updateReqVO.getLabel());
}

@ -21,6 +21,17 @@
<url>https://github.com/YunaiV/ruoyi-vue-pro</url>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.3.11</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.3.11</version>
</dependency>
<dependency>
<groupId>cn.iocoder.boot</groupId>
<artifactId>yudao-module-system-biz</artifactId>

@ -47,15 +47,15 @@ spring:
datasource:
master:
name: sy-logistics
url: jdbc:mysql://127.0.0.1:3306/${spring.datasource.dynamic.datasource.master.name}?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true # MySQL Connector/J 8.X 连接的示例
# url: jdbc:mysql://127.0.0.1:3306/${spring.datasource.dynamic.datasource.master.name}?useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&characterEncoding=UTF-8&serverTimezone=CTT # MySQL Connector/J 5.X 连接的示例
url: jdbc:mysql://192.168.2.162:3306/${spring.datasource.dynamic.datasource.master.name}?useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true # MySQL Connector/J 8.X 连接的示例
# url: jdbc:mysql://192.168.2.162:3306/${spring.datasource.dynamic.datasource.master.name}?useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&characterEncoding=UTF-8&serverTimezone=CTT # MySQL Connector/J 5.X 连接的示例
# url: jdbc:postgresql://127.0.0.1:5432/${spring.datasource.dynamic.datasource.master.name} # PostgreSQL 连接的示例
# url: jdbc:oracle:thin:@127.0.0.1:1521:xe # Oracle 连接的示例
# url: jdbc:sqlserver://127.0.0.1:1433;DatabaseName=${spring.datasource.dynamic.datasource.master.name} # SQLServer 连接的示例
# url: jdbc:dm://10.211.55.4:5236?schema=RUOYI_VUE_PRO # DM 连接的示例
username: root
password: root
password: Leaper@123
# username: sa
# password: JSm:g(*%lU4ZAkz06cd52KqT3)i1?H7W
# username: SYSDBA # DM 连接的示例
@ -116,18 +116,18 @@ spring:
# rocketmq 配置项,对应 RocketMQProperties 配置类
rocketmq:
name-server: 127.0.0.1:9876 # RocketMQ Namesrv
name-server: 192.168.2.162:9876 # RocketMQ Namesrv
spring:
# RabbitMQ 配置项,对应 RabbitProperties 配置类
rabbitmq:
host: 127.0.0.1 # RabbitMQ 服务的地址
host: 192.168.2.162 # RabbitMQ 服务的地址
port: 5672 # RabbitMQ 服务的端口
username: rabbit # RabbitMQ 服务的账号
password: rabbit # RabbitMQ 服务的密码
# Kafka 配置项,对应 KafkaProperties 配置类
kafka:
bootstrap-servers: 127.0.0.1:9092 # 指定 Kafka Broker 地址,可以设置多个,以逗号分隔
bootstrap-servers: 192.168.2.162:9092 # 指定 Kafka Broker 地址,可以设置多个,以逗号分隔
--- #################### 服务保障相关配置 ####################
@ -154,7 +154,7 @@ spring:
admin:
# Spring Boot Admin Client 客户端的相关配置
client:
url: http://127.0.0.1:${server.port}/${spring.boot.admin.context-path} # 设置 Spring Boot Admin Server 地址
url: http://192.168.2.162:${server.port}/${spring.boot.admin.context-path} # 设置 Spring Boot Admin Server 地址
instance:
service-host-type: IP # 注册实例时,优先使用 IP [IP, HOST_NAME, CANONICAL_HOST_NAME]
# Spring Boot Admin Server 服务端的相关配置

@ -1,76 +1,80 @@
<configuration>
<!-- 引用 Spring Boot 的 logback 基础配置 -->
<include resource="org/springframework/boot/logging/logback/defaults.xml" />
<!-- 变量 yudao.info.base-package基础业务包 -->
<springProperty scope="context" name="yudao.info.base-package" source="yudao.info.base-package"/>
<!-- 格式化输出:%d 表示日期,%X{tid} SkWalking 链路追踪编号,%thread 表示线程名,%-5level级别从左显示 5 个字符宽度,%msg日志消息%n是换行符 -->
<property name="PATTERN_DEFAULT" value="%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} | %highlight(${LOG_LEVEL_PATTERN:-%5p} ${PID:- }) | %boldYellow(%thread [%tid]) %boldGreen(%-40.40logger{39}) | %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>
<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="false">
<!-- 控制台 Appender -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">     
<encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
<layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout">
<pattern>${PATTERN_DEFAULT}</pattern>
</layout>
<!-- 定义日志文件的存储位置 -->
<property name="LOG_HOME" value="logs" />
<!-- 控制台输出 -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
</appender>
<!-- 按日期和大小滚动的INFO日志 appender -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
<file>${LOG_HOME}/info.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 每天滚动一次,并按日期归档 -->
<fileNamePattern>${LOG_HOME}/info.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<!-- 每个日志文件最大10MB -->
<maxFileSize>10MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- 保留30天的日志 -->
<maxHistory>30</maxHistory>
</rollingPolicy>
</appender>
<!-- 文件 Appender -->
<!-- 参考 Spring Boot 的 file-appender.xml 编写 -->
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
<layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout">
<pattern>${PATTERN_DEFAULT}</pattern>
</layout>
<!-- 按日期和大小滚动的ERROR日志 appender -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
<!-- 日志文件名 -->
<file>${LOG_FILE}</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- 滚动后的日志文件名 -->
<fileNamePattern>${LOGBACK_ROLLINGPOLICY_FILE_NAME_PATTERN:-${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz}</fileNamePattern>
<!-- 启动服务时,是否清理历史日志,一般不建议清理 -->
<cleanHistoryOnStart>${LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START:-false}</cleanHistoryOnStart>
<!-- 日志文件,到达多少容量,进行滚动 -->
<maxFileSize>${LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE:-10MB}</maxFileSize>
<!-- 日志文件的总大小0 表示不限制 -->
<totalSizeCap>${LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP:-0}</totalSizeCap>
<!-- 日志文件的保留天数 -->
<maxHistory>${LOGBACK_ROLLINGPOLICY_MAX_HISTORY:-30}</maxHistory>
<file>${LOG_HOME}/error.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 每天滚动一次,并按日期归档 -->
<fileNamePattern>${LOG_HOME}/error.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<!-- 每个日志文件最大10MB -->
<maxFileSize>10MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- 保留30天的日志 -->
<maxHistory>30</maxHistory>
</rollingPolicy>
</appender>
<!-- 异步写入日志,提升性能 -->
<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
<!-- 不丢失日志。默认的,如果队列的 80% 已满,则会丢弃 TRACT、DEBUG、INFO 级别的日志 -->
<!-- 异步写入日志 -->
<appender name="ASYNC_INFO" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="INFO_FILE" />
<queueSize>512</queueSize>
<discardingThreshold>0</discardingThreshold>
<!-- 更改默认的队列的深度,该值会影响性能。默认值为 256 -->
<queueSize>256</queueSize>
<appender-ref ref="FILE"/>
</appender>
<!-- SkyWalking GRPC 日志收集实现日志中心。注意SkyWalking 8.4.0 版本开始支持 -->
<appender name="GRPC" class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender">
<encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
<layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout">
<pattern>${PATTERN_DEFAULT}</pattern>
</layout>
</encoder>
<appender name="ASYNC_ERROR" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="ERROR_FILE" />
<queueSize>512</queueSize>
<discardingThreshold>0</discardingThreshold>
</appender>
<!-- 本地环境 -->
<springProfile name="local">
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="GRPC"/> <!-- 本地环境下,如果不想接入 SkyWalking 日志服务,可以注释掉本行 -->
<appender-ref ref="ASYNC"/> <!-- 本地环境下,如果不想打印日志,可以注释掉本行 -->
</root>
</springProfile>
<!-- 其它环境 -->
<springProfile name="dev,test,stage,prod,default">
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ASYNC"/>
<appender-ref ref="GRPC"/>
</root>
</springProfile>
<!-- 设置日志级别 -->
<root level="INFO">
<appender-ref ref="STDOUT" />
<appender-ref ref="ASYNC_INFO" />
<appender-ref ref="ASYNC_ERROR" />
</root>
</configuration>
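Several hunks above log through a dedicated tcpLogger, but its declaration is not part of this diff. A hedged sketch of one common wiring, assuming the logger is simply obtained by name; a matching <logger name="tcpLogger"> element in the logback.xml above could then route it to its own appender instead of the root ones:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TcpLoggerSketch {
    // Named logger: logback can target it with <logger name="tcpLogger" level="INFO"> ...
    private static final Logger tcpLogger = LoggerFactory.getLogger("tcpLogger");

    public static void main(String[] args) {
        tcpLogger.info("发送数据:{}", "<demo>");   // same call style as KescEncoder above
    }
}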

Binary file not shown.