Browse Source

提交订单缓存模块

cangku
Houpn 3 years ago
parent
commit
fce0886008
  1. 18
      hiver-admin/pom.xml
  2. 47
      hiver-admin/src/main/resources/application.yml
  3. 174
      hiver-admin/src/main/resources/logback.xml
  4. 4
      hiver-core/pom.xml
  5. 62
      hiver-core/src/main/java/cc/hiver/core/config/cache/CustomCacheResolver.java
  6. 52
      hiver-core/src/main/java/cc/hiver/core/config/cache/RedisCacheConfig.java
  7. 8
      hiver-modules/hiver-base/src/main/java/cc/hiver/base/controller/manage/OrderController.java
  8. 41
      hiver-modules/hiver-base/src/main/java/cc/hiver/base/handler/OrderXdHandler.java
  9. 5
      pom.xml

18
hiver-admin/pom.xml

@ -66,6 +66,24 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-resolver-dns-native-macos</artifactId>
<version>4.1.77.Final</version>
<classifier>osx-aarch_64</classifier>
</dependency>
<!--监控sql日志-->
<dependency>
<groupId>org.bgee.log4jdbc-log4j2</groupId>
<artifactId>log4jdbc-log4j2-jdbc4.1</artifactId>
<version>1.16</version>
</dependency>
<dependency>
<groupId>top.javatool</groupId>
<artifactId>canal-spring-boot-starter</artifactId>
<version>1.2.1-RELEASE</version>
</dependency>
</dependencies>
<build>

47
hiver-admin/src/main/resources/application.yml

@ -23,12 +23,15 @@ spring:
timeout-per-shutdown-phase: 10S
# 数据源
datasource:
url: jdbc:mysql://154.8.162.157:3306/hiver_shop?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
url: jdbc:log4jdbc:mysql://154.8.162.157:3306/hiver_shop?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
#url: jdbc:mysql://154.8.162.157:3306/hiver_shop?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
username: reddoor
# Jasypt加密 可到common-utils中找到JasyptUtil加解密工具类生成加密结果 格式为ENC(加密结果) 以下解密结果为123456
password: ENC(Zla4U4+yRLPhicvuX2TmiEgxEpzP4dk8BHzFDEtiEhwLQIIaftZrrEUJZce6efoe)
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: com.mysql.jdbc.Driver
driver-class-name: net.sf.log4jdbc.sql.jdbcapi.DriverSpy
#driver-class-name: com.mysql.jdbc.Driver
# Druid StatViewServlet配置
druid:
stat-view-servlet:
@ -58,10 +61,11 @@ spring:
multi-statement-allow: true
jpa:
# 显示sql
show-sql: true
#show-sql: true
# 自动生成表结构 关闭设为none
hibernate:
ddl-auto: update
database-platform: org.hibernate.dialect.MySQL5InnoDBDialect #设置数据库方言 记住必须要使用 MySQL5InnoDBDialect 指定数据库类型对应InnoDB ;如果使用MySQLDialect 则对应的是MyISAM
# Redis 若设有密码自行添加配置password
redis:
host: 154.8.162.157
@ -70,6 +74,16 @@ spring:
port: 6379
# 超时时间 Duration类型 3秒
timeout: 3S
jedis:
pool:
max-active: 8
max-wait: -1
max-idle: 8
min-idle: 0
#Canal服务用于数据同步
canal:
server: 154.8.162.157:11111
destination: example
# Elasticsearch
data:
elasticsearch:
@ -97,7 +111,7 @@ spring:
# 加锁调度
acquireTriggersWithinLock: true
# “容忍”触发器经过下一次触发时间的毫秒数
misfireThreshold: 10000
misfireThreshold: 500000
servlet:
multipart:
max-file-size: -1
@ -304,8 +318,10 @@ management:
OUT_OF_SERVICE: 200
FATAL: 200
UNKNOWN: 200
enabled: false
endpoints:
web:
enabled-by-default: false #关闭所有监控端点
base-path: /hiver/actuator/
exposure:
include: '*'
@ -336,15 +352,16 @@ mybatis-plus:
# 日志
logging:
config: classpath:logback.xml
# 输出级别
level:
root: info
file:
# 指定路径
path: hiver-logs
logback:
rollingpolicy:
# 最大保存天数
max-history: 7
# 每个文件最大大小
max-file-size: 5MB
# level:
# root: info
# file:
# # 指定路径
# path: hiver-logs
# logback:
# rollingpolicy:
# # 最大保存天数
# max-history: 7
# # 每个文件最大大小
# max-file-size: 5MB

174
hiver-admin/src/main/resources/logback.xml

@ -0,0 +1,174 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- 日志级别从低到高分为TRACE < DEBUG < INFO < WARN < ERROR < FATAL,如果设置为WARN,则低于WARN的信息都不会输出 -->
<!-- scan 当此属性设置为true时,配置文件如果发生改变,将会被重新加载,默认值为true。 -->
<!-- scanPeriod 设置监测配置文件是否有修改的时间间隔,如果没有给出时间单位,默认单位是毫秒。当scan为true时,此属性生效。默认的时间间隔为1分钟。 -->
<!-- debug 当此属性设置为true时,将打印出logback内部日志信息,实时查看logback运行状态。默认值为false。 -->
<configuration scan="false" scanPeriod="60 seconds" debug="false">
<!-- 读取application.properties配置 -->
<!-- <property resource="application.properties" />-->
<contextName>logback</contextName>
<!-- name的值是变量的名称,value的值时变量定义的值。通过定义的值会被插入到logger上下文中。定义变量后,可以使“${}”来使用变量。 -->
<!--Linux下的路径-->
<!--<property name="log.path" value="/www/wwwroot/zdj_log/" />-->
<!-- 生成文件名前缀 -->
<property name="FILE_PREFIX" value="Reddoor" />
<!-- 输出文件路径 -->
<property name="OPEN_FILE_PATH" value="./logs"/>
<!-- 文件保存时间 这里是7天 -->
<property name="EXIST_TIME" value="7"/>
<!-- 文件输出格式 -->
<property name="PATTERN" value="%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n"/>
<!-- 日志文件最大的大小 -->
<property name="MAX_FILE_SIZE" value="100MB"/>
<!-- 彩色日志 -->
<!-- 彩色日志依赖的渲染类 -->
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
<!-- 彩色日志格式 -->
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!--输出到控制台-->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<!--此日志appender是为开发使用,只配置最底级别,控制台输出的日志级别是大于或等于此级别的日志信息-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>debug</level>
</filter>
<encoder>
<Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
<!-- 设置字符集 -->
<charset>UTF-8</charset>
</encoder>
</appender>
<!--输出到debug文件-->
<appender name="debug" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文件的路径及文件名 -->
<file>${OPEN_FILE_PATH}/log_debug.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<FileNamePattern>${OPEN_FILE_PATH}/debug/${FILE_PREFIX}-debug_%d{yyyy-MM-dd}-%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TimeBasedFileNamingAndTriggeringPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<MaxFileSize>${MAX_FILE_SIZE}</MaxFileSize>
</TimeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<append>true</append>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} - %msg%n</pattern>
<charset>utf-8</charset>
</encoder>
<!-- 只打印DEBUG日志, -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!-- 过滤的级别 -->
<level>DEBUG</level>
<!-- 匹配时的操作:接收(记录) -->
<onMatch>ACCEPT</onMatch>
<!-- 不匹配时的操作:拒绝(不记录) -->
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!--输出到info文件-->
<appender name="info" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文件的路径及文件名 -->
<file>${OPEN_FILE_PATH}/log_info.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<FileNamePattern>${OPEN_FILE_PATH}/info/${FILE_PREFIX}_%d{yyyy-MM-dd}-%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TimeBasedFileNamingAndTriggeringPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<MaxFileSize>${MAX_FILE_SIZE}</MaxFileSize>
</TimeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<append>true</append>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} - %msg%n</pattern>
<charset>utf-8</charset>
</encoder>
<!-- 只打印INFO日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<!-- 匹配时的操作:接收(记录) -->
<onMatch>ACCEPT</onMatch>
<!-- 不匹配时的操作:拒绝(不记录) -->
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!--输出到warn文件-->
<appender name="warn" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文件的路径及文件名 -->
<file>${OPEN_FILE_PATH}/log_warn.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<FileNamePattern>${OPEN_FILE_PATH}/warn/${FILE_PREFIX}_%d{yyyy-MM-dd}-%i.log</FileNamePattern>
<MaxHistory>30</MaxHistory>
<TimeBasedFileNamingAndTriggeringPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<MaxFileSize>${MAX_FILE_SIZE}</MaxFileSize>
</TimeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<append>true</append>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} - %msg%n</pattern>
<charset>utf-8</charset>
</encoder>
<!-- 只打印WARN日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<!-- 匹配时的操作:接收(记录) -->
<onMatch>ACCEPT</onMatch>
<!-- 不匹配时的操作:拒绝(不记录) -->
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- 时间滚动输出 level为 ERROR 日志 -->
<!-- Time-and-size rolling appender for ERROR-level logs only -->
<appender name="error" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the log file currently being written -->
<file>${OPEN_FILE_PATH}/log_error.log</file>
<!-- Log line format for the file -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- charset for this appender -->
</encoder>
<!-- Rolling policy: roll by date and by size.
     FIX: the original used ${log.path}, which is never defined in this file
     (only OPEN_FILE_PATH is), so logback would write rolled files under a
     literal "log.path_IS_UNDEFINED" directory. -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${OPEN_FILE_PATH}/error/log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<!-- Use the shared MAX_FILE_SIZE property (100MB) like the sibling appenders -->
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days of history to keep -->
<maxHistory>15</maxHistory>
</rollingPolicy>
<!-- This file records ERROR level only -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!--设置为OFF,即屏蔽; 留下sqltiming作为INFO级别输出-->
<logger name="jdbc.connection" level="OFF"/>
<logger name="jdbc.resultset" level="OFF"/>
<logger name="jdbc.resultsettable" level="OFF"/>
<logger name="jdbc.audit" level="OFF"/>
<logger name="jdbc.sqltiming" level="info"/>
<logger name="jdbc.sqlonly" level="OFF"/>
<!-- 默认输出info等级,然后再根据各自的拦截过滤规则去处理 -->
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<!--<appender-ref ref="debug" />-->
<appender-ref ref="info" />
<appender-ref ref="error" />
<!--<appender-ref ref="warn" />-->
</root>
</configuration>

4
hiver-core/pom.xml

@ -138,5 +138,9 @@
<groupId>org.seleniumhq.selenium</groupId>
<artifactId>selenium-java</artifactId>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
</dependencies>
</project>

62
hiver-core/src/main/java/cc/hiver/core/config/cache/CustomCacheResolver.java

@ -0,0 +1,62 @@
package cc.hiver.core.config.cache;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.interceptor.CacheOperationInvocationContext;
import org.springframework.cache.interceptor.CacheResolver;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

/**
 * A {@link CacheResolver} that resolves caches from an ordered list of
 * {@link CacheManager}s.
 *
 * <p>For every cache name declared on the intercepted cache operation, the
 * corresponding cache of EACH manager is returned, in list order. Spring's
 * cache abstraction consults the returned caches in that order, so placing a
 * fast in-heap manager (e.g. Caffeine) before a remote one (e.g. Redis)
 * gives the heap cache read priority.
 *
 * <p>Throws {@link IllegalArgumentException} if any manager cannot supply a
 * cache for a requested name (same contract as Spring's own
 * {@code AbstractCacheResolver}).
 */
public class CustomCacheResolver implements CacheResolver, InitializingBean {
    /** Ordered managers; the first entry has the highest read priority. */
    @Nullable
    private List<CacheManager> cacheManagerList;

    public CustomCacheResolver() {
    }

    public CustomCacheResolver(List<CacheManager> cacheManagerList) {
        this.cacheManagerList = cacheManagerList;
    }

    public void setCacheManagerList(@Nullable List<CacheManager> cacheManagerList) {
        this.cacheManagerList = cacheManagerList;
    }

    @Nullable
    public List<CacheManager> getCacheManagerList() {
        return cacheManagerList;
    }

    @Override
    public void afterPropertiesSet() {
        Assert.notNull(this.cacheManagerList, "CacheManager is required");
    }

    @Override
    public Collection<? extends Cache> resolveCaches(CacheOperationInvocationContext<?> context) {
        List<CacheManager> managers = getCacheManagerList();
        // Defensive guard: the resolver can be constructed with the no-arg
        // constructor and used before afterPropertiesSet() validation ran;
        // the original code would NPE here in that case.
        if (managers == null || managers.isEmpty()) {
            return Collections.emptyList();
        }
        Collection<String> cacheNames = getCacheNames(context);
        if (cacheNames.isEmpty()) {
            return Collections.emptyList();
        }
        Collection<Cache> result = new ArrayList<>(managers.size() * cacheNames.size());
        for (CacheManager cacheManager : managers) {
            for (String cacheName : cacheNames) {
                Cache cache = cacheManager.getCache(cacheName);
                if (cache == null) {
                    throw new IllegalArgumentException("Cannot find cache named '" +
                            cacheName + "' for " + context.getOperation());
                }
                result.add(cache);
            }
        }
        return result;
    }

    /** Cache names declared on the operation (never null per Spring's contract). */
    private Collection<String> getCacheNames(CacheOperationInvocationContext<?> context) {
        return context.getOperation().getCacheNames();
    }
}

52
hiver-core/src/main/java/cc/hiver/core/config/cache/RedisCacheConfig.java

@ -4,22 +4,27 @@ import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CachingConfigurerSupport;
import org.springframework.cache.caffeine.CaffeineCacheManager;
import org.springframework.cache.interceptor.CacheErrorHandler;
import org.springframework.cache.interceptor.CacheResolver;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.*;
import java.io.Serializable;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
/**
* Redis缓存
@ -32,14 +37,48 @@ public class RedisCacheConfig extends CachingConfigurerSupport {
@Value("${hiver.cache.timeToLive:-1}")
private Duration timeToLive;
@Autowired
RedisConnectionFactory factory;
@Override
public CacheResolver cacheResolver() {
// In-heap cache manager (NOTE(review): original comment said "Guava", but this is Caffeine — CaffeineCacheManager)
CacheManager caffeineCacheManager = new CaffeineCacheManager();
CacheManager redisCacheManager = cacheManager();
List<CacheManager> list = new ArrayList<>();
// Heap cache is listed first so it is read with priority
list.add(caffeineCacheManager);
// When the key is absent from the heap cache, fall back to the Redis cache
list.add(redisCacheManager);
return new CustomCacheResolver(list);
}
@Bean
public RedisTemplate<String, Serializable> redisCacheTemplate(LettuceConnectionFactory redisConnectionFactory){
//初始化一个redis模板
RedisTemplate<String, Serializable> template = new RedisTemplate<>();
// key采用String的序列化方式
template.setKeySerializer(new StringRedisSerializer());
// value序列化方式采用jackson
template.setValueSerializer(new GenericJackson2JsonRedisSerializer());
// hash的key也采用String的序列化方式
template.setHashKeySerializer(new StringRedisSerializer());
// hash的value序列化方式采用jackson
template.setHashValueSerializer(new GenericJackson2JsonRedisSerializer());
template.setConnectionFactory(redisConnectionFactory);
return template;
}
/**
* 自定义序列化方式
*
* @param factory
* @param
* @return
*/
@Bean
public CacheManager cacheManager(RedisConnectionFactory factory) {
public CacheManager cacheManager() {
RedisSerializer<String> redisSerializer = new StringRedisSerializer();
Jackson2JsonRedisSerializer jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer(Object.class);
@ -92,4 +131,5 @@ public class RedisCacheConfig extends CachingConfigurerSupport {
};
return cacheErrorHandler;
}
}

8
hiver-modules/hiver-base/src/main/java/cc/hiver/base/controller/manage/OrderController.java

@ -41,6 +41,7 @@ import java.util.stream.Collectors;
*
* @author Yazhi Li
*/
@Slf4j
@RestController
@Api(tags = "订单接口")
@ -117,6 +118,13 @@ public class OrderController {
* 1.当前抢单工的抢单状态需进行查验非正常无法进行抢单
* 2.正常状态下判定当前抢单工押金余额是否充足不足时需要将当前扛包工状态自动修正为不可接单状态同时当前订单无法抢购
* 3.正常抢单情况下需要在redis中设置抢单后的剩余押金金额要存在抢单锁机制实时更新余额
*
* 计划暂定使用延时双删策略尽量保证最终一致性但不是强一致性
* 先进行缓存清除再执行update最后延迟N秒再执行缓存清除
* 延迟N秒的时间要大于一次写操作的时间一般为3-5秒
* 原因如果延迟时间小于写入redis的时间会导致请求1清除了缓存但是请求2缓存还未写入的尴尬
* ps:一般写入的时间会远小于5秒
*
*/
@RequestMapping(value = "/rush/order/{orderId}", method = RequestMethod.POST)
@ApiOperation(value = "抢单接口", notes = "需要通过下单编号获取订单信息后进行绑定")

41
hiver-modules/hiver-base/src/main/java/cc/hiver/base/handler/OrderXdHandler.java

@ -0,0 +1,41 @@
package cc.hiver.base.handler;
import cc.hiver.core.entity.OrderXd;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import top.javatool.canal.client.annotation.CanalTable;
import top.javatool.canal.client.handler.EntryHandler;
/**
 * Canal binlog entry handler for the {@code bs_ecif} table: mirrors order
 * rows into Redis under the key {@code ORDER:<orderId>} so that row
 * inserts/updates refresh the cache and deletes evict it.
 *
 * @author benjamin_5
 * @date 2022/9/25
 */
@CanalTable("bs_ecif")
@Component
@AllArgsConstructor
@Slf4j
public class OrderXdHandler implements EntryHandler<OrderXd> {
    private final RedisTemplate<Object, Object> redisTemplate;

    /** Row inserted: write the new order into the cache. */
    @Override
    public void insert(OrderXd orderXd) {
        // Parameterized logging instead of string concatenation: no eager
        // toString()/concat cost when INFO is disabled, and no NPE on a
        // null entity (the original called orderXd.toString() explicitly).
        log.info("[新增]{}", orderXd);
        redisTemplate.opsForValue().set("ORDER:" + orderXd.getOrderId(), orderXd);
    }

    /** Row updated: overwrite the cached order with the post-image. */
    @Override
    public void update(OrderXd before, OrderXd after) {
        log.info("[更新]{}", after);
        redisTemplate.opsForValue().set("ORDER:" + after.getOrderId(), after);
    }

    /** Row deleted: evict the cached order. */
    @Override
    public void delete(OrderXd orderXd) {
        log.info("[删除]{}", orderXd);
        redisTemplate.delete("ORDER:" + orderXd.getOrderId());
    }
}

5
pom.xml

@ -282,6 +282,11 @@
<artifactId>selenium-java</artifactId>
<version>${selenuim.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-cache</artifactId>
<version>${spring.boot.version}</version>
</dependency>
</dependencies>
</dependencyManagement>

Loading…
Cancel
Save