Переглянути джерело

init(system):项目初始化

wzq 4 місяців тому
батько
коміт
38ac139826
100 змінених файлів з 9527 додано та 0 видалено
  1. 19 0
      .gitignore
  2. 69 0
      docker/docker-compose.yml
  3. 0 0
      docker/minio/README.md
  4. 20 0
      docker/mysql/conf/my.cnf
  5. 2297 0
      docker/redis/config/redis.conf
  6. 16 0
      docker/run.md
  7. 0 0
      docker/xxljob/README.md
  8. 572 0
      sql/mysql/youlai_boot.sql
  9. 21 0
      src/main/java/com/zsElectric/boot/ZsElectricBootApplication.java
  10. 115 0
      src/main/java/com/zsElectric/boot/auth/controller/AuthController.java
  11. 22 0
      src/main/java/com/zsElectric/boot/auth/model/dto/WxMiniAppCodeLoginDTO.java
  12. 28 0
      src/main/java/com/zsElectric/boot/auth/model/dto/WxMiniAppPhoneLoginDTO.java
  13. 24 0
      src/main/java/com/zsElectric/boot/auth/model/vo/CaptchaVO.java
  14. 84 0
      src/main/java/com/zsElectric/boot/auth/service/AuthService.java
  15. 270 0
      src/main/java/com/zsElectric/boot/auth/service/impl/AuthServiceImpl.java
  16. 28 0
      src/main/java/com/zsElectric/boot/common/annotation/DataPermission.java
  17. 49 0
      src/main/java/com/zsElectric/boot/common/annotation/Log.java
  18. 27 0
      src/main/java/com/zsElectric/boot/common/annotation/RepeatSubmit.java
  19. 35 0
      src/main/java/com/zsElectric/boot/common/annotation/ValidField.java
  20. 48 0
      src/main/java/com/zsElectric/boot/common/base/BaseEntity.java
  21. 29 0
      src/main/java/com/zsElectric/boot/common/base/BasePageQuery.java
  22. 21 0
      src/main/java/com/zsElectric/boot/common/base/BaseVO.java
  23. 88 0
      src/main/java/com/zsElectric/boot/common/base/IBaseEnum.java
  24. 38 0
      src/main/java/com/zsElectric/boot/common/constant/JwtClaimConstants.java
  25. 60 0
      src/main/java/com/zsElectric/boot/common/constant/RedisConstants.java
  26. 25 0
      src/main/java/com/zsElectric/boot/common/constant/SecurityConstants.java
  27. 32 0
      src/main/java/com/zsElectric/boot/common/constant/SystemConstants.java
  28. 27 0
      src/main/java/com/zsElectric/boot/common/enums/CaptchaTypeEnum.java
  29. 31 0
      src/main/java/com/zsElectric/boot/common/enums/DataScopeEnum.java
  30. 26 0
      src/main/java/com/zsElectric/boot/common/enums/EnvEnum.java
  31. 33 0
      src/main/java/com/zsElectric/boot/common/enums/LogModuleEnum.java
  32. 52 0
      src/main/java/com/zsElectric/boot/common/enums/RequestMethodEnum.java
  33. 27 0
      src/main/java/com/zsElectric/boot/common/enums/StatusEnum.java
  34. 30 0
      src/main/java/com/zsElectric/boot/common/model/KeyValue.java
  35. 53 0
      src/main/java/com/zsElectric/boot/common/model/Option.java
  36. 61 0
      src/main/java/com/zsElectric/boot/common/util/DateUtils.java
  37. 19 0
      src/main/java/com/zsElectric/boot/common/util/ExcelUtils.java
  38. 139 0
      src/main/java/com/zsElectric/boot/common/util/IPUtils.java
  39. 37 0
      src/main/java/com/zsElectric/boot/config/CaffeineConfig.java
  40. 55 0
      src/main/java/com/zsElectric/boot/config/CaptchaConfig.java
  41. 42 0
      src/main/java/com/zsElectric/boot/config/CorsConfig.java
  42. 51 0
      src/main/java/com/zsElectric/boot/config/MailConfig.java
  43. 48 0
      src/main/java/com/zsElectric/boot/config/MybatisConfig.java
  44. 106 0
      src/main/java/com/zsElectric/boot/config/OpenApiConfig.java
  45. 24 0
      src/main/java/com/zsElectric/boot/config/PasswordEncoderConfig.java
  46. 74 0
      src/main/java/com/zsElectric/boot/config/RedisCacheConfig.java
  47. 42 0
      src/main/java/com/zsElectric/boot/config/RedisConfig.java
  48. 168 0
      src/main/java/com/zsElectric/boot/config/SecurityConfig.java
  49. 93 0
      src/main/java/com/zsElectric/boot/config/WebMvcConfig.java
  50. 293 0
      src/main/java/com/zsElectric/boot/config/WebSocketConfig.java
  51. 41 0
      src/main/java/com/zsElectric/boot/config/WxMiniAppConfig.java
  52. 61 0
      src/main/java/com/zsElectric/boot/config/XxlJobConfig.java
  53. 50 0
      src/main/java/com/zsElectric/boot/config/property/AliyunSmsProperties.java
  54. 92 0
      src/main/java/com/zsElectric/boot/config/property/CaptchaProperties.java
  55. 96 0
      src/main/java/com/zsElectric/boot/config/property/CodegenProperties.java
  56. 89 0
      src/main/java/com/zsElectric/boot/config/property/MailProperties.java
  57. 112 0
      src/main/java/com/zsElectric/boot/config/property/SecurityProperties.java
  58. 232 0
      src/main/java/com/zsElectric/boot/core/aspect/LogAspect.java
  59. 102 0
      src/main/java/com/zsElectric/boot/core/aspect/RepeatSubmitAspect.java
  60. 45 0
      src/main/java/com/zsElectric/boot/core/exception/BusinessException.java
  61. 278 0
      src/main/java/com/zsElectric/boot/core/exception/GlobalExceptionHandler.java
  62. 98 0
      src/main/java/com/zsElectric/boot/core/filter/RateLimiterFilter.java
  63. 38 0
      src/main/java/com/zsElectric/boot/core/filter/RequestLogFilter.java
  64. 33 0
      src/main/java/com/zsElectric/boot/core/validator/FieldValidator.java
  65. 43 0
      src/main/java/com/zsElectric/boot/core/web/ExcelResult.java
  66. 15 0
      src/main/java/com/zsElectric/boot/core/web/IResultCode.java
  67. 46 0
      src/main/java/com/zsElectric/boot/core/web/PageResult.java
  68. 74 0
      src/main/java/com/zsElectric/boot/core/web/Result.java
  69. 300 0
      src/main/java/com/zsElectric/boot/core/web/ResultCode.java
  70. 77 0
      src/main/java/com/zsElectric/boot/core/web/WebResponseHelper.java
  71. 113 0
      src/main/java/com/zsElectric/boot/platform/ai/config/AiProperties.java
  72. 103 0
      src/main/java/com/zsElectric/boot/platform/ai/controller/AiCommandController.java
  73. 20 0
      src/main/java/com/zsElectric/boot/platform/ai/mapper/AiCommandAuditMapper.java
  74. 37 0
      src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiCommandRequestDTO.java
  75. 53 0
      src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiCommandResponseDTO.java
  76. 36 0
      src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiExecuteRequestDTO.java
  77. 62 0
      src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiExecuteResponseDTO.java
  78. 38 0
      src/main/java/com/zsElectric/boot/platform/ai/model/dto/FunctionCallDTO.java
  79. 121 0
      src/main/java/com/zsElectric/boot/platform/ai/model/entity/AiCommandAudit.java
  80. 101 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/AbstractOpenAiCompatibleProvider.java
  81. 32 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/AiProvider.java
  82. 51 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/AiProviderFactory.java
  83. 25 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/impl/DeepSeekProvider.java
  84. 30 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/impl/OpenAiProvider.java
  85. 25 0
      src/main/java/com/zsElectric/boot/platform/ai/provider/impl/QwenProvider.java
  86. 67 0
      src/main/java/com/zsElectric/boot/platform/ai/service/AiCommandService.java
  87. 263 0
      src/main/java/com/zsElectric/boot/platform/ai/service/impl/AiCommandServiceImpl.java
  88. 111 0
      src/main/java/com/zsElectric/boot/platform/codegen/controller/CodegenController.java
  89. 41 0
      src/main/java/com/zsElectric/boot/platform/codegen/converter/CodegenConverter.java
  90. 89 0
      src/main/java/com/zsElectric/boot/platform/codegen/enums/FormTypeEnum.java
  91. 85 0
      src/main/java/com/zsElectric/boot/platform/codegen/enums/JavaTypeEnum.java
  92. 73 0
      src/main/java/com/zsElectric/boot/platform/codegen/enums/QueryTypeEnum.java
  93. 47 0
      src/main/java/com/zsElectric/boot/platform/codegen/mapper/DatabaseMapper.java
  94. 20 0
      src/main/java/com/zsElectric/boot/platform/codegen/mapper/GenConfigMapper.java
  95. 20 0
      src/main/java/com/zsElectric/boot/platform/codegen/mapper/GenFieldConfigMapper.java
  96. 50 0
      src/main/java/com/zsElectric/boot/platform/codegen/model/bo/ColumnMetaData.java
  97. 45 0
      src/main/java/com/zsElectric/boot/platform/codegen/model/bo/TableMetaData.java
  98. 64 0
      src/main/java/com/zsElectric/boot/platform/codegen/model/entity/GenConfig.java
  99. 106 0
      src/main/java/com/zsElectric/boot/platform/codegen/model/entity/GenFieldConfig.java
  100. 109 0
      src/main/java/com/zsElectric/boot/platform/codegen/model/form/GenConfigForm.java

+ 19 - 0
.gitignore

@@ -0,0 +1,19 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Example sysUserDetails template template
+### Example sysUserDetails template
+
+# IntelliJ project files
+.idea
+*.iml
+out
+gen
+target
+*.log
+logs
+.history
+
+
+docker/*/data/
+docker/minio/config
+docker/xxljob/logs
+application-youlai.yml

+ 69 - 0
docker/docker-compose.yml

@@ -0,0 +1,69 @@
+# 创建一个名为 "youlai-boot" 的桥接网络,在同一个网络中的容器可以通过容器名互相访问
+networks:
+  youlai-boot:
+    driver: bridge
+
+services:
+  mysql:
+    image: mysql:8.0.29
+    container_name: mysql
+    restart: unless-stopped # 重启策略:除非手动停止容器,否则自动重启
+    environment:
+      - TZ=Asia/Shanghai
+      - LANG=en_US.UTF-8
+      - MYSQL_ROOT_PASSWORD=123456 #设置 root 用户的密码
+    volumes:
+      - ./mysql/conf/my.cnf:/etc/my.cnf # 挂载 my.cnf 文件到容器的指定路径
+      - ./mysql/data:/var/lib/mysql # 持久化 MySQL 数据
+      - ../sql/mysql:/docker-entrypoint-initdb.d # 初始化 SQL 脚本目录
+    ports:
+      - 3306:3306
+    networks:
+      - youlai-boot # 加入 "youlai-boot" 网络
+
+  redis:
+    image: redis:7.2.3
+    container_name: redis
+    restart: unless-stopped
+    command: redis-server /etc/redis/redis.conf --requirepass 123456 --appendonly no # 启动 Redis 服务并添加密码为:123456,默认不开启 Redis AOF 方式持久化配置
+    environment:
+      - TZ=Asia/Shanghai
+    volumes:
+      - ./redis/data:/data
+      - ./redis/config/redis.conf:/etc/redis/redis.conf
+    ports:
+      - 6379:6379
+    networks:
+      - youlai-boot
+
+  minio:
+    image: minio/minio:RELEASE.2024-07-16T23-46-41Z
+    container_name: minio
+    restart: unless-stopped
+    command: server /data --console-address ":9001"
+    ports:
+      - 9000:9000
+      - 9001:9001
+    environment:
+      - TZ=Asia/Shanghai
+      - LANG=en_US.UTF-8
+      - MINIO_ROOT_USER=minioadmin
+      - MINIO_ROOT_PASSWORD=minioadmin
+    volumes:
+      - ./minio/data:/data
+      - ./minio/config:/root/.minio
+    networks:
+      - youlai-boot
+
+  xxl-job-admin:
+    image: xuxueli/xxl-job-admin:2.4.0
+    container_name: xxl-job-admin
+    restart: unless-stopped
+    environment:
+      PARAMS: '--spring.datasource.url=jdbc:mysql://mysql:3306/xxl_job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=root --spring.datasource.password=123456 --spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver'
+    volumes:
+      - ./xxljob/logs:/data/applogs
+    ports:
+      - 8080:8080
+    networks:
+      - youlai-boot

+ 0 - 0
docker/minio/README.md


+ 20 - 0
docker/mysql/conf/my.cnf

@@ -0,0 +1,20 @@
+
+
+[mysqld]
+# 字符集与排序规则
+character-set-server = utf8mb4  # 服务端默认字符集
+collation-server = utf8mb4_0900_ai_ci # 服务端默认排序规则
+
+# 网络与路径
+datadir         = /var/lib/mysql   # 数据文件存放的目录
+bind-address    = 0.0.0.0  # 允许远程连接,默认 127.0.0.1 只允许本地连接
+port            = 3306  # 显式指定端口(默认3306可不写)
+
+# 客户端字符集同步(避免乱码)
+init_connect    = 'SET NAMES utf8mb4'  # 连接初始化时设置字符集
+
+[client]
+default-character-set = utf8mb4  # 客户端默认字符集
+
+[mysql]
+default-character-set = utf8mb4  # MySQL 命令行工具字符集

+ 2297 - 0
docker/redis/config/redis.conf

@@ -0,0 +1,2297 @@
+# 下载地址: http://download.redis.io/redis-stable/redis.conf
+# https://github.com/redis/redis/blob/7.2/redis.conf
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here.  This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings.  Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# Included paths may contain wildcards. All files matching the wildcards will
+# be included in alphabetical order.
+# Note that if an include path contains a wildcards but no files match it when
+# the server is started, the include statement will be ignored and no error will
+# be emitted.  It is safe, therefore, to include wildcard files from empty
+# directories.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+# include /path/to/fragments/*.conf
+#
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that does not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always BE
+# silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1     # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1              # listens on loopback IPv4 and IPv6
+# bind * -::*                     # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# COMMENT OUT THE FOLLOWING LINE.
+#
+# You will also need to set a password unless you explicitly disable protected
+# mode.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#bind 127.0.0.1 -::1
+
+# By default, outgoing connections (from replica to master, from Sentinel to
+# instances, cluster bus, etc.) are not bound to a specific local address. In
+# most cases, this means the operating system will handle that based on routing
+# and the interface through which the connection goes out.
+#
+# Using bind-source-addr it is possible to configure a specific address to bind
+# to, which may also affect how the connection gets routed.
+#
+# Example:
+#
+# bind-source-addr 10.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and the default user has no password, the server
+# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address
+# (::1) or Unix domain sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured.
+protected-mode no
+
+# Redis uses default hardened security configuration directives to reduce the
+# attack surface on innocent users. Therefore, several sensitive configuration
+# directives are immutable, and some potentially-dangerous commands are blocked.
+#
+# Configuration directives that control files that Redis writes to (e.g., 'dir'
+# and 'dbfilename') and that aren't usually modified during runtime
+# are protected by making them immutable.
+#
+# Commands that can increase the attack surface of Redis and that aren't usually
+# called by users are blocked by default.
+#
+# These can be exposed to either all connections or just local ones by setting
+# each of the configs listed below to either of these values:
+#
+# no    - Block for any connection (remain immutable)
+# yes   - Allow for any connection (no protection)
+# local - Allow only for local connections. Ones originating from the
+#         IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets.
+#
+# enable-protected-configs no
+# enable-debug-command no
+# enable-module-command no
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /run/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Force network equipment in the middle to consider the connection to be
+#    alive.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+# Apply OS-specific mechanism to mark the listening socket with the specified
+# ID, to support advanced routing and filtering capabilities.
+#
+# On Linux, the ID represents a connection mark.
+# On FreeBSD, the ID represents a socket cookie ID.
+# On OpenBSD, the ID represents a route table ID.
+#
+# The default value is 0, which implies no marking is required.
+# socket-mark-id 0
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers.  These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Normally Redis uses the same certificate for both server functions (accepting
+# connections) and client functions (replicating from a master, establishing
+# cluster bus connections, etc.).
+#
+# Sometimes certificates are issued with attributes that designate them as
+# client-only or server-only certificates. In that case it may be desired to use
+# different certificates for incoming (server) and outgoing (client)
+# connections. To do that, use the following directives:
+#
+# tls-client-cert-file client.crt
+# tls-client-key-file client.key
+#
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-client-key-file-pass secret
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange,
+# required by older versions of OpenSSL (<3.0). Newer versions do not require
+# this configuration and recommend against it.
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers.  Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the attack surface.
+# You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers.  See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites.  See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# When Redis is supervised by upstart or systemd, this parameter has no impact.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+#   supervised no      - no supervision interaction
+#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+#                        requires "expect stop" in your upstart job config
+#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+#                        on startup, and updating Redis status on a regular
+#                        basis.
+#   supervised auto    - detect upstart or systemd method based on
+#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+#       They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+# nothing (nothing is logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title}           Name of process as executed if parent, or type of child process.
+# {listen-addr}     Bind address or '*' followed by TCP or TLS port listening on, or
+#                   Unix socket if only that's available.
+# {server-mode}     Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port}            TCP port listening on, or 0.
+# {tls-port}        TLS port listening on, or 0.
+# {unixsocket}      Unix domain socket listening on, or "".
+# {config-file}     Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
+
+# Set the local environment which is used for string comparison operations, and 
+# also affect the performance of Lua scripts. Empty String indicates the locale 
+# is derived from the environment variables.
+locale-collate ""
+
+################################ SNAPSHOTTING  ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes> [<seconds> <changes> ...]
+#
+# Redis will save the DB if the given number of seconds elapsed and it
+# surpassed the given number of write operations against the DB.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+#   * After 3600 seconds (an hour) if at least 1 change was performed
+#   * After 300 seconds (5 minutes) if at least 100 changes were performed
+#   * After 60 seconds if at least 10000 changes were performed
+#
+# You can set these explicitly by uncommenting the following line.
+#
+# save 3600 1 300 100 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# Enables or disables full sanitization checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of a assertion or
+# crash later on while processing commands.
+# Options:
+#   no         - Never perform full sanitization
+#   yes        - Always perform full sanitization
+#   clients    - Perform full sanitization only for user connections.
+#                Excludes: RDB files, RESTORE commands received from the master
+#                connection, and client connections which have the
+#                skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+#   +------------------+      +---------------+
+#   |      Master      | ---> |    Replica    |
+#   | (receive writes) |      |  (exact copy) |
+#   +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+#    stop accepting writes if it appears to be not connected with at least
+#    a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+#    master if the replication link is lost for a relatively small amount of
+#    time. You may want to configure the replication backlog size (see the next
+#    sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+#    network partition replicas automatically try to reconnect to masters
+#    and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error
+#    "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'"
+#    to all data access commands, excluding commands such as:
+#    INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+#    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+#    HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+#                 file on disk. Later the file is transferred by the parent
+#                 process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+#              RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync yes
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# When diskless replication is enabled with a delay, it is possible to let
+# the replication start before the maximum delay is reached if the maximum
+# number of replicas expected have connected. Default of 0 means that the
+# maximum is not defined and Redis will wait the full delay.
+repl-diskless-sync-max-replicas 0
+
+# -----------------------------------------------------------------------------
+# WARNING: Since in this setup the replica does not immediately store an RDB on
+# disk, it may cause data loss during failovers. RDB diskless load + Redis
+# modules not handling I/O reads may cause Redis to abort in case of I/O errors
+# during the initial synchronization stage with the master.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, when parsing the RDB file directly from the socket, in order to avoid
+# data loss it's only safe to flush the current dataset when the new dataset is
+# fully loaded in memory, resulting in higher memory usage.
+# For this reason we have the following options:
+#
+# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
+# "swapdb"      - Keep current db contents in RAM while parsing the data directly
+#                 from the socket. Replicas in this mode can keep serving current
+#                 dataset while replication is in progress, except for cases where
+#                 they can't recognize master as having a data set from same
+#                 replication history.
+#                 Note that this requires sufficient memory, if you don't have it,
+#                 you risk an OOM kill.
+# "on-empty-db" - Use diskless load only when current dataset is empty. This is 
+#                 safer and avoids having old and new dataset loaded side by side
+#                 during replication.
+repl-diskless-load disabled
+
+# The master sends PINGs to its replicas at a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica. The default
+# value is 60 seconds.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the replica can endure the
+# disconnect and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated if there is at least one replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no connected replicas for some time, the backlog will be
+# freed. The following option configures the amount of seconds that need to
+# elapse, starting from the time the last replica disconnected, for the backlog
+# buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with other replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# The propagation error behavior controls how Redis will behave when it is
+# unable to handle a command being processed in the replication stream from a master
+# or processed while reading from an AOF file. Errors that occur during propagation
+# are unexpected, and can cause data inconsistency. However, there are edge cases
+# in earlier versions of Redis where it was possible for the server to replicate or persist
+# commands that would fail on future versions. For this reason the default behavior
+# is to ignore such errors and continue processing commands.
+#
+# If an application wants to ensure there is no data divergence, this configuration
+# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas'
+# to only panic when a replica encounters an error on the replication stream. One of
+# these two panic values will become the default value in the future once there are
+# sufficient safety mechanisms in place to prevent false positive crashes.
+#
+# propagation-error-behavior ignore
+
+# Replica ignore disk write errors controls the behavior of a replica when it is
+# unable to persist a write command received from its master to disk. By default,
+# this configuration is set to 'no' and will crash the replica in this condition.
+# It is not recommended to change this default, however in order to be compatible
+# with older versions of Redis this config can be toggled to 'yes' which will just
+# log a warning and execute the write command it got from the master.
+#
+# replica-ignore-disk-write-errors no
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less or equal than M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica is
+# obtained in the following way:
+#
+#   IP: The address is auto detected by checking the peer address
+#   of the socket used by the replica to connect with the master.
+#
+#   Port: The port is communicated by the replica during the replication
+#   handshake, and is normally the port that the replica is using to
+#   listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# a radix key indexed by key name, what clients have which keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# check this page to understand more about the feature:
+#
+#   https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast, an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+#   user <username> ... acl rules ...
+#
+# For example:
+#
+#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+#  on           Enable the user: it is possible to authenticate as this user.
+#  off          Disable the user: it's no longer possible to authenticate
+#               with this user, however the already authenticated connections
+#               will still work.
+#  skip-sanitize-payload    RESTORE dump-payload sanitization is skipped.
+#  sanitize-payload         RESTORE dump-payload is sanitized (default).
+#  +<command>   Allow the execution of that command.
+#               May be used with `|` for allowing subcommands (e.g "+config|get")
+#  -<command>   Disallow the execution of that command.
+#               May be used with `|` for blocking subcommands (e.g "-config|set")
+#  +@<category> Allow the execution of all the commands in such category
+#               with valid categories are like @admin, @set, @sortedset, ...
+#               and so forth, see the full list in the server.c file where
+#               the Redis command table is described and defined.
+#               The special category @all means all the commands, both currently
+#               present in the server, and that will be loaded in the future
+#               via modules.
+#  +<command>|first-arg  Allow a specific first argument of an otherwise
+#                        disabled command. It is only supported on commands with
+#                        no sub-commands, and is not allowed as negative form
+#                        like -SELECT|1, only additive starting with "+". This
+#                        feature is deprecated and may be removed in the future.
+#  allcommands  Alias for +@all. Note that it implies the ability to execute
+#               all the future commands loaded via the modules system.
+#  nocommands   Alias for -@all.
+#  ~<pattern>   Add a pattern of keys that can be mentioned as part of
+#               commands. For instance ~* allows all the keys. The pattern
+#               is a glob-style pattern like the one of KEYS.
+#               It is possible to specify multiple patterns.
+# %R~<pattern>  Add key read pattern that specifies which keys can be read 
+#               from.
+# %W~<pattern>  Add key write pattern that specifies which keys can be
+#               written to. 
+#  allkeys      Alias for ~*
+#  resetkeys    Flush the list of allowed keys patterns.
+#  &<pattern>   Add a glob-style pattern of Pub/Sub channels that can be
+#               accessed by the user. It is possible to specify multiple channel
+#               patterns.
+#  allchannels  Alias for &*
+#  resetchannels            Flush the list of allowed channel patterns.
+#  ><password>  Add this password to the list of valid passwords for the user.
+#               For example >mypass will add "mypass" to the list.
+#               This directive clears the "nopass" flag (see later).
+#  <<password>  Remove this password from the list of valid passwords.
+#  nopass       All the set passwords of the user are removed, and the user
+#               is flagged as requiring no password: it means that every
+#               password will work against this user. If this directive is
+#               used for the default user, every new connection will be
+#               immediately authenticated with the default user without
+#               any explicit AUTH command required. Note that the "resetpass"
+#               directive will clear this condition.
+#  resetpass    Flush the list of allowed passwords. Moreover removes the
+#               "nopass" status. After "resetpass" the user has no associated
+#               passwords and there is no way to authenticate without adding
+#               some password (or setting it as "nopass" later).
+#  reset        Performs the following actions: resetpass, resetkeys, resetchannels,
+#               allchannels (if acl-pubsub-default is set), off, clearselectors, -@all.
+#               The user returns to the same state it has immediately after its creation.
+# (<options>)   Create a new selector with the options specified within the
+#               parentheses and attach it to the user. Each option should be 
+#               space separated. The first character must be ( and the last 
+#               character must be ).
+# clearselectors            Remove all of the currently attached selectors. 
+#                           Note this does not change the "root" user permissions,
+#                           which are the permissions directly applied onto the
+#                           user (outside the parentheses).
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+#   user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+#   user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# The following is a list of command categories and their meanings:
+# * keyspace - Writing or reading from keys, databases, or their metadata 
+#     in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE,
+#     KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace,
+#     key or metadata will also have `write` category. Commands that only read
+#     the keyspace, key or metadata will have the `read` category.
+# * read - Reading from keys (values or metadata). Note that commands that don't
+#     interact with keys, will not have either `read` or `write`.
+# * write - Writing to keys (values or metadata)
+# * admin - Administrative commands. Normal applications will never need to use
+#     these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc.
+# * dangerous - Potentially dangerous (each should be considered with care for
+#     various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS,
+#     CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc.
+# * connection - Commands affecting the connection or other connections.
+#     This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc.
+# * blocking - Potentially blocking the connection until released by another
+#     command.
+# * fast - Fast O(1) commands. May loop on the number of arguments, but not the
+#     number of elements in the key.
+# * slow - All commands that are not Fast.
+# * pubsub - PUBLISH / SUBSCRIBE related
+# * transaction - WATCH / MULTI / EXEC related commands.
+# * scripting - Scripting related.
+# * set - Data type: sets related.
+# * sortedset - Data type: zsets related.
+# * list - Data type: lists related.
+# * hash - Data type: hashes related.
+# * string - Data type: strings related.
+# * bitmap - Data type: bitmaps related.
+# * hyperloglog - Data type: hyperloglog related.
+# * geo - Data type: geo related.
+# * stream - Data type: streams related.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass is not compatible with aclfile option and the ACL LOAD
+# command, these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
+# SORT (due to the STORE argument), and EXEC (if the transaction includes any
+# command that requires memory).
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 Approximates very closely
+# true LRU but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Eviction processing is designed to function well with the default setting.
+# If there is an unusually large amount of write traffic, this value may need to
+# be increased.  Decreasing this value may reduce latency at the risk of
+# eviction processing effectiveness
+#   0 = minimum latency, 10 = default, 100 = process without regard to latency
+#
+# maxmemory-eviction-tenacity 10
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict in the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire key". The key space is slowly and interactively scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to add latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate less already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+#    in order to make room for new data, without going over the specified
+#    memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+#    EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+#    already exist. For example the RENAME command may delete the old key
+#    content when it is replaced with another one. Similarly SUNIONSTORE
+#    or SORT with STORE option may delete existing keys. The SET command
+#    itself removes any old content of the specified key in order to replace
+#    it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+#    its master, the content of the whole database is removed in order to
+#    load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# like if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way like if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for the case when to replace the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to determine
+# if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since especially writing is so slow, normally
+# Redis users use pipelining in order to speed up the Redis performances per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speedup two times Redis without resorting
+# to pipelining nor sharding of the instance.
+#
+# By default threading is disabled, we suggest enabling it only in machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four cores boxes, try to use 2 or 3 I/O
+# threads, if you have a 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports these options:
+#
+# no:       Don't make changes to oom-score-adj (default).
+# yes:      Alias to "relative" see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+#           the server starts and are then clamped to a range of -1000 to 1000.
+#           Because typically the initial value is 0, they will often match the
+#           absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise" or
+# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The base name of the append only file.
+#
+# Redis 7 and newer use a set of append-only files to persist the dataset
+# and changes applied to it. There are two basic types of files in use:
+#
+# - Base files, which are a snapshot representing the complete state of the
+#   dataset at the time the file was created. Base files can be either in
+#   the form of RDB (binary serialized) or AOF (textual commands).
+# - Incremental files, which contain additional commands that were applied
+#   to the dataset following the previous file.
+#
+# In addition, manifest files are used to track the files and the order in
+# which they were created and should be applied.
+#
+# Append-only file names are created by Redis following a specific pattern.
+# The file name's prefix is based on the 'appendfilename' configuration
+# parameter, followed by additional information about the sequence and type.
+#
+# For example, if appendfilename is set to appendonly.aof, the following file
+# names could be derived:
+#
+# - appendonly.aof.1.base.rdb as a base file.
+# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files.
+# - appendonly.aof.manifest as a manifest file.
+
+appendfilename "appendonly.aof"
+
+# For convenience, Redis stores all persistent append-only files in a dedicated
+# directory. The name of the directory is determined by the appenddirname
+# configuration parameter.
+
+appenddirname "appendonlydir"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync no". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user requires
+# to fix the AOF file using the "redis-check-aof" utility before to restart
+# the server.
+#
+# Note that if the AOF file will be found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis will try to read more data from the AOF file but not enough bytes
+# will be found.
+aof-load-truncated yes
+
+# Redis can create append-only base files in either RDB or AOF formats. Using
+# the RDB format is always faster and more efficient, and disabling it is only
+# supported for backward compatibility purposes.
+aof-use-rdb-preamble yes
+
+# Redis supports recording timestamp annotations in the AOF to support restoring
+# the data from a specific point-in-time. However, using this capability changes
+# the AOF format in a way that may not be compatible with existing AOF parsers.
+aof-timestamp-enabled no
+
+################################ SHUTDOWN #####################################
+
+# Maximum time to wait for replicas when shutting down, in seconds.
+#
+# During shut down, a grace period allows any lagging replicas to catch up with
+# the latest replication offset before the master exits. This period can
+# prevent data loss, especially for deployments without configured disk backups.
+#
+# The 'shutdown-timeout' value is the grace period's duration in seconds. It is
+# only applicable when the instance has replicas. To disable the feature, set
+# the value to 0.
+#
+# shutdown-timeout 10
+
+# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default
+# an RDB snapshot is written to disk in a blocking operation if save points are configured.
+# The options used on signaled shutdown can include the following values:
+# default:  Saves RDB snapshot only if save points are configured.
+#           Waits for lagging replicas to catch up.
+# save:     Forces a DB saving operation even if no save points are configured.
+# nosave:   Prevents DB saving operation even if one or more save points are configured.
+# now:      Skips waiting for lagging replicas.
+# force:    Ignores any errors that would normally prevent the server from exiting.
+#
+# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously.
+# Example: "nosave force now"
+#
+# shutdown-on-sigint default
+# shutdown-on-sigterm default
+
+################ NON-DETERMINISTIC LONG BLOCKING COMMANDS #####################
+
+# Maximum time in milliseconds for EVAL scripts, functions and in some cases
+# modules' commands before Redis can start processing or rejecting other clients.
+#
+# If the maximum execution time is reached Redis will start to reply to most
+# commands with a BUSY error.
+#
+# In this state Redis will only allow a handful of commands to be executed.
+# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some
+# business specific 'allow-busy' commands.
+#
+# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not
+# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop
+# the server in the case a write command was already issued by the script when
+# the user doesn't want to wait for the natural termination of the script.
+#
+# The default is 5 seconds. It is possible to set it to 0 or a negative value
+# to disable this mechanism (uninterrupted execution). Note that in the past
+# this config had a different name, which is now an alias, so both of these do
+# the same:
+# lua-time-limit 5000
+# busy-reply-threshold 5000
+
+################################ REDIS CLUSTER  ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# The cluster port is the port that the cluster bus will listen for inbound connections on. When set 
+# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires 
+# you to specify the cluster bus port when executing cluster meet.
+# cluster-port 0
+
+# A replica of a failing master will avoid to start a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+#    in order to try to give an advantage to the replica with the best
+#    replication offset (more data from the master processed).
+#    Replicas will try to get their rank by offset, and apply to the start
+#    of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+#    its master. This can be the last ping or command received (if the master
+#    is still in the "connected" state), or the time that elapsed since the
+#    disconnection with the master (if the replication link is currently down).
+#    If the last interaction is too old, the replica will not try to failover
+#    at all.
+#
+# The point "2" can be tuned by user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+#   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means, that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster ability
+# to resist to failures as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value or
+# set cluster-allow-replica-migration to 'no'.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# Turning off this option allows to use less automatic cluster configuration.
+# It both disables migration to orphaned masters and migration from masters
+# that became empty.
+#
+# Default is 'yes' (allow automatic migrations).
+#
+# cluster-allow-replica-migration yes
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least a hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) all the cluster becomes, eventually, unavailable.
+# It automatically returns available as soon as all the slots are covered again.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to failover its
+# master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted if not
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases.  The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set, with it set there is only a write outage.
+# Without a quorum of masters, slot ownership will not change automatically.
+#
+# cluster-allow-reads-when-down no
+
+# This option, when set to yes, allows nodes to serve pubsub shard traffic while
+# the cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful if the application would like to use the pubsub feature even when
+# the cluster global stable state is not OK. If the application wants to make sure only
+# one shard is serving a given channel, this feature should be kept as yes.
+#
+# cluster-allow-pubsubshard-when-down yes
+
+# Cluster link send buffer limit is the limit on the memory usage of an individual
+# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed
+# this limit. This is to primarily prevent send buffers from growing unbounded on links
+# toward slow peers (E.g. PubSub messages being piled up).
+# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field
+# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS` command output continuously increase.
+# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single
+# PubSub message by default. (client-query-buffer-limit default value is 1gb)
+#
+# cluster-link-sendbuf-limit 0
+ 
+# Clusters can configure their announced hostname using this config. This is a common use case for 
+# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based
+# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS
+# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is 
+# communicated along the clusterbus to all nodes, setting it to an empty string will remove 
+# the hostname and also propagate the removal.
+#
+# cluster-announce-hostname ""
+
+# Clusters can configure an optional nodename to be used in addition to the node ID for
+# debugging and admin information. This name is broadcasted between nodes, so will be used
+# in addition to the node ID when reporting cross node events such as node failures.
+# cluster-announce-human-nodename ""
+
+# Clusters can advertise how clients should connect to them using either their IP address,
+# a user defined hostname, or by declaring they have no endpoint. Which endpoint is
+# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type
+# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls
+# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS.
+# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' 
+# will be returned instead.
+#
+# When a cluster advertises itself as having an unknown endpoint, it's indicating that
+# the server doesn't know how clients can reach the cluster. This can happen in certain 
+# networking situations where there are multiple possible routes to the node, and the 
+# server doesn't know which one the client took. In this case, the server is expecting
+# the client to reach out on the same endpoint it used for making the last request, but use
+# the port provided in the response.
+#
+# cluster-preferred-endpoint-type ip
+
+# In order to setup your cluster make sure to read the documentation
+# available at https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support  ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster working in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if tls-cluster is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+################################ LATENCY TRACKING ##############################
+
+# The Redis extended latency monitoring tracks the per command latencies and enables
+# exporting the percentile distribution via the INFO latencystats command,
+# and cumulative latency distributions (histograms) via the LATENCY command.
+#
+# By default, the extended latency monitoring is enabled since the overhead
+# of keeping track of the command latency is very small.
+# latency-tracking yes
+
+# By default the exported latency percentiles via the INFO latencystats command
+# are the p50, p99, and p999.
+# latency-tracking-info-percentiles 50 99 99.9
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  n     New key events (Note: not included in the 'A' class)
+#  t     Stream commands
+#  d     Module key type events
+#  m     Key-miss events (Note: It is not included in the 'A' class)
+#  A     Alias for g$lshzxetd, so that the "AKE" string means all the events
+#        (Except key-miss events which are excluded from 'A' due to their
+#         unique nature).
+#
+#  The "notify-keyspace-events" takes as argument a string that is composed
+#  of zero or multiple characters. The empty string means that notifications
+#  are disabled.
+#
+#  Example: to enable list and generic events, from the point of view of the
+#           event name, use:
+#
+#  notify-keyspace-events Elg
+#
+#  Example 2: to get the stream of the expired keys subscribing to channel
+#             name __keyevent@0__:expired use:
+#
+#  notify-keyspace-events Ex
+#
+#  By default all notifications are disabled because most users don't need
+#  this feature and the feature has some overhead. Note that if you don't
+#  specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-listpack-entries 512
+hash-max-listpack-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-listpack-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression.  The head and tail of the list
+# are always uncompressed for fast push/pop operations.  Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+#    going from either the head or tail"
+#    So: [head]->node->node->...->node->[tail]
+#    [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+#    2 here means: don't compress head or head->next or tail->prev or tail,
+#    but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Sets containing non-integer values are also encoded using a memory efficient
+# data structure when they have a small number of entries, and the biggest entry
+# does not exceed a given threshold. These thresholds can be configured using
+# the following directives.
+set-max-listpack-entries 128
+set-max-listpack-value 64
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-listpack-entries 128
+zset-max-listpack-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Note that it doesn't make sense to set the replica clients output buffer
+# limit lower than the repl-backlog-size config (partial sync will succeed
+# and then replica will get disconnected).
+# Such a configuration is ignored (the size of repl-backlog-size will be used).
+# This doesn't have memory consumption implications since the replica client
+# will share the backlog buffers memory.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In some scenarios client connections can hog up memory leading to OOM
+# errors or data eviction. To avoid this we can cap the accumulated memory
+# used by all client connections (all pubsub and normal clients). Once we
+# reach that limit connections will be dropped by the server freeing up
+# memory. The server will attempt to drop the connections using the most 
+# memory first. We call this mechanism "client eviction".
+#
+# Client eviction is configured using the maxmemory-clients setting as follows:
+# 0 - client eviction is disabled (default)
+#
+# A memory value can be used for the client eviction threshold,
+# for example:
+# maxmemory-clients 1g
+#
+# A percentage value (between 1% and 100%) means the client eviction threshold
+# is based on a percentage of the maxmemory setting. For example to set client
+# eviction at 5% of maxmemory:
+# maxmemory-clients 5%
+
+# In the Redis protocol, bulk requests, that are, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here, but must be 1mb or greater
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid too many clients are processed for each background task invocation
+# in order to avoid latency spikes.
+#
+# Since the default HZ value by default is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily raise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 4 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves RDB file, if the following option is enabled
+# the file will be fsync-ed every 4 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performances and how the keys LFU change over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+#   redis-benchmark -n 1000000 incr foo
+#   redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be decremented.
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means we
+# will never decay the counter.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing to reclaim back memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+#    to use the copy of Jemalloc we ship with the source code of Redis.
+#    This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+#    issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+#    needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Active defragmentation is disabled by default
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performances of the server.
+# This is useful both in order to pin different Redis threads in different
+# CPUs, but also in order to make sure that multiple Redis instances running
+# in the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
+
+# In some cases redis will emit warnings and even refuse to start if it detects
+# that the system is in bad state, it is possible to suppress these warnings
+# by setting the following config which takes a space delimited list of warnings
+# to suppress
+#
+# ignore-warnings ARM64-COW-BUG

+ 16 - 0
docker/run.md

@@ -0,0 +1,16 @@
+
+#  Docker Compose 安装中间件 MySQL、Redis、Minio、Xxl-Job
+
+## 安装
+
+```bash
+docker-compose -f ./docker-compose.yml -p youlai-boot up -d
+```
+
+- `-p youlai-boot` 指定项目名称(命名空间),避免与其他容器冲突,方便统一管理和卸载
+
+## 卸载
+```bash
+docker-compose -f ./docker-compose.yml -p youlai-boot down
+```
+

+ 0 - 0
docker/xxljob/README.md


+ 572 - 0
sql/mysql/youlai_boot.sql

@@ -0,0 +1,572 @@
+
+# YouLai_Boot 数据库(MySQL 5.7 ~ MySQL 8.x)
+# Copyright (c) 2021-present, youlai.tech
+
+
+-- ----------------------------
+-- 1. 创建数据库
+-- ----------------------------
+CREATE DATABASE IF NOT EXISTS youlai_boot CHARACTER SET utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci;
+
+
+-- ----------------------------
+-- 2. 创建表 && 数据初始化
+-- ----------------------------
+use youlai_boot;
+
+SET NAMES utf8mb4;  # 设置字符集
+SET FOREIGN_KEY_CHECKS = 0; # 关闭外键检查,加快导入速度
+
+-- ----------------------------
+-- Table structure for sys_dept
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_dept`;
+CREATE TABLE `sys_dept`  (
+                             `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键',
+                             `name` varchar(100) NOT NULL COMMENT '部门名称',
+                             `code` varchar(100) NOT NULL COMMENT '部门编号',
+                             `parent_id` bigint DEFAULT 0 COMMENT '父节点id',
+                             `tree_path` varchar(255) NOT NULL COMMENT '父节点id路径',
+                             `sort` smallint DEFAULT 0 COMMENT '显示顺序',
+                             `status` tinyint DEFAULT 1 COMMENT '状态(1-正常 0-禁用)',
+                             `create_by` bigint NULL COMMENT '创建人ID',
+                             `create_time` datetime NULL COMMENT '创建时间',
+                             `update_by` bigint NULL COMMENT '修改人ID',
+                             `update_time` datetime NULL COMMENT '更新时间',
+                             `is_deleted` tinyint DEFAULT 0 COMMENT '逻辑删除标识(1-已删除 0-未删除)',
+                             PRIMARY KEY (`id`) USING BTREE,
+                             UNIQUE INDEX `uk_code`(`code` ASC) USING BTREE COMMENT '部门编号唯一索引'
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '部门表';
+
+-- ----------------------------
+-- Records of sys_dept
+-- ----------------------------
+INSERT INTO `sys_dept` VALUES (1, '有来技术', 'YOULAI', 0, '0', 1, 1, 1, NULL, 1, now(), 0);
+INSERT INTO `sys_dept` VALUES (2, '研发部门', 'RD001', 1, '0,1', 1, 1, 2, NULL, 2, now(), 0);
+INSERT INTO `sys_dept` VALUES (3, '测试部门', 'QA001', 1, '0,1', 1, 1, 2, NULL, 2, now(), 0);
+
+-- ----------------------------
+-- Table structure for sys_dict
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_dict`;
+CREATE TABLE `sys_dict` (
+                            `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键 ',
+                            `dict_code` varchar(50) COMMENT '类型编码',
+                            `name` varchar(50) COMMENT '类型名称',
+                            `status` tinyint(1) DEFAULT '0' COMMENT '状态(1-正常,0-禁用)',
+                            `remark` varchar(255) COMMENT '备注',
+                            `create_time` datetime COMMENT '创建时间',
+                            `create_by` bigint COMMENT '创建人ID',
+                            `update_time` datetime COMMENT '更新时间',
+                            `update_by` bigint COMMENT '修改人ID',
+                            `is_deleted` tinyint DEFAULT '0' COMMENT '是否删除(1-删除,0-未删除)',
+                            PRIMARY KEY (`id`) USING BTREE,
+                            KEY `idx_dict_code` (`dict_code`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='字典表';
+-- ----------------------------
+-- Records of sys_dict
+-- ----------------------------
+INSERT INTO `sys_dict` VALUES (1, 'gender', '性别', 1, NULL, now() , 1,now(), 1,0);
+INSERT INTO `sys_dict` VALUES (2, 'notice_type', '通知类型', 1, NULL, now(), 1,now(), 1,0);
+INSERT INTO `sys_dict` VALUES (3, 'notice_level', '通知级别', 1, NULL, now(), 1,now(), 1,0);
+
+
+-- ----------------------------
+-- Table structure for sys_dict_item
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_dict_item`;
+CREATE TABLE `sys_dict_item` (
+                                 `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键',
+                                 `dict_code` varchar(50) COMMENT '关联字典编码,与sys_dict表中的dict_code对应',
+                                 `value` varchar(50) COMMENT '字典项值',
+                                 `label` varchar(100) COMMENT '字典项标签',
+                                 `tag_type` varchar(50) COMMENT '标签类型,用于前端样式展示(如success、warning等)',
+                                 `status` tinyint DEFAULT '0' COMMENT '状态(1-正常,0-禁用)',
+                                 `sort` int DEFAULT '0' COMMENT '排序',
+                                 `remark` varchar(255) COMMENT '备注',
+                                 `create_time` datetime COMMENT '创建时间',
+                                 `create_by` bigint COMMENT '创建人ID',
+                                 `update_time` datetime COMMENT '更新时间',
+                                 `update_by` bigint COMMENT '修改人ID',
+                                 PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='字典项表';
+
+-- ----------------------------
+-- Records of sys_dict_item
+-- ----------------------------
+INSERT INTO `sys_dict_item` VALUES (1, 'gender', '1', '男', 'primary', 1, 1, NULL, now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (2, 'gender', '2', '女', 'danger', 1, 2, NULL, now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (3, 'gender', '0', '保密', 'info', 1, 3, NULL, now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (4, 'notice_type', '1', '系统升级', 'success', 1, 1, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (5, 'notice_type', '2', '系统维护', 'primary', 1, 2, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (6, 'notice_type', '3', '安全警告', 'danger', 1, 3, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (7, 'notice_type', '4', '假期通知', 'success', 1, 4, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (8, 'notice_type', '5', '公司新闻', 'primary', 1, 5, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (9, 'notice_type', '99', '其他', 'info', 1, 99, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (10, 'notice_level', 'L', '低', 'info', 1, 1, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (11, 'notice_level', 'M', '中', 'warning', 1, 2, '', now(), 1,now(),1);
+INSERT INTO `sys_dict_item` VALUES (12, 'notice_level', 'H', '高', 'danger', 1, 3, '', now(), 1,now(),1);
+
+-- ----------------------------
+-- Table structure for sys_menu
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_menu`;
+CREATE TABLE `sys_menu`  (
+                             `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'ID',
+                             `parent_id` bigint NOT NULL COMMENT '父菜单ID',
+                             `tree_path` varchar(255) COMMENT '父节点ID路径',
+                             `name` varchar(64) NOT NULL COMMENT '菜单名称',
+                             `type` tinyint NOT NULL COMMENT '菜单类型(1-菜单 2-目录 3-外链 4-按钮)',
+                             `route_name` varchar(255) COMMENT '路由名称(Vue Router 中用于命名路由)',
+                             `route_path` varchar(128) COMMENT '路由路径(Vue Router 中定义的 URL 路径)',
+                             `component` varchar(128) COMMENT '组件路径(组件页面完整路径,相对于 src/views/,缺省后缀 .vue)',
+                             `perm` varchar(128) COMMENT '【按钮】权限标识',
+                             `always_show` tinyint DEFAULT 0 COMMENT '【目录】只有一个子路由是否始终显示(1-是 0-否)',
+                             `keep_alive` tinyint DEFAULT 0 COMMENT '【菜单】是否开启页面缓存(1-是 0-否)',
+                             `visible` tinyint(1) DEFAULT 1 COMMENT '显示状态(1-显示 0-隐藏)',
+                             `sort` int DEFAULT 0 COMMENT '排序',
+                             `icon` varchar(64) COMMENT '菜单图标',
+                             `redirect` varchar(128) COMMENT '跳转路径',
+                             `create_time` datetime NULL COMMENT '创建时间',
+                             `update_time` datetime NULL COMMENT '更新时间',
+                             `params` varchar(255) NULL COMMENT '路由参数',
+                             PRIMARY KEY (`id`) USING BTREE
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '菜单管理';
+
+-- ----------------------------
+-- Records of sys_menu
+-- ----------------------------
+INSERT INTO `sys_menu` VALUES (1, 0, '0', '系统管理', 2, '', '/system', 'Layout', NULL, NULL, NULL, 1, 1, 'system', '/system/user', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (2, 1, '0,1', '用户管理', 1, 'User', 'user', 'system/user/index', NULL, NULL, 1, 1, 1, 'el-icon-User', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (3, 1, '0,1', '角色管理', 1, 'Role', 'role', 'system/role/index', NULL, NULL, 1, 1, 2, 'role', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (4, 1, '0,1', '菜单管理', 1, 'SysMenu', 'menu', 'system/menu/index', NULL, NULL, 1, 1, 3, 'menu', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (5, 1, '0,1', '部门管理', 1, 'Dept', 'dept', 'system/dept/index', NULL, NULL, 1, 1, 4, 'tree', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (6, 1, '0,1', '字典管理', 1, 'Dict', 'dict', 'system/dict/index', NULL, NULL, 1, 1, 5, 'dict', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (20, 0, '0', '多级菜单', 2, NULL, '/multi-level', 'Layout', NULL, 1, NULL, 1, 9, 'cascader', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (21, 20, '0,20', '菜单一级', 2, NULL, 'multi-level1', 'Layout', NULL, 1, NULL, 1, 1, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (22, 21, '0,20,21', '菜单二级', 2, NULL, 'multi-level2', 'Layout', NULL, 0, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (23, 22, '0,20,21,22', '菜单三级-1', 1, NULL, 'multi-level3-1', 'demo/multi-level/children/children/level3-1', NULL, 0, 1, 1, 1, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (24, 22, '0,20,21,22', '菜单三级-2', 1, NULL, 'multi-level3-2', 'demo/multi-level/children/children/level3-2', NULL, 0, 1, 1, 2, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (26, 0, '0', '平台文档', 2, '', '/doc', 'Layout', NULL, NULL, NULL, 1, 8, 'document', 'https://juejin.cn/post/7228990409909108793', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (30, 26, '0,26', '平台文档(外链)', 3, NULL, 'https://juejin.cn/post/7228990409909108793', '', NULL, NULL, NULL, 1, 2, 'document', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (31, 2, '0,1,2', '用户新增', 4, NULL, '', NULL, 'sys:user:add', NULL, NULL, 1, 1, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (32, 2, '0,1,2', '用户编辑', 4, NULL, '', NULL, 'sys:user:edit', NULL, NULL, 1, 2, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (33, 2, '0,1,2', '用户删除', 4, NULL, '', NULL, 'sys:user:delete', NULL, NULL, 1, 3, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (36, 0, '0', '组件封装', 2, NULL, '/component', 'Layout', NULL, NULL, NULL, 1, 10, 'menu', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (37, 36, '0,36', '富文本编辑器', 1, 'WangEditor', 'wang-editor', 'demo/wang-editor', NULL, NULL, 1, 1, 2, '', '', NULL, NULL, NULL);
+INSERT INTO `sys_menu` VALUES (38, 36, '0,36', '图片上传', 1, 'Upload', 'upload', 'demo/upload', NULL, NULL, 1, 1, 3, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (39, 36, '0,36', '图标选择器', 1, 'IconSelect', 'icon-select', 'demo/icon-select', NULL, NULL, 1, 1, 4, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (40, 0, '0', '接口文档', 2, NULL, '/api', 'Layout', NULL, 1, NULL, 1, 7, 'api', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (41, 40, '0,40', 'Apifox', 1, 'Apifox', 'apifox', 'demo/api/apifox', NULL, NULL, 1, 1, 1, 'api', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (70, 3, '0,1,3', '角色新增', 4, NULL, '', NULL, 'sys:role:add', NULL, NULL, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (71, 3, '0,1,3', '角色编辑', 4, NULL, '', NULL, 'sys:role:edit', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (72, 3, '0,1,3', '角色删除', 4, NULL, '', NULL, 'sys:role:delete', NULL, NULL, 1, 4, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (73, 4, '0,1,4', '菜单新增', 4, NULL, '', NULL, 'sys:menu:add', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (74, 4, '0,1,4', '菜单编辑', 4, NULL, '', NULL, 'sys:menu:edit', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (75, 4, '0,1,4', '菜单删除', 4, NULL, '', NULL, 'sys:menu:delete', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (76, 5, '0,1,5', '部门新增', 4, NULL, '', NULL, 'sys:dept:add', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (77, 5, '0,1,5', '部门编辑', 4, NULL, '', NULL, 'sys:dept:edit', NULL, NULL, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (78, 5, '0,1,5', '部门删除', 4, NULL, '', NULL, 'sys:dept:delete', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (79, 6, '0,1,6', '字典新增', 4, NULL, '', NULL, 'sys:dict:add', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (81, 6, '0,1,6', '字典编辑', 4, NULL, '', NULL, 'sys:dict:edit', NULL, NULL, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (84, 6, '0,1,6', '字典删除', 4, NULL, '', NULL, 'sys:dict:delete', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (88, 2, '0,1,2', '重置密码', 4, NULL, '', NULL, 'sys:user:reset-password', NULL, NULL, 1, 4, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (89, 0, '0', '功能演示', 2, NULL, '/function', 'Layout', NULL, NULL, NULL, 1, 12, 'menu', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (90, 89, '0,89', 'Websocket', 1, 'WebSocket', '/function/websocket', 'demo/websocket', NULL, NULL, 1, 1, 3, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (95, 36, '0,36', '字典组件', 1, 'DictDemo', 'dict-demo', 'demo/dictionary', NULL, NULL, 1, 1, 4, '', '',  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (97, 89, '0,89', 'Icons', 1, 'IconDemo', 'icon-demo', 'demo/icons', NULL, NULL, 1, 1, 2, 'el-icon-Notification', '',  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (102, 26, '0,26', 'document', 3, NULL, 'internal-doc', 'demo/internal-doc', NULL, NULL, NULL, 1, 1, 'document', '',  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (105, 2, '0,1,2', '用户查询', 4, NULL, '', NULL, 'sys:user:query', 0, 0, 1, 0, '', NULL,  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (106, 2, '0,1,2', '用户导入', 4, NULL, '', NULL, 'sys:user:import', NULL, NULL, 1, 5, '', NULL,  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (107, 2, '0,1,2', '用户导出', 4, NULL, '', NULL, 'sys:user:export', NULL, NULL, 1, 6, '', NULL,  now(),  now(), NULL);
+INSERT INTO `sys_menu` VALUES (108, 36, '0,36', '增删改查', 1, 'Curd', 'curd', 'demo/curd/index', NULL, NULL, 1, 1, 0, '', '', NULL, NULL, NULL);
+INSERT INTO `sys_menu` VALUES (109, 36, '0,36', '列表选择器', 1, 'TableSelect', 'table-select', 'demo/table-select/index', NULL, NULL, 1, 1, 1, '', '', NULL, NULL, NULL);
+INSERT INTO `sys_menu` VALUES (110, 0, '0', '路由参数', 2, NULL, '/route-param', 'Layout', NULL, 1, 1, 1, 11, 'el-icon-ElementPlus', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (111, 110, '0,110', '参数(type=1)', 1, 'RouteParamType1', 'route-param-type1', 'demo/route-param', NULL, 0, 1, 1, 1, 'el-icon-Star', NULL, now(), now(), '{\"type\": \"1\"}');
+INSERT INTO `sys_menu` VALUES (112, 110, '0,110', '参数(type=2)', 1, 'RouteParamType2', 'route-param-type2', 'demo/route-param', NULL, 0, 1, 1, 2, 'el-icon-StarFilled', NULL, now(), now(), '{\"type\": \"2\"}');
+INSERT INTO `sys_menu` VALUES (117, 1, '0,1', '系统日志', 1, 'Log', 'log', 'system/log/index', NULL, 0, 1, 1, 6, 'document', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (118, 0, '0', '系统工具', 2, NULL, '/codegen', 'Layout', NULL, 0, 1, 1, 2, 'menu', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (119, 118, '0,118', '代码生成', 1, 'Codegen', 'codegen', 'codegen/index', NULL, 0, 1, 1, 1, 'code', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (120, 1, '0,1', '系统配置', 1, 'Config', 'config', 'system/config/index', NULL, 0, 1, 1, 7, 'setting', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (121, 120, '0,1,120', '系统配置查询', 4, NULL, '', NULL, 'sys:config:query', 0, 1, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (122, 120, '0,1,120', '系统配置新增', 4, NULL, '', NULL, 'sys:config:add', 0, 1, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (123, 120, '0,1,120', '系统配置修改', 4, NULL, '', NULL, 'sys:config:update', 0, 1, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (124, 120, '0,1,120', '系统配置删除', 4, NULL, '', NULL, 'sys:config:delete', 0, 1, 1, 4, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (125, 120, '0,1,120', '系统配置刷新', 4, NULL, '', NULL, 'sys:config:refresh', 0, 1, 1, 5, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (126, 1, '0,1', '通知公告', 1, 'Notice', 'notice', 'system/notice/index', NULL, NULL, NULL, 1, 9, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (127, 126, '0,1,126', '通知查询', 4, NULL, '', NULL, 'sys:notice:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (128, 126, '0,1,126', '通知新增', 4, NULL, '', NULL, 'sys:notice:add', NULL, NULL, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (129, 126, '0,1,126', '通知编辑', 4, NULL, '', NULL, 'sys:notice:edit', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (130, 126, '0,1,126', '通知删除', 4, NULL, '', NULL, 'sys:notice:delete', NULL, NULL, 1, 4, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (133, 126, '0,1,126', '通知发布', 4, NULL, '', NULL, 'sys:notice:publish', 0, 1, 1, 5, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (134, 126, '0,1,126', '通知撤回', 4, NULL, '', NULL, 'sys:notice:revoke', 0, 1, 1, 6, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (135, 1, '0,1', '字典项', 1, 'DictItem', 'dict-item', 'system/dict/dict-item', NULL, 0, 1, 0, 6, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (136, 135, '0,1,135', '字典项新增', 4, NULL, '', NULL, 'sys:dict-item:add', NULL, NULL, 1, 2, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (137, 135, '0,1,135', '字典项编辑', 4, NULL, '', NULL, 'sys:dict-item:edit', NULL, NULL, 1, 3, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (138, 135, '0,1,135', '字典项删除', 4, NULL, '', NULL, 'sys:dict-item:delete', NULL, NULL, 1, 4, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (139, 3, '0,1,3', '角色查询', 4, NULL, '', NULL, 'sys:role:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (140, 4, '0,1,4', '菜单查询', 4, NULL, '', NULL, 'sys:menu:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (141, 5, '0,1,5', '部门查询', 4, NULL, '', NULL, 'sys:dept:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (142, 6, '0,1,6', '字典查询', 4, NULL, '', NULL, 'sys:dict:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (143, 135, '0,1,135', '字典项查询', 4, NULL, '', NULL, 'sys:dict-item:query', NULL, NULL, 1, 1, '', NULL, now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (144, 26, '0,26', '后端文档', 3, NULL, 'https://youlai.blog.csdn.net/article/details/145178880', '', NULL, NULL, NULL, 1, 3, 'document', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (145, 26, '0,26', '移动端文档', 3, NULL, 'https://youlai.blog.csdn.net/article/details/143222890', '', NULL, NULL, NULL, 1, 4, 'document', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (146, 36, '0,36', '拖拽组件', 1, 'Drag', 'drag', 'demo/drag', NULL, NULL, NULL, 1, 5, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (147, 36, '0,36', '滚动文本', 1, 'TextScroll', 'text-scroll', 'demo/text-scroll', NULL, NULL, NULL, 1, 6, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (148, 89, '0,89', '字典实时同步', 1, 'DictSync', 'dict-sync', 'demo/dict-sync', NULL, NULL, NULL, 1, 3, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (149, 89, '0,89', 'VxeTable', 1, 'VxeTable', 'vxe-table', 'demo/vxe-table/index', NULL, NULL, 1, 1, 0, 'el-icon-MagicStick', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (150, 36, '0,36', '自适应表格操作列', 1, 'AutoOperationColumn', 'operation-column', 'demo/auto-operation-column', NULL, NULL, 1, 1, 1, '', '', now(), now(), NULL);
+INSERT INTO `sys_menu` VALUES (151, 89, '0,89', 'CURD单文件', 1, 'CurdSingle', 'curd-single', 'demo/curd-single', NULL, NULL, 1, 1, 7, 'el-icon-Reading', '', now(),now(), NULL);
+
+
+-- ----------------------------
+-- Table structure for sys_role
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_role`;
+CREATE TABLE `sys_role`  (
+                             `id` bigint NOT NULL AUTO_INCREMENT,
+                             `name` varchar(64) NOT NULL COMMENT '角色名称',
+                             `code` varchar(32) NOT NULL COMMENT '角色编码',
+                             `sort` int NULL COMMENT '显示顺序',
+                             `status` tinyint(1) DEFAULT 1 COMMENT '角色状态(1-正常 0-停用)',
+                             `data_scope` tinyint NULL COMMENT '数据权限(1-所有数据 2-部门及子部门数据 3-本部门数据 4-本人数据)',
+                             `create_by` bigint NULL COMMENT '创建人 ID',
+                             `create_time` datetime NULL COMMENT '创建时间',
+                             `update_by` bigint NULL COMMENT '更新人ID',
+                             `update_time` datetime NULL COMMENT '更新时间',
+                             `is_deleted` tinyint(1) DEFAULT 0 COMMENT '逻辑删除标识(0-未删除 1-已删除)',
+                             PRIMARY KEY (`id`) USING BTREE,
+                             UNIQUE INDEX `uk_name`(`name` ASC) USING BTREE COMMENT '角色名称唯一索引',
+                             UNIQUE INDEX `uk_code`(`code` ASC) USING BTREE COMMENT '角色编码唯一索引'
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '角色表';
+
+-- ----------------------------
+-- Records of sys_role
+-- ----------------------------
+INSERT INTO `sys_role` VALUES (1, '超级管理员', 'ROOT', 1, 1, 1, NULL, now(), NULL, now(), 0);
+INSERT INTO `sys_role` VALUES (2, '系统管理员', 'ADMIN', 2, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (3, '访问游客', 'GUEST', 3, 1, 3, NULL, now(), NULL, now(), 0);
+INSERT INTO `sys_role` VALUES (4, '系统管理员1', 'ADMIN1', 4, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (5, '系统管理员2', 'ADMIN2', 5, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (6, '系统管理员3', 'ADMIN3', 6, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (7, '系统管理员4', 'ADMIN4', 7, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (8, '系统管理员5', 'ADMIN5', 8, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (9, '系统管理员6', 'ADMIN6', 9, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (10, '系统管理员7', 'ADMIN7', 10, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (11, '系统管理员8', 'ADMIN8', 11, 1, 1, NULL, now(), NULL, NULL, 0);
+INSERT INTO `sys_role` VALUES (12, '系统管理员9', 'ADMIN9', 12, 1, 1, NULL, now(), NULL, NULL, 0);
+
+-- ----------------------------
+-- Table structure for sys_role_menu
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_role_menu`;
+CREATE TABLE `sys_role_menu`  (
+                                  `role_id` bigint NOT NULL COMMENT '角色ID',
+                                  `menu_id` bigint NOT NULL COMMENT '菜单ID',
+                                  UNIQUE INDEX `uk_roleid_menuid`(`role_id` ASC, `menu_id` ASC) USING BTREE COMMENT '角色菜单唯一索引'
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '角色和菜单关联表';
+
+-- ----------------------------
+-- Records of sys_role_menu
+-- ----------------------------
+INSERT INTO `sys_role_menu` VALUES (2, 1);
+INSERT INTO `sys_role_menu` VALUES (2, 2);
+INSERT INTO `sys_role_menu` VALUES (2, 3);
+INSERT INTO `sys_role_menu` VALUES (2, 4);
+INSERT INTO `sys_role_menu` VALUES (2, 5);
+INSERT INTO `sys_role_menu` VALUES (2, 6);
+INSERT INTO `sys_role_menu` VALUES (2, 20);
+INSERT INTO `sys_role_menu` VALUES (2, 21);
+INSERT INTO `sys_role_menu` VALUES (2, 22);
+INSERT INTO `sys_role_menu` VALUES (2, 23);
+INSERT INTO `sys_role_menu` VALUES (2, 24);
+INSERT INTO `sys_role_menu` VALUES (2, 26);
+INSERT INTO `sys_role_menu` VALUES (2, 30);
+INSERT INTO `sys_role_menu` VALUES (2, 31);
+INSERT INTO `sys_role_menu` VALUES (2, 32);
+INSERT INTO `sys_role_menu` VALUES (2, 33);
+INSERT INTO `sys_role_menu` VALUES (2, 36);
+INSERT INTO `sys_role_menu` VALUES (2, 37);
+INSERT INTO `sys_role_menu` VALUES (2, 38);
+INSERT INTO `sys_role_menu` VALUES (2, 39);
+INSERT INTO `sys_role_menu` VALUES (2, 40);
+INSERT INTO `sys_role_menu` VALUES (2, 41);
+INSERT INTO `sys_role_menu` VALUES (2, 70);
+INSERT INTO `sys_role_menu` VALUES (2, 71);
+INSERT INTO `sys_role_menu` VALUES (2, 72);
+INSERT INTO `sys_role_menu` VALUES (2, 73);
+INSERT INTO `sys_role_menu` VALUES (2, 74);
+INSERT INTO `sys_role_menu` VALUES (2, 75);
+INSERT INTO `sys_role_menu` VALUES (2, 76);
+INSERT INTO `sys_role_menu` VALUES (2, 77);
+INSERT INTO `sys_role_menu` VALUES (2, 78);
+INSERT INTO `sys_role_menu` VALUES (2, 79);
+INSERT INTO `sys_role_menu` VALUES (2, 81);
+INSERT INTO `sys_role_menu` VALUES (2, 84);
+INSERT INTO `sys_role_menu` VALUES (2, 85);
+INSERT INTO `sys_role_menu` VALUES (2, 86);
+INSERT INTO `sys_role_menu` VALUES (2, 87);
+INSERT INTO `sys_role_menu` VALUES (2, 88);
+INSERT INTO `sys_role_menu` VALUES (2, 89);
+INSERT INTO `sys_role_menu` VALUES (2, 90);
+INSERT INTO `sys_role_menu` VALUES (2, 91);
+INSERT INTO `sys_role_menu` VALUES (2, 95);
+INSERT INTO `sys_role_menu` VALUES (2, 97);
+INSERT INTO `sys_role_menu` VALUES (2, 102);
+INSERT INTO `sys_role_menu` VALUES (2, 105);
+INSERT INTO `sys_role_menu` VALUES (2, 106);
+INSERT INTO `sys_role_menu` VALUES (2, 107);
+INSERT INTO `sys_role_menu` VALUES (2, 108);
+INSERT INTO `sys_role_menu` VALUES (2, 109);
+INSERT INTO `sys_role_menu` VALUES (2, 110);
+INSERT INTO `sys_role_menu` VALUES (2, 111);
+INSERT INTO `sys_role_menu` VALUES (2, 112);
+INSERT INTO `sys_role_menu` VALUES (2, 114);
+INSERT INTO `sys_role_menu` VALUES (2, 115);
+INSERT INTO `sys_role_menu` VALUES (2, 116);
+INSERT INTO `sys_role_menu` VALUES (2, 117);
+INSERT INTO `sys_role_menu` VALUES (2, 118);
+INSERT INTO `sys_role_menu` VALUES (2, 119);
+INSERT INTO `sys_role_menu` VALUES (2, 120);
+INSERT INTO `sys_role_menu` VALUES (2, 121);
+INSERT INTO `sys_role_menu` VALUES (2, 122);
+INSERT INTO `sys_role_menu` VALUES (2, 123);
+INSERT INTO `sys_role_menu` VALUES (2, 124);
+INSERT INTO `sys_role_menu` VALUES (2, 125);
+INSERT INTO `sys_role_menu` VALUES (2, 126);
+INSERT INTO `sys_role_menu` VALUES (2, 127);
+INSERT INTO `sys_role_menu` VALUES (2, 128);
+INSERT INTO `sys_role_menu` VALUES (2, 129);
+INSERT INTO `sys_role_menu` VALUES (2, 130);
+INSERT INTO `sys_role_menu` VALUES (2, 131);
+INSERT INTO `sys_role_menu` VALUES (2, 132);
+INSERT INTO `sys_role_menu` VALUES (2, 133);
+INSERT INTO `sys_role_menu` VALUES (2, 134);
+INSERT INTO `sys_role_menu` VALUES (2, 135);
+INSERT INTO `sys_role_menu` VALUES (2, 136);
+INSERT INTO `sys_role_menu` VALUES (2, 137);
+INSERT INTO `sys_role_menu` VALUES (2, 138);
+INSERT INTO `sys_role_menu` VALUES (2, 139);
+INSERT INTO `sys_role_menu` VALUES (2, 140);
+INSERT INTO `sys_role_menu` VALUES (2, 141);
+INSERT INTO `sys_role_menu` VALUES (2, 142);
+INSERT INTO `sys_role_menu` VALUES (2, 143);
+INSERT INTO `sys_role_menu` VALUES (2, 144);
+INSERT INTO `sys_role_menu` VALUES (2, 145);
+INSERT INTO `sys_role_menu` VALUES (2, 146);
+INSERT INTO `sys_role_menu` VALUES (2, 147);
+INSERT INTO `sys_role_menu` VALUES (2, 148);
+INSERT INTO `sys_role_menu` VALUES (2, 149);
+INSERT INTO `sys_role_menu` VALUES (2, 150);
+INSERT INTO `sys_role_menu` VALUES (2, 151);
+
+-- ----------------------------
+-- Table structure for sys_user
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_user`;
+CREATE TABLE `sys_user`  (
+                             `id` bigint NOT NULL AUTO_INCREMENT,
+                             `username` varchar(64) COMMENT '用户名',
+                             `nickname` varchar(64) COMMENT '昵称',
+                             `gender` tinyint(1) DEFAULT 1 COMMENT '性别(1-男 2-女 0-保密)',
+                             `password` varchar(100) COMMENT '密码',
+                             `dept_id` int COMMENT '部门ID',
+                             `avatar` varchar(255) COMMENT '用户头像',
+                             `mobile` varchar(20) COMMENT '联系方式',
+                             `status` tinyint(1) DEFAULT 1 COMMENT '状态(1-正常 0-禁用)',
+                             `email` varchar(128) COMMENT '用户邮箱',
+                             `create_time` datetime COMMENT '创建时间',
+                             `create_by` bigint COMMENT '创建人ID',
+                             `update_time` datetime COMMENT '更新时间',
+                             `update_by` bigint COMMENT '修改人ID',
+                             `is_deleted` tinyint(1) DEFAULT 0 COMMENT '逻辑删除标识(0-未删除 1-已删除)',
+                             `openid` char(28) COMMENT '微信 openid',
+                             PRIMARY KEY (`id`) USING BTREE,
+                             KEY `login_name` (`username`)
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '用户信息表';
+
+-- ----------------------------
+-- Records of sys_user
+-- ----------------------------
+INSERT INTO `sys_user` VALUES (1, 'root', '有来技术', 0, '$2a$10$xVWsNOhHrCxh5UbpCE7/HuJ.PAOKcYAqRxD2CO2nVnJS.IAXkr5aq', NULL, 'https://foruda.gitee.com/images/1723603502796844527/03cdca2a_716974.gif', '18812345677', 1, 'youlaitech@163.com', now(), NULL, now(), NULL, 0,NULL);
+INSERT INTO `sys_user` VALUES (2, 'admin', '系统管理员', 1, '$2a$10$xVWsNOhHrCxh5UbpCE7/HuJ.PAOKcYAqRxD2CO2nVnJS.IAXkr5aq', 1, 'https://foruda.gitee.com/images/1723603502796844527/03cdca2a_716974.gif', '18812345678', 1, 'youlaitech@163.com', now(), NULL, now(), NULL, 0,NULL);
+INSERT INTO `sys_user` VALUES (3, 'test', '测试小用户', 1, '$2a$10$xVWsNOhHrCxh5UbpCE7/HuJ.PAOKcYAqRxD2CO2nVnJS.IAXkr5aq', 3, 'https://foruda.gitee.com/images/1723603502796844527/03cdca2a_716974.gif', '18812345679', 1, 'youlaitech@163.com', now(), NULL, now(), NULL, 0,NULL);
+
+-- ----------------------------
+-- Table structure for sys_user_role
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_user_role`;
+CREATE TABLE `sys_user_role`  (
+                                  `user_id` bigint NOT NULL COMMENT '用户ID',
+                                  `role_id` bigint NOT NULL COMMENT '角色ID',
+                                  PRIMARY KEY (`user_id`, `role_id`) USING BTREE
+) ENGINE = InnoDB CHARACTER SET = utf8mb4 COMMENT = '用户和角色关联表';
+
+-- ----------------------------
+-- Records of sys_user_role
+-- ----------------------------
+INSERT INTO `sys_user_role` VALUES (1, 1);
+INSERT INTO `sys_user_role` VALUES (2, 2);
+INSERT INTO `sys_user_role` VALUES (3, 3);
+
+
+-- ----------------------------
+-- Table structure for sys_log
+-- (request/operation audit log written by the @Log AOP aspect)
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_log`;
+CREATE TABLE `sys_log` (
+                           `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键',
+                           `module` varchar(50) NOT NULL COMMENT '日志模块',
+                           `request_method` varchar(64) NOT NULL COMMENT '请求方式',
+                           `request_params` text COMMENT '请求参数(批量请求参数可能会超过text)',
+                           `response_content` mediumtext COMMENT '返回参数',
+                           `content` varchar(255) NOT NULL COMMENT '日志内容',
+                           `request_uri` varchar(255) COMMENT '请求路径',
+                           `method` varchar(255) COMMENT '方法名',
+                           `ip` varchar(45) COMMENT 'IP地址',
+                           `province` varchar(100) COMMENT '省份',
+                           `city` varchar(100) COMMENT '城市',
+                           `execution_time` bigint COMMENT '执行时间(ms)',
+                           `browser` varchar(100) COMMENT '浏览器',
+                           `browser_version` varchar(100) COMMENT '浏览器版本',
+                           `os` varchar(100) COMMENT '终端系统',
+                           `create_by` bigint COMMENT '创建人ID',
+                           `create_time` datetime COMMENT '创建时间',
+                           `is_deleted` tinyint DEFAULT '0' COMMENT '逻辑删除标识(1-已删除 0-未删除)',
+                           PRIMARY KEY (`id`) USING BTREE,
+                           KEY `idx_create_time` (`create_time`)
+-- NOTE(review): MyISAM (non-transactional) while every other table is InnoDB —
+-- presumably chosen for cheap append-only log writes; confirm this is intentional.
+) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COMMENT='系统日志表';
+
+-- ----------------------------
+-- Table structure for gen_config
+-- (per-table settings for the code generator; one row per generated table)
+-- ----------------------------
+DROP TABLE IF EXISTS `gen_config`;
+-- NOTE(review): tinyint(4) display width is deprecated since MySQL 8.0.17; harmless but could be plain tinyint
+CREATE TABLE `gen_config` (
+                              `id` bigint NOT NULL AUTO_INCREMENT,
+                              `table_name` varchar(100) NOT NULL COMMENT '表名',
+                              `module_name` varchar(100) COMMENT '模块名',
+                              `package_name` varchar(255) NOT NULL COMMENT '包名',
+                              `business_name` varchar(100) NOT NULL COMMENT '业务名',
+                              `entity_name` varchar(100) NOT NULL COMMENT '实体类名',
+                              `author` varchar(50) NOT NULL COMMENT '作者',
+                              `parent_menu_id` bigint COMMENT '上级菜单ID,对应sys_menu的id ',
+                              `remove_table_prefix` varchar(20) COMMENT '要移除的表前缀,如: sys_',
+                              `page_type` varchar(20) COMMENT '页面类型(classic|curd)',
+                              `create_time` datetime COMMENT '创建时间',
+                              `update_time` datetime COMMENT '更新时间',
+                              `is_deleted` tinyint(4) DEFAULT 0 COMMENT '是否删除',
+                              PRIMARY KEY (`id`),
+                              UNIQUE KEY `uk_tablename` (`table_name`)
+) ENGINE=InnoDB  DEFAULT CHARSET=utf8mb4 COMMENT='代码生成基础配置表';
+
+-- ----------------------------
+-- Table structure for gen_field_config
+-- (per-column settings; child of gen_config via config_id, indexed for the join)
+-- ----------------------------
+DROP TABLE IF EXISTS `gen_field_config`;
+CREATE TABLE `gen_field_config` (
+                                    `id` bigint NOT NULL AUTO_INCREMENT,
+                                    `config_id` bigint NOT NULL COMMENT '关联的配置ID',
+                                    `column_name` varchar(100)  ,
+                                    `column_type` varchar(50)  ,
+                                    `column_length` int ,
+                                    `field_name` varchar(100) NOT NULL COMMENT '字段名称',
+                                    `field_type` varchar(100) COMMENT '字段类型',
+                                    `field_sort` int COMMENT '字段排序',
+                                    `field_comment` varchar(255) COMMENT '字段描述',
+                                    `max_length` int ,
+                                    `is_required` tinyint(1) COMMENT '是否必填',
+                                    `is_show_in_list` tinyint(1) DEFAULT '0' COMMENT '是否在列表显示',
+                                    `is_show_in_form` tinyint(1) DEFAULT '0' COMMENT '是否在表单显示',
+                                    `is_show_in_query` tinyint(1) DEFAULT '0' COMMENT '是否在查询条件显示',
+                                    `query_type` tinyint COMMENT '查询方式',
+                                    `form_type` tinyint COMMENT '表单类型',
+                                    `dict_type` varchar(50) COMMENT '字典类型',
+                                    `create_time` datetime COMMENT '创建时间',
+                                    `update_time` datetime COMMENT '更新时间',
+                                    PRIMARY KEY (`id`),
+                                    KEY `config_id` (`config_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='代码生成字段配置表';
+
+-- ----------------------------
+-- System configuration table (key/value settings read at runtime)
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_config`;
+CREATE TABLE `sys_config` (
+                              `id` bigint NOT NULL AUTO_INCREMENT,
+                              `config_name` varchar(50) NOT NULL COMMENT '配置名称',
+                              `config_key` varchar(50) NOT NULL COMMENT '配置key',
+                              `config_value` varchar(100) NOT NULL COMMENT '配置值',
+                              `remark` varchar(255) COMMENT '备注',
+                              `create_time` datetime COMMENT '创建时间',
+                              `create_by` bigint COMMENT '创建人ID',
+                              `update_time` datetime COMMENT '更新时间',
+                              `update_by` bigint COMMENT '更新人ID',
+                              `is_deleted` tinyint(4) DEFAULT '0' NOT NULL COMMENT '逻辑删除标识(0-未删除 1-已删除)',
+                              PRIMARY KEY (`id`)
+-- NOTE(review): no unique key on config_key — duplicates are possible; confirm whether uk is wanted
+) ENGINE=InnoDB COMMENT='系统配置表';
+
+-- seed: per-IP QPS rate-limit threshold consumed by the rate limiter
+INSERT INTO `sys_config` VALUES (1, '系统限流QPS', 'IP_QPS_THRESHOLD_LIMIT', '10', '单个IP请求的最大每秒查询数(QPS)阈值Key', now(), 1, NULL, NULL, 0);
+
+-- ----------------------------
+-- Notice / announcement table (sys_notice)
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_notice`;
+CREATE TABLE `sys_notice` (
+                              `id` bigint NOT NULL AUTO_INCREMENT,
+                              `title` varchar(50) COMMENT '通知标题',
+                              `content` text COMMENT '通知内容',
+                              `type` tinyint NOT NULL COMMENT '通知类型(关联字典编码:notice_type)',
+                              `level` varchar(5) NOT NULL COMMENT '通知等级(字典code:notice_level)',
+                              `target_type` tinyint NOT NULL COMMENT '目标类型(1: 全体, 2: 指定)',
+                              `target_user_ids` varchar(255) COMMENT '目标人ID集合(多个使用英文逗号,分割)',
+                              `publisher_id` bigint COMMENT '发布人ID',
+                              `publish_status` tinyint DEFAULT '0' COMMENT '发布状态(0: 未发布, 1: 已发布, -1: 已撤回)',
+                              `publish_time` datetime COMMENT '发布时间',
+                              `revoke_time` datetime COMMENT '撤回时间',
+                              `create_by` bigint NOT NULL COMMENT '创建人ID',
+                              `create_time` datetime NOT NULL COMMENT '创建时间',
+                              `update_by` bigint COMMENT '更新人ID',
+                              `update_time` datetime COMMENT '更新时间',
+                              `is_deleted` tinyint(1) DEFAULT '0' COMMENT '是否删除(0: 未删除, 1: 已删除)',
+                              PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='通知公告表';
+
+-- Seed notices. NOTE(review): rows are published (publish_status=1) yet also set
+-- revoke_time=now(); presumably only publish_time should be populated — confirm.
+INSERT INTO `sys_notice`  VALUES (1, 'v2.12.0 新增系统日志,访问趋势统计功能。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 1, 'L', 1, '2', 1, 1, now(), now(), 2, now(), 1, now(), 0);
+INSERT INTO `sys_notice`  VALUES (2, 'v2.13.0 新增菜单搜索。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 1, 'L', 1, '2', 1, 1, now(), now(), 2, now(), 1, now(), 0);
+INSERT INTO `sys_notice`  VALUES (3, 'v2.14.0 新增个人中心。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 1, 'L', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (4, 'v2.15.0 登录页面改造。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 1, 'L', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (5, 'v2.16.0 通知公告、字典翻译组件。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 1, 'L', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (6, '系统将于本周六凌晨 2 点进行维护,预计维护时间为 2 小时。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 2, 'H', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (7, '最近发现一些钓鱼邮件,请大家提高警惕,不要点击陌生链接。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 3, 'L', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (8, '国庆假期从 10 月 1 日至 10 月 7 日放假,共 7 天。', '<p>1. 消息通知</p><p>2. 字典重构</p><p>3. 代码生成</p>', 4, 'L', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (9, '公司将在 10 月 15 日举办新产品发布会,敬请期待。', '公司将在 10 月 15 日举办新产品发布会,敬请期待。', 5, 'H', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+INSERT INTO `sys_notice`  VALUES (10, 'v2.16.1 版本发布。', 'v2.16.1 版本修复了 WebSocket 重复连接导致的后台线程阻塞问题,优化了通知公告。', 1, 'M', 1, '2', 2, 1, now(), now(), 2, now(), 2, now(), 0);
+
+-- ----------------------------
+-- Per-user notice delivery/read state (sys_user_notice)
+-- ----------------------------
+DROP TABLE IF EXISTS `sys_user_notice`;
+CREATE TABLE `sys_user_notice` (
+                                   `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'id',
+                                   `notice_id` bigint NOT NULL COMMENT '公共通知id',
+                                   `user_id` bigint NOT NULL COMMENT '用户id',
+                                   -- NOTE(review): bigint for a 0/1 flag — tinyint(1) would match
+                                   -- the convention used elsewhere; confirm before changing.
+                                   `is_read` bigint DEFAULT '0' COMMENT '读取状态(0: 未读, 1: 已读)',
+                                   `read_time` datetime COMMENT '阅读时间',
+                                   `create_time` datetime NOT NULL COMMENT '创建时间',
+                                   `update_time` datetime COMMENT '更新时间',
+                                   `is_deleted` tinyint DEFAULT '0' COMMENT '逻辑删除(0: 未删除, 1: 已删除)',
+                                   PRIMARY KEY (`id`) USING BTREE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='用户通知公告表';
+
+-- Seed: user 2 has read notices 1..10 (read_time left NULL despite is_read=1)
+INSERT INTO `sys_user_notice` VALUES (1, 1, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (2, 2, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (3, 3, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (4, 4, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (5, 5, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (6, 6, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (7, 7, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (8, 8, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (9, 9, 2, 1, NULL, now(), now(), 0);
+INSERT INTO `sys_user_notice` VALUES (10, 10, 2, 1, NULL, now(), now(), 0);
+
+-- re-enable FK checks disabled at the top of the dump
+SET FOREIGN_KEY_CHECKS = 1;

+ 21 - 0
src/main/java/com/zsElectric/boot/ZsElectricBootApplication.java

@@ -0,0 +1,21 @@
+package com.zsElectric.boot;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.context.properties.ConfigurationPropertiesScan;
+
+/**
+ * Application entry point: boots the Spring context.
+ *
+ * @author Ray.Hao
+ * @since 0.0.1
+ */
+@SpringBootApplication
+@ConfigurationPropertiesScan // auto-register @ConfigurationProperties classes without @EnableConfigurationProperties
+public class ZsElectricBootApplication {
+
+    public static void main(String[] args) {
+        SpringApplication.run(ZsElectricBootApplication.class, args);
+    }
+
+}

+ 115 - 0
src/main/java/com/zsElectric/boot/auth/controller/AuthController.java

@@ -0,0 +1,115 @@
+package com.zsElectric.boot.auth.controller;
+
+import com.zsElectric.boot.auth.model.vo.CaptchaVO;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppPhoneLoginDTO;
+import com.zsElectric.boot.common.enums.LogModuleEnum;
+import com.zsElectric.boot.core.web.Result;
+import com.zsElectric.boot.auth.service.AuthService;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppCodeLoginDTO;
+import com.zsElectric.boot.common.annotation.Log;
+import com.zsElectric.boot.security.model.AuthenticationToken;
+import io.swagger.v3.oas.annotations.Operation;
+import io.swagger.v3.oas.annotations.Parameter;
+import io.swagger.v3.oas.annotations.tags.Tag;
+import jakarta.validation.Valid;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.web.bind.annotation.*;
+
+
+/**
+ * Authentication REST controller: captcha issuing, the login flows
+ * (password / SMS / WeChat web / WeChat mini-app), logout and token refresh.
+ * All work is delegated to {@link AuthService}.
+ *
+ * @author Ray.Hao
+ * @since 2022/10/16
+ */
+@Tag(name = "01.认证中心")
+@RestController
+@RequestMapping("/api/v1/auth")
+@RequiredArgsConstructor
+@Slf4j
+public class AuthController {
+
+    private final AuthService authService;
+
+    /** Issues a graphic captcha; the returned key is submitted back with the login form. */
+    @Operation(summary = "获取验证码")
+    @GetMapping("/captcha")
+    public Result<CaptchaVO> getCaptcha() {
+        CaptchaVO captcha = authService.getCaptcha();
+        return Result.success(captcha);
+    }
+
+    /** Username/password login; returns the issued token pair. */
+    @Operation(summary = "账号密码登录")
+    @PostMapping("/login")
+    @Log(value = "登录", module = LogModuleEnum.LOGIN)
+    public Result<AuthenticationToken> login(
+            @Parameter(description = "用户名", example = "admin") @RequestParam String username,
+            @Parameter(description = "密码", example = "123456") @RequestParam String password
+    ) {
+        AuthenticationToken authenticationToken = authService.login(username, password);
+        return Result.success(authenticationToken);
+    }
+
+    /** SMS verification-code login; the code is requested via {@link #sendLoginVerifyCode}. */
+    @Operation(summary = "短信验证码登录")
+    @PostMapping("/login/sms")
+    @Log(value = "短信验证码登录", module = LogModuleEnum.LOGIN)
+    public Result<AuthenticationToken> loginBySms(
+            @Parameter(description = "手机号", example = "18812345678") @RequestParam String mobile,
+            @Parameter(description = "验证码", example = "1234") @RequestParam String code
+    ) {
+        AuthenticationToken loginResult = authService.loginBySms(mobile, code);
+        return Result.success(loginResult);
+    }
+
+    /** Sends an SMS login code to the given mobile number. */
+    @Operation(summary = "发送登录短信验证码")
+    @PostMapping("/sms/code")
+    public Result<Void> sendLoginVerifyCode(
+            @Parameter(description = "手机号", example = "18812345678") @RequestParam String mobile
+    ) {
+        authService.sendSmsLoginCode(mobile);
+        return Result.success();
+    }
+
+    /** WeChat OAuth login for the web client, using the authorization code. */
+    @Operation(summary = "微信授权登录(Web)")
+    @PostMapping("/login/wechat")
+    @Log(value = "微信登录", module = LogModuleEnum.LOGIN)
+    public Result<AuthenticationToken> loginByWechat(
+            @Parameter(description = "微信授权码", example = "code") @RequestParam String code
+    ) {
+        AuthenticationToken loginResult = authService.loginByWechat(code);
+        return Result.success(loginResult);
+    }
+
+    /** WeChat mini-app login using the wx.login() code. */
+    @Operation(summary = "微信小程序登录(Code)")
+    @PostMapping("/wx/miniapp/code-login")
+    public Result<AuthenticationToken> loginByWxMiniAppCode(@RequestBody @Valid WxMiniAppCodeLoginDTO loginDTO) {
+        AuthenticationToken token = authService.loginByWxMiniAppCode(loginDTO);
+        return Result.success(token);
+    }
+
+    /** WeChat mini-app login using the encrypted phone-number payload. */
+    @Operation(summary = "微信小程序登录(手机号)")
+    @PostMapping("/wx/miniapp/phone-login")
+    public Result<AuthenticationToken> loginByWxMiniAppPhone(@RequestBody @Valid WxMiniAppPhoneLoginDTO loginDTO) {
+        AuthenticationToken token = authService.loginByWxMiniAppPhone(loginDTO);
+        return Result.success(token);
+    }
+
+
+    /**
+     * Logs out the current user (invalidates the access token).
+     * Typed as Result&lt;Void&gt; for consistency with the other endpoints
+     * (was a raw wildcard Result&lt;?&gt;).
+     */
+    @Operation(summary = "退出登录")
+    @DeleteMapping("/logout")
+    @Log(value = "退出登录", module = LogModuleEnum.LOGIN)
+    public Result<Void> logout() {
+        authService.logout();
+        return Result.success();
+    }
+
+    /**
+     * Exchanges a refresh token for a new token pair.
+     * Typed as Result&lt;AuthenticationToken&gt; so the OpenAPI schema documents
+     * the payload (was a raw wildcard Result&lt;?&gt;).
+     */
+    @Operation(summary = "刷新令牌")
+    @PostMapping("/refresh-token")
+    public Result<AuthenticationToken> refreshToken(
+            @Parameter(description = "刷新令牌", example = "xxx.xxx.xxx") @RequestParam String refreshToken
+    ) {
+        AuthenticationToken authenticationToken = authService.refreshToken(refreshToken);
+        return Result.success(authenticationToken);
+    }
+
+}

+ 22 - 0
src/main/java/com/zsElectric/boot/auth/model/dto/WxMiniAppCodeLoginDTO.java

@@ -0,0 +1,22 @@
+package com.zsElectric.boot.auth.model.dto;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+import lombok.Data;
+
+import jakarta.validation.constraints.NotBlank;
+
+/**
+ * Request body for WeChat mini-app login by code
+ * (the temporary code returned by wx.login()).
+ *
+ * @author 有来技术团队
+ * @since 2.0.0
+ */
+@Schema(description = "微信小程序Code登录请求参数")
+@Data
+public class WxMiniAppCodeLoginDTO {
+
+    // Temporary login code from wx.login(); mandatory.
+    @Schema(description = "微信小程序登录时获取的code", requiredMode = Schema.RequiredMode.REQUIRED)
+    @NotBlank(message = "code不能为空")
+    private String code;
+
+} 

+ 28 - 0
src/main/java/com/zsElectric/boot/auth/model/dto/WxMiniAppPhoneLoginDTO.java

@@ -0,0 +1,28 @@
+package com.zsElectric.boot.auth.model.dto;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+import lombok.Data;
+
+import jakarta.validation.constraints.NotBlank;
+
+/**
+ * Request body for WeChat mini-app phone-number login: the login code plus
+ * the encrypted phone payload and IV from getPhoneNumber.
+ *
+ * @author Ray.Hao
+ * @since 2.0.0
+ */
+@Schema(description = "微信小程序手机号登录请求参数")
+@Data
+public class WxMiniAppPhoneLoginDTO {
+
+    // Temporary login code from wx.login(); mandatory.
+    @Schema(description = "微信小程序登录时获取的code", requiredMode = Schema.RequiredMode.REQUIRED)
+    @NotBlank(message = "code不能为空")
+    private String code;
+
+    // NOTE(review): not @NotBlank — presumably optional when the new code-based
+    // phone API is used; confirm against the authentication provider.
+    @Schema(description = "包括敏感数据在内的完整用户信息的加密数据")
+    private String encryptedData;
+
+    // AES initialization vector paired with encryptedData.
+    @Schema(description = "加密算法的初始向量")
+    private String iv;
+
+} 

+ 24 - 0
src/main/java/com/zsElectric/boot/auth/model/vo/CaptchaVO.java

@@ -0,0 +1,24 @@
+package com.zsElectric.boot.auth.model.vo;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+import lombok.Builder;
+import lombok.Data;
+
+/**
+ * Captcha response: the cache key the client echoes back at login
+ * plus the rendered image as a Base64 data string.
+ *
+ * @author Ray.Hao
+ * @since 2023/03/24
+ */
+@Schema(description = "验证码信息")
+@Data
+@Builder
+public class CaptchaVO {
+
+    // Redis cache key under which the captcha text is stored.
+    @Schema(description = "验证码缓存 Key")
+    private String captchaKey;
+
+    // Captcha image encoded as a Base64 string for inline display.
+    @Schema(description = "验证码图片Base64字符串")
+    private String captchaBase64;
+
+}

+ 84 - 0
src/main/java/com/zsElectric/boot/auth/service/AuthService.java

@@ -0,0 +1,84 @@
+package com.zsElectric.boot.auth.service;
+
+import com.zsElectric.boot.auth.model.vo.CaptchaVO;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppPhoneLoginDTO;
+import com.zsElectric.boot.security.model.AuthenticationToken;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppCodeLoginDTO;
+
+/**
+ * Authentication service: captcha issuing, all login flows exposed by the
+ * auth controller, logout and token refresh.
+ *
+ * @author Ray.Hao
+ * @since 2.4.0
+ */
+public interface AuthService {
+
+    /**
+     * Username/password login.
+     *
+     * @param username username
+     * @param password raw password
+     * @return issued token pair
+     */
+    AuthenticationToken login(String username, String password);
+
+    /**
+     * Logs out the current user (invalidates the access token).
+     */
+    void logout();
+
+    /**
+     * Generates a graphic captcha and caches its text for later validation.
+     *
+     * @return captcha key + Base64 image
+     */
+    CaptchaVO getCaptcha();
+
+    /**
+     * Exchanges a refresh token for a new token pair.
+     *
+     * @param refreshToken refresh token
+     * @return new token pair
+     */
+    AuthenticationToken refreshToken(String refreshToken);
+
+    /**
+     * WeChat login with an authorization code.
+     * NOTE(review): the controller exposes this as the *web* WeChat login,
+     * but the original doc called it mini-app login — confirm intended flow.
+     *
+     * @param code WeChat authorization code
+     * @return issued token pair
+     */
+    AuthenticationToken loginByWechat(String code);
+
+    /**
+     * WeChat mini-app login by code.
+     *
+     * @param loginDTO login parameters
+     * @return issued token pair
+     */
+    AuthenticationToken loginByWxMiniAppCode(WxMiniAppCodeLoginDTO loginDTO);
+
+    /**
+     * WeChat mini-app login by encrypted phone number.
+     *
+     * @param loginDTO login parameters
+     * @return issued token pair
+     */
+    AuthenticationToken loginByWxMiniAppPhone(WxMiniAppPhoneLoginDTO loginDTO);
+
+    /**
+     * Sends an SMS login verification code to the given mobile number.
+     *
+     * @param mobile mobile number
+     */
+    void sendSmsLoginCode(String mobile);
+
+    /**
+     * SMS verification-code login.
+     *
+     * @param mobile mobile number
+     * @param code   verification code
+     * @return issued token pair
+     */
+    AuthenticationToken loginBySms(String mobile, String code);
+}

+ 270 - 0
src/main/java/com/zsElectric/boot/auth/service/impl/AuthServiceImpl.java

@@ -0,0 +1,270 @@
+package com.zsElectric.boot.auth.service.impl;
+
+import cn.hutool.captcha.AbstractCaptcha;
+import cn.hutool.captcha.CaptchaUtil;
+import cn.hutool.captcha.generator.CodeGenerator;
+import cn.hutool.core.util.IdUtil;
+import cn.hutool.core.util.StrUtil;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppCodeLoginDTO;
+import com.zsElectric.boot.auth.model.dto.WxMiniAppPhoneLoginDTO;
+import com.zsElectric.boot.auth.model.vo.CaptchaVO;
+import com.zsElectric.boot.auth.service.AuthService;
+import com.zsElectric.boot.common.constant.RedisConstants;
+import com.zsElectric.boot.common.constant.SecurityConstants;
+import com.zsElectric.boot.common.enums.CaptchaTypeEnum;
+import com.zsElectric.boot.config.property.CaptchaProperties;
+import com.zsElectric.boot.platform.sms.enums.SmsTypeEnum;
+import com.zsElectric.boot.platform.sms.service.SmsService;
+import com.zsElectric.boot.security.model.AuthenticationToken;
+import com.zsElectric.boot.security.model.SmsAuthenticationToken;
+import com.zsElectric.boot.security.model.WxMiniAppCodeAuthenticationToken;
+import com.zsElectric.boot.security.model.WxMiniAppPhoneAuthenticationToken;
+import com.zsElectric.boot.security.token.TokenManager;
+import com.zsElectric.boot.security.util.SecurityUtils;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.data.redis.core.RedisTemplate;
+import org.springframework.security.authentication.AuthenticationManager;
+import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.context.SecurityContextHolder;
+import org.springframework.stereotype.Service;
+
+import java.awt.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Authentication service implementation. Every login flow follows the same
+ * three-step shape: build an unauthenticated token, authenticate it via the
+ * {@link AuthenticationManager}, then generate a JWT and publish the
+ * authenticated principal into the Security context (read by the login-log AOP).
+ *
+ * @author Ray.Hao
+ * @since 2.4.0
+ */
+@Service
+@RequiredArgsConstructor
+@Slf4j
+public class AuthServiceImpl implements AuthService {
+
+    private final AuthenticationManager authenticationManager;
+    private final TokenManager tokenManager;
+
+    private final Font captchaFont;
+    private final CaptchaProperties captchaProperties;
+    private final CodeGenerator codeGenerator;
+
+    private final SmsService smsService;
+    private final RedisTemplate<String, Object> redisTemplate;
+
+    /**
+     * Username/password login.
+     *
+     * @param username username (leading/trailing whitespace is trimmed)
+     * @param password raw password
+     * @return issued token pair
+     */
+    @Override
+    public AuthenticationToken login(String username, String password) {
+        // 1. Build an unauthenticated username/password token
+        UsernamePasswordAuthenticationToken authenticationToken =
+                new UsernamePasswordAuthenticationToken(username.trim(), password);
+
+        // 2. Authenticate (delegates to the configured provider; throws on failure)
+        Authentication authentication = authenticationManager.authenticate(authenticationToken);
+
+        // 3. Generate the JWT and publish the principal for the login-log AOP
+        AuthenticationToken authenticationTokenResponse =
+                tokenManager.generateToken(authentication);
+        SecurityContextHolder.getContext().setAuthentication(authentication);
+        return authenticationTokenResponse;
+    }
+
+    /**
+     * WeChat one-tap authorization login.
+     *
+     * @param code WeChat authorization code
+     * @return issued token pair
+     */
+    @Override
+    public AuthenticationToken loginByWechat(String code) {
+        // 1. Build an unauthenticated WeChat code token
+        WxMiniAppCodeAuthenticationToken authenticationToken = new WxMiniAppCodeAuthenticationToken(code);
+
+        // 2. Authenticate
+        Authentication authentication = authenticationManager.authenticate(authenticationToken);
+
+        // 3. Generate the JWT and publish the principal for the login-log AOP
+        AuthenticationToken token = tokenManager.generateToken(authentication);
+        SecurityContextHolder.getContext().setAuthentication(authentication);
+
+        return token;
+    }
+
+    /**
+     * Sends the SMS login verification code and caches it for validation.
+     *
+     * @param mobile mobile number
+     */
+    @Override
+    public void sendSmsLoginCode(String mobile) {
+
+        // Random 4-digit code (disabled for now, see TODO below):
+        // String code = String.valueOf((int) ((Math.random() * 9 + 1) * 1000));
+        // TODO For easy testing the code is fixed to 1234; switch to the random
+        // code above once a real SMS vendor is configured.
+        String code = "1234";
+
+        // Send the code via the SMS provider
+        Map<String, String> templateParams = new HashMap<>();
+        templateParams.put("code", code);
+        try {
+            smsService.sendSms(mobile, SmsTypeEnum.LOGIN, templateParams);
+        } catch (Exception e) {
+            log.error("发送短信验证码失败", e);
+        }
+        // Cache the code in Redis (5 min TTL) for login validation.
+        // NOTE(review): the code is cached even when sendSms failed — intentional
+        // while the code is fixed to 1234, but revisit once real SMS is enabled.
+        redisTemplate.opsForValue().set(StrUtil.format(RedisConstants.Captcha.SMS_LOGIN_CODE, mobile), code, 5, TimeUnit.MINUTES);
+    }
+
+    /**
+     * SMS verification-code login.
+     *
+     * @param mobile mobile number
+     * @param code   verification code
+     * @return issued token pair
+     */
+    @Override
+    public AuthenticationToken loginBySms(String mobile, String code) {
+        // 1. Build an unauthenticated SMS token
+        SmsAuthenticationToken smsAuthenticationToken = new SmsAuthenticationToken(mobile, code);
+
+        // 2. Authenticate
+        Authentication authentication = authenticationManager.authenticate(smsAuthenticationToken);
+
+        // 3. Generate the JWT and publish the principal for the login-log AOP
+        AuthenticationToken authenticationToken = tokenManager.generateToken(authentication);
+        SecurityContextHolder.getContext().setAuthentication(authentication);
+
+        return authenticationToken;
+    }
+
+    /**
+     * Logs out: blacklists the bearer JWT (if present) and clears the context.
+     */
+    @Override
+    public void logout() {
+        String token = SecurityUtils.getTokenFromRequest();
+        if (StrUtil.isNotBlank(token) && token.startsWith(SecurityConstants.BEARER_TOKEN_PREFIX )) {
+            token = token.substring(SecurityConstants.BEARER_TOKEN_PREFIX .length());
+            // Add the JWT to the blacklist so it can no longer be used
+            tokenManager.invalidateToken(token);
+            // Drop the authenticated principal from the Security context
+            SecurityContextHolder.clearContext();
+        }
+    }
+
+    /**
+     * Generates a graphic captcha (type/size/noise from CaptchaProperties),
+     * caches its text in Redis under a fresh UUID key, and returns key + image.
+     *
+     * @return captcha key + Base64 image
+     */
+    @Override
+    public CaptchaVO getCaptcha() {
+
+        String captchaType = captchaProperties.getType();
+        int width = captchaProperties.getWidth();
+        int height = captchaProperties.getHeight();
+        int interfereCount = captchaProperties.getInterfereCount();
+        int codeLength = captchaProperties.getCode().getLength();
+
+        // Pick the Hutool captcha renderer matching the configured type
+        AbstractCaptcha captcha;
+        if (CaptchaTypeEnum.CIRCLE.name().equalsIgnoreCase(captchaType)) {
+            captcha = CaptchaUtil.createCircleCaptcha(width, height, codeLength, interfereCount);
+        } else if (CaptchaTypeEnum.GIF.name().equalsIgnoreCase(captchaType)) {
+            captcha = CaptchaUtil.createGifCaptcha(width, height, codeLength);
+        } else if (CaptchaTypeEnum.LINE.name().equalsIgnoreCase(captchaType)) {
+            captcha = CaptchaUtil.createLineCaptcha(width, height, codeLength, interfereCount);
+        } else if (CaptchaTypeEnum.SHEAR.name().equalsIgnoreCase(captchaType)) {
+            captcha = CaptchaUtil.createShearCaptcha(width, height, codeLength, interfereCount);
+        } else {
+            throw new IllegalArgumentException("Invalid captcha type: " + captchaType);
+        }
+        captcha.setGenerator(codeGenerator);
+        captcha.setTextAlpha(captchaProperties.getTextAlpha());
+        captcha.setFont(captchaFont);
+
+        String captchaCode = captcha.getCode();
+        String imageBase64Data = captcha.getImageBase64Data();
+
+        // Cache the captcha text in Redis for later login validation
+        String captchaKey = IdUtil.fastSimpleUUID();
+        redisTemplate.opsForValue().set(
+                StrUtil.format(RedisConstants.Captcha.IMAGE_CODE, captchaKey),
+                captchaCode,
+                captchaProperties.getExpireSeconds(),
+                TimeUnit.SECONDS
+        );
+
+        return CaptchaVO.builder()
+                .captchaKey(captchaKey)
+                .captchaBase64(imageBase64Data)
+                .build();
+    }
+
+    /**
+     * Exchanges a refresh token for a new token pair.
+     *
+     * @param refreshToken refresh token
+     * @return new token pair
+     */
+    @Override
+    public AuthenticationToken refreshToken(String refreshToken) {
+        return tokenManager.refreshToken(refreshToken);
+    }
+
+    /**
+     * WeChat mini-app login by code.
+     *
+     * @param loginDTO login parameters
+     * @return issued token pair
+     */
+    @Override
+    public AuthenticationToken loginByWxMiniAppCode(WxMiniAppCodeLoginDTO loginDTO) {
+        // 1. Build an unauthenticated mini-app code token
+        WxMiniAppCodeAuthenticationToken authenticationToken = new WxMiniAppCodeAuthenticationToken(loginDTO.getCode());
+
+        // 2. Authenticate
+        Authentication authentication = authenticationManager.authenticate(authenticationToken);
+
+        // 3. Generate the JWT and publish the principal for the login-log AOP
+        AuthenticationToken token = tokenManager.generateToken(authentication);
+        SecurityContextHolder.getContext().setAuthentication(authentication);
+
+        return token;
+    }
+
+    /**
+     * WeChat mini-app login by encrypted phone number.
+     *
+     * @param loginDTO login parameters (code + encryptedData + iv)
+     * @return issued token pair
+     */
+    @Override
+    public AuthenticationToken loginByWxMiniAppPhone(WxMiniAppPhoneLoginDTO loginDTO) {
+        // Build an unauthenticated phone-number token from the encrypted payload
+        WxMiniAppPhoneAuthenticationToken authenticationToken = new WxMiniAppPhoneAuthenticationToken(
+                loginDTO.getCode(),
+                loginDTO.getEncryptedData(),
+                loginDTO.getIv()
+        );
+
+        // Authenticate
+        Authentication authentication = authenticationManager.authenticate(authenticationToken);
+
+        // Generate the JWT and publish the principal into the Security context
+        AuthenticationToken token = tokenManager.generateToken(authentication);
+        SecurityContextHolder.getContext().setAuthentication(authentication);
+
+        return token;
+    }
+
+}

+ 28 - 0
src/main/java/com/zsElectric/boot/common/annotation/DataPermission.java

@@ -0,0 +1,28 @@
+package com.zsElectric.boot.common.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * Data-permission marker: SQL issued by the annotated type/method is filtered
+ * by department/user ownership via MyBatis-Plus'
+ * {@link com.baomidou.mybatisplus.extension.plugins.inner.DataPermissionInterceptor}.
+ *
+ * @author zc
+ * @since 2.0.0
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.METHOD})
+public @interface DataPermission {
+
+    /**
+     * Table alias of the department column in the query (empty = no alias).
+     */
+    String deptAlias() default "";
+
+    /** Column holding the department id. */
+    String deptIdColumnName() default "dept_id";
+
+    /** Table alias of the user column in the query (empty = no alias). */
+    String userAlias() default "";
+
+    /** Column holding the owning user's id. */
+    String userIdColumnName() default "create_by";
+
+}
+

+ 49 - 0
src/main/java/com/zsElectric/boot/common/annotation/Log.java

@@ -0,0 +1,49 @@
+package com.zsElectric.boot.common.annotation;
+
+import com.zsElectric.boot.common.enums.LogModuleEnum;
+
+import java.lang.annotation.*;
+
+/**
+ * Operation-log annotation: methods marked with it are recorded to sys_log
+ * by the logging AOP aspect.
+ *
+ * @author Ray
+ * @since 2024/6/25
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+@Documented
+public @interface Log {
+
+    /**
+     * Log description (human-readable action name).
+     *
+     * @return log description
+     */
+    String value() default "";
+
+    /**
+     * Log module the entry is grouped under.
+     *
+     * @return log module
+     */
+
+    LogModuleEnum module();
+
+    /**
+     * Whether to record request parameters.
+     *
+     * @return true to record request parameters
+     */
+    boolean params() default true;
+
+    /**
+     * Whether to record the response body.
+     * <br/>
+     * Off by default to keep log rows small.
+     * @return true to record the response body
+     */
+    boolean result() default false;
+
+
+}

+ 27 - 0
src/main/java/com/zsElectric/boot/common/annotation/RepeatSubmit.java

@@ -0,0 +1,27 @@
+package com.zsElectric.boot.common.annotation;
+
+
+import java.lang.annotation.*;
+
+/**
+ * Duplicate-submit guard.
+ * <p>
+ * Placed on a handler method to reject repeated submissions within the
+ * configured window (default 5 seconds).
+ *
+ * @author Ray.Hao
+ * @since 2.3.0
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+@Documented
+@Inherited
+public @interface RepeatSubmit {
+
+    /**
+     * Lock expiry in seconds.
+     * <p>
+     * Repeat submissions are rejected for this long (default 5s).
+     */
+    int expire() default 5;
+
+}

+ 35 - 0
src/main/java/com/zsElectric/boot/common/annotation/ValidField.java

@@ -0,0 +1,35 @@
+package com.zsElectric.boot.common.annotation;
+
+import com.zsElectric.boot.core.validator.FieldValidator;
+import jakarta.validation.Constraint;
+import jakarta.validation.Payload;
+
+import java.lang.annotation.*;
+
+/**
+ * Bean-validation constraint restricting a String field/parameter to a fixed
+ * whitelist of values; validated by {@link FieldValidator}.
+ *
+ * @author Ray.Hao
+ * @since 2.18.0
+ */
+@Documented
+@Constraint(validatedBy = FieldValidator.class)
+@Target({ElementType.FIELD, ElementType.PARAMETER})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface ValidField {
+
+    /**
+     * Error message used when validation fails.
+     */
+    String message() default "非法字段";
+
+    /** Validation groups (standard Bean Validation attribute). */
+    Class<?>[] groups() default {};
+
+    /** Payload carriers (standard Bean Validation attribute). */
+    Class<? extends Payload>[] payload() default {};
+
+    /**
+     * The whitelist of allowed values (no default — must be supplied).
+     */
+    String[] allowedValues();
+
+}

+ 48 - 0
src/main/java/com/zsElectric/boot/common/base/BaseEntity.java

@@ -0,0 +1,48 @@
+package com.zsElectric.boot.common.base;
+
+import com.baomidou.mybatisplus.annotation.*;
+import com.fasterxml.jackson.annotation.JsonFormat;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.Data;
+
+import java.io.Serial;
+import java.io.Serializable;
+import java.time.LocalDateTime;
+
+/**
+ * Base entity: common columns shared by all MyBatis-Plus entities
+ * (auto-increment id, audit timestamps filled by the meta-object handler).
+ * NOTE(review): the original doc also mentioned a logical-delete flag,
+ * but no such field is declared here — subclasses presumably add it.
+ *
+ * @author Ray
+ * @since 2024/6/23
+ */
+@Data
+public class BaseEntity implements Serializable {
+
+    @Serial
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Primary key (database auto-increment).
+     */
+    @TableId(type = IdType.AUTO)
+    private Long id;
+
+    /**
+     * Creation time; filled on INSERT, omitted from JSON when null.
+     */
+    @TableField(fill = FieldFill.INSERT)
+    @JsonInclude(value = JsonInclude.Include.NON_NULL)
+    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
+    private LocalDateTime createTime;
+
+    /**
+     * Last update time; filled on INSERT and UPDATE, omitted from JSON when null.
+     */
+    @TableField(fill = FieldFill.INSERT_UPDATE)
+    @JsonInclude(value = JsonInclude.Include.NON_NULL)
+    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
+    private LocalDateTime updateTime;
+
+}

+ 29 - 0
src/main/java/com/zsElectric/boot/common/base/BasePageQuery.java

@@ -0,0 +1,29 @@
package com.zsElectric.boot.common.base;

import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;

import java.io.Serial;
import java.io.Serializable;

/**
 * Base pagination request object.
 *
 * <p>Query DTOs extend this class to inherit the standard paging parameters.
 * Defaults: page 1, 10 records per page.</p>
 *
 * @author haoxr
 * @since 2021/2/28
 */
@Data
@Schema
public class BasePageQuery implements Serializable {

    @Serial
    private static final long serialVersionUID = 1L;

    // 1-based page index; defaults to the first page
    @Schema(description = "页码", requiredMode = Schema.RequiredMode.REQUIRED, example = "1")
    private int pageNum = 1;

    // page size; defaults to 10 records per page
    @Schema(description = "每页记录数", requiredMode = Schema.RequiredMode.REQUIRED, example = "10")
    private int pageSize = 10;


}

+ 21 - 0
src/main/java/com/zsElectric/boot/common/base/BaseVO.java

@@ -0,0 +1,21 @@
package com.zsElectric.boot.common.base;

import lombok.Data;
import lombok.ToString;

import java.io.Serial;
import java.io.Serializable;

/**
 * Base class for view objects (VOs).
 *
 * <p>Carries only serialization support; response VOs extend it so they share a
 * common, serializable root type.</p>
 *
 * @author haoxr
 * @since 2022/10/22
 */
@Data
@ToString
public class BaseVO implements Serializable {

    @Serial
    private static final long serialVersionUID = 1L;
}

+ 88 - 0
src/main/java/com/zsElectric/boot/common/base/IBaseEnum.java

@@ -0,0 +1,88 @@
import java.util.EnumSet;
import java.util.Objects;

/**
 * Common contract for enums that carry a stored value and a human-readable label,
 * plus static lookup helpers by value or by label.
 *
 * <p>Fixes over the previous revision: raw {@code IBaseEnum} bounds are now
 * {@code IBaseEnum<?>}, the duplicated lookup loop is shared via
 * {@link #getEnumByValue}, and equality uses the JDK's {@link Objects#equals}
 * (null-safe {@code equals}) instead of Hutool's {@code ObjectUtil.equal}.</p>
 *
 * @author haoxr
 * @since 2022/3/27 12:06
 */
public interface IBaseEnum<T> {

    /** Stored (persisted) value of the enum constant. */
    T getValue();

    /** Display label of the enum constant. */
    String getLabel();

    /**
     * Looks up an enum constant by its stored value.
     *
     * @param value stored value to match; must not be null
     * @param clazz enum class to search
     * @param <E>   enum type implementing {@code IBaseEnum}
     * @return the matching constant, or {@code null} if no constant has this value
     * @throws NullPointerException if {@code value} is null
     */
    static <E extends Enum<E> & IBaseEnum<?>> E getEnumByValue(Object value, Class<E> clazz) {
        Objects.requireNonNull(value, "value");
        // EnumSet.allOf enumerates every constant of the enum type
        return EnumSet.allOf(clazz).stream()
                .filter(e -> Objects.equals(e.getValue(), value))
                .findFirst()
                .orElse(null);
    }

    /**
     * Looks up the label of the constant whose stored value matches.
     *
     * @param value stored value to match; must not be null
     * @param clazz enum class to search
     * @param <E>   enum type implementing {@code IBaseEnum}
     * @return the matching label, or {@code null} if no constant has this value
     */
    static <E extends Enum<E> & IBaseEnum<?>> String getLabelByValue(Object value, Class<E> clazz) {
        // reuse the single lookup path instead of duplicating the stream pipeline
        E matchEnum = getEnumByValue(value, clazz);
        return matchEnum != null ? matchEnum.getLabel() : null;
    }

    /**
     * Looks up the stored value of the constant whose label matches.
     *
     * @param label display label to match; must not be null
     * @param clazz enum class to search
     * @param <E>   enum type implementing {@code IBaseEnum}
     * @return the matching stored value, or {@code null} if no constant has this label
     */
    static <E extends Enum<E> & IBaseEnum<?>> Object getValueByLabel(String label, Class<E> clazz) {
        Objects.requireNonNull(label, "label");
        return EnumSet.allOf(clazz).stream()
                .filter(e -> Objects.equals(e.getLabel(), label))
                .findFirst()
                .map(IBaseEnum::getValue)
                .orElse(null);
    }

}

+ 38 - 0
src/main/java/com/zsElectric/boot/common/constant/JwtClaimConstants.java

@@ -0,0 +1,38 @@
package com.zsElectric.boot.common.constant;

/**
 * JWT claim name constants.
 * <p>
 * Claims are part of the JWT payload and carry the authenticated subject's
 * state plus extra metadata.
 *
 * @author haoxr
 * @since 2023/11/24
 */
public interface JwtClaimConstants {

    /**
     * Token type claim.
     */
    String TOKEN_TYPE = "tokenType";

    /**
     * User ID claim.
     */
    String USER_ID = "userId";

    /**
     * Department ID claim.
     */
    String DEPT_ID = "deptId";

    /**
     * Data-permission scope claim.
     */
    String DATA_SCOPE = "dataScope";

    /**
     * Authorities (role codes) claim.
     */
    String AUTHORITIES = "authorities";

}

+ 60 - 0
src/main/java/com/zsElectric/boot/common/constant/RedisConstants.java

@@ -0,0 +1,60 @@
package com.zsElectric.boot.common.constant;

/**
 * Redis key constants, grouped per module.
 * <p>
 * Keys contain "{}" placeholders — presumably filled with Hutool
 * {@code StrUtil.format} at the call sites; TODO confirm at usages.
 *
 * @author Theo
 * @since 2024-7-29 11:46:08
 */
public interface RedisConstants {

    /**
     * Rate-limiting keys.
     */
    interface RateLimiter {
        String IP = "rate_limiter:ip:{}"; // per-IP rate limit (e.g. rate_limiter:ip:192.168.1.1)
    }

    /**
     * Distributed-lock keys.
     */
    interface Lock {
        String RESUBMIT = "lock:resubmit:{}:{}"; // duplicate-submit guard (e.g. lock:resubmit:userIdentifier:requestIdentifier)
    }

    /**
     * Authentication module keys.
     */
    interface Auth {
        // user info keyed by access token (accessToken -> OnlineUser)
        String ACCESS_TOKEN_USER = "auth:token:access:{}";
        // user info keyed by refresh token (refreshToken -> OnlineUser)
        String REFRESH_TOKEN_USER = "auth:token:refresh:{}";
        // user-to-access-token mapping (userId -> accessToken)
        String USER_ACCESS_TOKEN = "auth:user:access:{}";
        // user-to-refresh-token mapping (userId -> refreshToken)
        String USER_REFRESH_TOKEN = "auth:user:refresh:{}";
        // token blacklist (logout / revocation)
        String BLACKLIST_TOKEN = "auth:token:blacklist:{}";
    }

    /**
     * Captcha module keys.
     */
    interface Captcha {
        String IMAGE_CODE = "captcha:image:{}";              // graphic captcha
        String SMS_LOGIN_CODE = "captcha:sms_login:{}";      // SMS login code
        String SMS_REGISTER_CODE = "captcha:sms_register:{}";// SMS registration code
        String MOBILE_CODE = "captcha:mobile:{}";            // bind / change mobile code
        String EMAIL_CODE = "captcha:email:{}";              // email verification code
    }

    /**
     * System module keys.
     */
    interface System {
        String CONFIG = "system:config";                 // system configuration
        String ROLE_PERMS = "system:role:perms"; // role-to-permission mapping
    }

}

+ 25 - 0
src/main/java/com/zsElectric/boot/common/constant/SecurityConstants.java

@@ -0,0 +1,25 @@
package com.zsElectric.boot.common.constant;

/**
 * Security module constants.
 *
 * @author Ray.Hao
 * @since 2023/11/24
 */
public interface SecurityConstants {

    /**
     * Login endpoint path.
     */
    String LOGIN_PATH = "/api/v1/auth/login";

    /**
     * JWT token prefix in the Authorization header (note the trailing space).
     */
    String BEARER_TOKEN_PREFIX  = "Bearer ";

    /**
     * Role prefix used to distinguish authorities: entries starting with
     * "ROLE_" are roles, entries without the prefix are permissions.
     */
    String ROLE_PREFIX = "ROLE_";
}

+ 32 - 0
src/main/java/com/zsElectric/boot/common/constant/SystemConstants.java

@@ -0,0 +1,32 @@
package com.zsElectric.boot.common.constant;

/**
 * System-wide constants.
 *
 * @author Ray.Hao
 * @since 1.0.0
 */
public interface SystemConstants {

    /**
     * Root tree-node ID (parent of top-level nodes).
     */
    Long ROOT_NODE_ID = 0L;

    /**
     * Default password assigned to newly created users.
     */
    String DEFAULT_PASSWORD = "123456";

    /**
     * Super administrator role code.
     */
    String ROOT_ROLE_CODE = "ROOT";


    /**
     * System-config key for the per-IP QPS rate-limit threshold.
     */
    String SYSTEM_CONFIG_IP_QPS_LIMIT_KEY = "IP_QPS_THRESHOLD_LIMIT";

}

+ 27 - 0
src/main/java/com/zsElectric/boot/common/enums/CaptchaTypeEnum.java

@@ -0,0 +1,27 @@
package com.zsElectric.boot.common.enums;

/**
 * EasyCaptcha captcha style enum.
 *
 * @author haoxr
 * @since 2.5.1
 */
public enum CaptchaTypeEnum {

    /**
     * Captcha with circle interference.
     */
    CIRCLE,
    /**
     * Animated GIF captcha.
     */
    GIF,
    /**
     * Captcha with interference lines.
     */
    LINE,
    /**
     * Captcha with shear (distortion) interference.
     */
    SHEAR
}

+ 31 - 0
src/main/java/com/zsElectric/boot/common/enums/DataScopeEnum.java

@@ -0,0 +1,31 @@
package com.zsElectric.boot.common.enums;

import com.zsElectric.boot.common.base.IBaseEnum;
import lombok.Getter;

/**
 * Data-permission scope enum.
 *
 * @author Ray.Hao
 * @since 2.3.0
 */
@Getter
public enum DataScopeEnum implements IBaseEnum<Integer> {

    /**
     * Smaller value = wider data visibility.
     */
    ALL(1, "所有数据"),
    DEPT_AND_SUB(2, "部门及子部门数据"),
    DEPT(3, "本部门数据"),
    SELF(4, "本人数据");

    // persisted scope code
    private final Integer value;

    // display label (Chinese, shown in UI)
    private final String label;

    DataScopeEnum(Integer value, String label) {
        this.value = value;
        this.label = label;
    }
}

+ 26 - 0
src/main/java/com/zsElectric/boot/common/enums/EnvEnum.java

@@ -0,0 +1,26 @@
package com.zsElectric.boot.common.enums;

import com.zsElectric.boot.common.base.IBaseEnum;
import lombok.Getter;

/**
 * Deployment environment enum.
 *
 * @author Ray
 * @since 4.0.0
 */
@Getter
public enum EnvEnum implements IBaseEnum<String> {

    DEV("dev", "开发环境"),
    PROD("prod", "生产环境");

    // Spring profile name
    private final String value;

    // display label
    private final String label;

    EnvEnum(String value, String label) {
        this.value = value;
        this.label = label;
    }
}

+ 33 - 0
src/main/java/com/zsElectric/boot/common/enums/LogModuleEnum.java

@@ -0,0 +1,33 @@
package com.zsElectric.boot.common.enums;

import com.fasterxml.jackson.annotation.JsonValue;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Getter;

/**
 * Operation-log module enum.
 *
 * <p>Serialized to JSON as the module name (via {@code @JsonValue}), not the
 * constant name.</p>
 *
 * @author Ray
 * @since 2.10.0
 */
@Schema(enumAsRef = true)
@Getter
public enum LogModuleEnum {

    EXCEPTION("异常"),
    LOGIN("登录"),
    USER("用户"),
    DEPT("部门"),
    ROLE("角色"),
    MENU("菜单"),
    DICT("字典"),
    SETTING("系统配置"),
    OTHER("其他");

    // module display name; used as the JSON representation
    @JsonValue
    private final String moduleName;

    LogModuleEnum(String moduleName) {
        this.moduleName = moduleName;
    }
}

+ 52 - 0
src/main/java/com/zsElectric/boot/common/enums/RequestMethodEnum.java

@@ -0,0 +1,52 @@
/**
 * HTTP request method enum used when scanning for anonymous-access mapping
 * annotations; {@link #ALL} is the catch-all meaning every method is released.
 */
public enum RequestMethodEnum {

    /** Scan for @AnonymousGetMapping */
    GET("GET"),

    /** Scan for @AnonymousPostMapping */
    POST("POST"),

    /** Scan for @AnonymousPutMapping */
    PUT("PUT"),

    /** Scan for @AnonymousPatchMapping */
    PATCH("PATCH"),

    /** Scan for @AnonymousDeleteMapping */
    DELETE("DELETE"),

    /** Fallback: release every request method */
    ALL("All");

    /** Raw request-method string */
    private final String type;

    RequestMethodEnum(String type) {
        this.type = type;
    }

    public String getType() {
        return type;
    }

    /**
     * Resolves a constant from its raw method string; unknown strings map to {@link #ALL}.
     *
     * @param type raw request method, e.g. "GET"
     * @return the matching constant, or {@code ALL} when nothing matches
     */
    public static RequestMethodEnum find(String type) {
        for (RequestMethodEnum candidate : values()) {
            if (candidate.type.equals(type)) {
                return candidate;
            }
        }
        return ALL;
    }
}

+ 27 - 0
src/main/java/com/zsElectric/boot/common/enums/StatusEnum.java

@@ -0,0 +1,27 @@
package com.zsElectric.boot.common.enums;

import com.zsElectric.boot.common.base.IBaseEnum;
import lombok.Getter;

/**
 * Enabled/disabled status enum.
 *
 * @author haoxr
 * @since 2022/10/14
 */
@Getter
public enum StatusEnum implements IBaseEnum<Integer> {

    ENABLE(1, "启用"),
    DISABLE (0, "禁用");

    // persisted status code: 1 = enabled, 0 = disabled
    private final Integer value;


    // display label
    private final String label;

    StatusEnum(Integer value, String label) {
        this.value = value;
        this.label = label;
    }
}

+ 30 - 0
src/main/java/com/zsElectric/boot/common/model/KeyValue.java

@@ -0,0 +1,30 @@
package com.zsElectric.boot.common.model;

import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import lombok.NoArgsConstructor;


/**
 * Simple key/value pair DTO.
 *
 * @author haoxr
 * @since 2024/5/25
 */
@Schema(description = "键值对")
@Data
@NoArgsConstructor
public class KeyValue {

    /**
     * Convenience constructor.
     *
     * @param key   option value
     * @param value option label
     */
    public KeyValue(String key, String value) {
        this.key = key;
        this.value = value;
    }

    @Schema(description = "选项的值")
    private String key;

    @Schema(description = "选项的标签")
    private String value;

}

+ 53 - 0
src/main/java/com/zsElectric/boot/common/model/Option.java

@@ -0,0 +1,53 @@
package com.zsElectric.boot.common.model;

import com.fasterxml.jackson.annotation.JsonInclude;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * Drop-down option DTO; supports nesting via {@link #children} for tree selects.
 *
 * @param <T> type of the option value
 * @author haoxr
 * @since 2022/1/22
 */
@Schema(description ="下拉选项对象")
@Data
@NoArgsConstructor
public class Option<T> {

    /**
     * Flat option: value + label.
     */
    public Option(T value, String label) {
        this.value = value;
        this.label = label;
    }

    /**
     * Tree option: value + label + child options.
     */
    public Option(T value, String label, List<Option<T>> children) {
        this.value = value;
        this.label = label;
        this.children= children;
    }

    /**
     * Tagged option: value + label + tag type (for UI badge styling).
     */
    public Option(T value, String label, String tag) {
        this.value = value;
        this.label = label;
        this.tag= tag;
    }


    @Schema(description="选项的值")
    private T value;

    @Schema(description="选项的标签")
    private String label;

    // omitted from JSON when empty
    @Schema(description = "标签类型")
    @JsonInclude(value = JsonInclude.Include.NON_EMPTY)
    private String tag;

    // omitted from JSON when empty
    @Schema(description="子选项列表")
    @JsonInclude(value = JsonInclude.Include.NON_EMPTY)
    private List<Option<T>> children;

}

+ 61 - 0
src/main/java/com/zsElectric/boot/common/util/DateUtils.java

@@ -0,0 +1,61 @@
+
package com.zsElectric.boot.common.util;

import cn.hutool.core.date.DateTime;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.util.ReflectUtil;
import cn.hutool.core.util.StrUtil;
import org.springframework.format.annotation.DateTimeFormat;

import java.lang.reflect.Field;

/**
 * Date helper for query objects.
 *
 * <p>Rewrites a start/end date-range pair on an arbitrary object into full
 * database datetime strings, e.g. {@code 2021-01-01} → {@code 2021-01-01 00:00:00}
 * (start) / {@code 2021-01-01 23:59:59} (end).</p>
 *
 * @author haoxr
 * @since 2.4.2
 */
public class DateUtils {

    /** Static utility class: prevent instantiation. */
    private DateUtils() {
    }

    /**
     * Normalizes a date range on {@code obj} to database datetime format.
     * Fields that are absent or null are silently skipped.
     *
     * @param obj                object carrying the range fields (mutated in place)
     * @param startTimeFieldName name of the range-start field; expanded to "... 00:00:00"
     * @param endTimeFieldName   name of the range-end field; expanded to "... 23:59:59"
     */
    public static void toDatabaseFormat(Object obj, String startTimeFieldName, String endTimeFieldName) {
        Field startTimeField = ReflectUtil.getField(obj.getClass(), startTimeFieldName);
        Field endTimeField = ReflectUtil.getField(obj.getClass(), endTimeFieldName);

        if (startTimeField != null) {
            processDateTimeField(obj, startTimeField, startTimeFieldName, "yyyy-MM-dd 00:00:00");
        }

        if (endTimeField != null) {
            processDateTimeField(obj, endTimeField, endTimeFieldName, "yyyy-MM-dd 23:59:59");
        }
    }

    /**
     * Re-formats one date field in place.
     *
     * @param obj           object to mutate
     * @param field         reflected field (used to read its @DateTimeFormat pattern)
     * @param fieldName     field name
     * @param targetPattern target database datetime pattern
     */
    private static void processDateTimeField(Object obj, Field field, String fieldName, String targetPattern) {
        Object fieldValue = ReflectUtil.getFieldValue(obj, fieldName);
        if (fieldValue != null) {
            // source pattern: the field's @DateTimeFormat if present, else plain ISO date
            String pattern = field.isAnnotationPresent(DateTimeFormat.class) ? field.getAnnotation(DateTimeFormat.class).pattern() : "yyyy-MM-dd";
            // parse the current value with the source pattern
            DateTime dateTime = DateUtil.parse(StrUtil.toString(fieldValue), pattern);
            // write back in the target database format (field becomes a String)
            ReflectUtil.setFieldValue(obj, fieldName, dateTime.toString(targetPattern));
        }
    }
}

+ 19 - 0
src/main/java/com/zsElectric/boot/common/util/ExcelUtils.java

@@ -0,0 +1,19 @@
package com.zsElectric.boot.common.util;

import cn.idev.excel.EasyExcel;
import cn.idev.excel.event.AnalysisEventListener;

import java.io.InputStream;

/**
 * Excel helper built on EasyExcel.
 *
 * @author haoxr
 * @since 2023/03/01
 */
public class ExcelUtils {

    /** Static utility class: prevent instantiation. */
    private ExcelUtils() {
    }

    /**
     * Reads every row of the first sheet, mapping each row to {@code clazz}
     * and dispatching it to {@code listener}.
     *
     * <p>The row type parameter was raw {@code Class} before; it is now
     * {@code Class<T>} so the row model and the listener are type-checked
     * against each other.</p>
     *
     * @param is       Excel input stream (closed by EasyExcel after reading)
     * @param clazz    row model class
     * @param listener per-row callback
     * @param <T>      row model type
     */
    public static <T> void importExcel(InputStream is, Class<T> clazz, AnalysisEventListener<T> listener) {
        EasyExcel.read(is, clazz, listener).sheet().doRead();
    }
}

+ 139 - 0
src/main/java/com/zsElectric/boot/common/util/IPUtils.java

@@ -0,0 +1,139 @@
package com.zsElectric.boot.common.util;

import cn.hutool.core.util.StrUtil;
import jakarta.annotation.PostConstruct;
import jakarta.servlet.http.HttpServletRequest;
import lombok.extern.slf4j.Slf4j;
import org.lionsoul.ip2region.xdb.Searcher;
import org.springframework.stereotype.Component;

import java.io.FileNotFoundException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

/**
 * IP utilities: resolve the real client IP behind proxies and map an IP to a
 * geographic region via the bundled ip2region database.
 * <p>
 * Behind Nginx or another reverse proxy, {@code request.getRemoteAddr()} is not
 * the client address; the first non-"unknown" entry of X-Forwarded-For is.
 * </p>
 *
 * @author Ray
 * @since 2.10.0
 */
@Slf4j
@Component
public class IPUtils {

    private static final String DB_PATH = "/data/ip2region.xdb";
    // assigned once from init(); effectively a lazily-created singleton held statically
    private static Searcher searcher;

    /**
     * Copies the classpath ip2region database to a temp file and opens a
     * {@link Searcher} on it (Searcher needs a real file path, not a stream).
     * <p>
     * Fix: the resource stream is now closed via try-with-resources (it leaked
     * before), the temp file is removed on JVM exit, and failures are logged
     * with the full stack trace.
     */
    @PostConstruct
    public void init() {
        try (InputStream inputStream = getClass().getResourceAsStream(DB_PATH)) {
            if (inputStream == null) {
                throw new FileNotFoundException("Resource not found: " + DB_PATH);
            }

            // copy the classpath resource to a temp file Searcher can open
            Path tempDbPath = Files.createTempFile("ip2region", ".xdb");
            tempDbPath.toFile().deleteOnExit();
            Files.copy(inputStream, tempDbPath, StandardCopyOption.REPLACE_EXISTING);

            // initialize the shared Searcher from the temp file
            searcher = Searcher.newWithFileOnly(tempDbPath.toString());
        } catch (Exception e) {
            log.error("IpRegionUtil initialization ERROR, {}", e.getMessage(), e);
        }
    }

    /**
     * Resolves the client IP, preferring proxy headers over the socket address.
     *
     * @param request HttpServletRequest (may be null → empty string)
     * @return client IP address; may be null if resolution failed entirely
     */
    public static String getIpAddr(HttpServletRequest request) {
        String ip = null;
        try {
            if (request == null) {
                return "";
            }
            // probe the usual proxy headers in priority order
            ip = request.getHeader("x-forwarded-for");
            if (checkIp(ip)) {
                ip = request.getHeader("Proxy-Client-IP");
            }
            if (checkIp(ip)) {
                ip = request.getHeader("WL-Proxy-Client-IP");
            }
            if (checkIp(ip)) {
                ip = request.getHeader("HTTP_CLIENT_IP");
            }
            if (checkIp(ip)) {
                ip = request.getHeader("HTTP_X_FORWARDED_FOR");
            }
            if (checkIp(ip)) {
                ip = request.getRemoteAddr();
                if ("127.0.0.1".equals(ip) || "0:0:0:0:0:0:0:1".equals(ip)) {
                    // loopback: fall back to the host's configured address
                    ip = getLocalAddr();
                }
            }
        } catch (Exception e) {
            log.error("IPUtils ERROR, {}", e.getMessage(), e);
        }

        // X-Forwarded-For may carry a chain "client, proxy1, proxy2": take the first entry
        if (StrUtil.isNotBlank(ip) && ip.indexOf(",") > 0) {
            ip = ip.substring(0, ip.indexOf(","));
        }

        return ip;
    }

    /**
     * @return true when the candidate is empty or the literal "unknown" (i.e. unusable).
     */
    private static boolean checkIp(String ip) {
        String unknown = "unknown";
        return StrUtil.isEmpty(ip) || unknown.equalsIgnoreCase(ip);
    }

    /**
     * Local host address from the default network interface.
     *
     * @return host IP, or null when it cannot be determined
     */
    private static String getLocalAddr() {
        try {
            return InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            log.error("InetAddress.getLocalHost()-error, {}", e.getMessage(), e);
        }
        return null;
    }

    /**
     * Maps an IP to its geographic region using ip2region.
     *
     * @param ip IP address string
     * @return region description, or null if the searcher is unavailable or lookup failed
     */
    public static String getRegion(String ip) {
        if (searcher == null) {
            log.error("Searcher is not initialized");
            return null;
        }

        try {
            return searcher.search(ip);
        } catch (Exception e) {
            log.error("IpRegionUtil ERROR, {}", e.getMessage(), e);
            return null;
        }
    }
}

+ 37 - 0
src/main/java/com/zsElectric/boot/config/CaffeineConfig.java

@@ -0,0 +1,37 @@
package com.zsElectric.boot.config;

import com.github.benmanes.caffeine.cache.Caffeine;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.cache.CacheManager;
import org.springframework.cache.caffeine.CaffeineCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Caffeine local-cache configuration; the cache spec string is read from
 * {@code spring.cache.caffeine.spec}.
 *
 * @author Theo
 * @since 2025-01-22 17:40:23
 */
@Slf4j
@Configuration
public class CaffeineConfig {

    // Caffeine spec string, e.g. "maximumSize=500,expireAfterWrite=60s"
    @Value("${spring.cache.caffeine.spec}")
    private String caffeineSpec;

    /**
     * Cache manager built from the configured Caffeine spec.
     *
     * @return CacheManager backed by Caffeine
     */
    @Bean
    public CacheManager cacheManager() {
        CaffeineCacheManager caffeineCacheManager = new CaffeineCacheManager();
        Caffeine<Object, Object> caffeineBuilder = Caffeine.from(caffeineSpec);
        caffeineCacheManager.setCaffeine(caffeineBuilder);
        return caffeineCacheManager;
    }
}
+

+ 55 - 0
src/main/java/com/zsElectric/boot/config/CaptchaConfig.java

@@ -0,0 +1,55 @@
package com.zsElectric.boot.config;

import cn.hutool.captcha.generator.CodeGenerator;
import cn.hutool.captcha.generator.MathGenerator;
import cn.hutool.captcha.generator.RandomGenerator;
import com.zsElectric.boot.config.property.CaptchaProperties;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.awt.*;

/**
 * Captcha auto-configuration.
 *
 * <p>Uses constructor injection instead of field {@code @Autowired}, matching
 * the style of the other configuration classes (MailConfig, OpenApiConfig) and
 * making the dependency final and testable.</p>
 *
 * @author haoxr
 * @since 2023/11/24
 */
@Configuration
public class CaptchaConfig {

    private final CaptchaProperties captchaProperties;

    public CaptchaConfig(CaptchaProperties captchaProperties) {
        this.captchaProperties = captchaProperties;
    }

    /**
     * Captcha code generator: "math" produces arithmetic challenges,
     * "random" produces random characters.
     *
     * @return CodeGenerator
     * @throws IllegalArgumentException for an unknown configured type
     */
    @Bean
    public CodeGenerator codeGenerator() {
        String codeType = captchaProperties.getCode().getType();
        int codeLength = captchaProperties.getCode().getLength();
        if ("math".equalsIgnoreCase(codeType)) {
            return new MathGenerator(codeLength);
        } else if ("random".equalsIgnoreCase(codeType)) {
            return new RandomGenerator(codeLength);
        } else {
            throw new IllegalArgumentException("Invalid captcha codegen type: " + codeType);
        }
    }

    /**
     * Font used to render captcha characters, built from configured
     * name / weight (style) / size.
     */
    @Bean
    public Font captchaFont() {
        String fontName = captchaProperties.getFont().getName();
        int fontSize = captchaProperties.getFont().getSize();
        int fontWight = captchaProperties.getFont().getWeight();
        return new Font(fontName, fontWight, fontSize);
    }


}

+ 42 - 0
src/main/java/com/zsElectric/boot/config/CorsConfig.java

@@ -0,0 +1,42 @@
package com.zsElectric.boot.config;

import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
import org.springframework.web.filter.CorsFilter;

import java.util.Collections;

/**
 * CORS (cross-origin resource sharing) configuration.
 *
 * <p>Fix: the bean method previously returned a raw {@code FilterRegistrationBean};
 * it is now parameterized as {@code FilterRegistrationBean<CorsFilter>}
 * (erasure-identical, so callers are unaffected).</p>
 *
 * @author haoxr
 * @since 2023/4/17
 */
@Configuration
public class CorsConfig {

    /**
     * Registers a permissive CORS filter ahead of the Spring Security chain.
     */
    @Bean
    public FilterRegistrationBean<CorsFilter> filterRegistrationBean() {
        CorsConfiguration corsConfiguration = new CorsConfiguration();
        // 1. allow any origin (patterns, so it works together with credentials)
        corsConfiguration.setAllowedOriginPatterns(Collections.singletonList("*"));
        // 2. allow any request header
        corsConfiguration.addAllowedHeader(CorsConfiguration.ALL);
        // 3. allow any HTTP method
        corsConfiguration.addAllowedMethod(CorsConfiguration.ALL);
        // 4. allow credentials (cookies / auth headers)
        corsConfiguration.setAllowCredentials(true);

        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
        source.registerCorsConfiguration("/**", corsConfiguration);
        CorsFilter corsFilter = new CorsFilter(source);

        FilterRegistrationBean<CorsFilter> filterRegistrationBean = new FilterRegistrationBean<>(corsFilter);
        // must run before the Spring Security filter (order -100)
        filterRegistrationBean.setOrder(-101);

        return filterRegistrationBean;
    }
}

+ 51 - 0
src/main/java/com/zsElectric/boot/config/MailConfig.java

@@ -0,0 +1,51 @@
package com.zsElectric.boot.config;

import com.zsElectric.boot.config.property.MailProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.mail.javamail.JavaMailSender;
import org.springframework.mail.javamail.JavaMailSenderImpl;

import java.util.Properties;

/**
 * Manual configuration of {@link JavaMailSender} from the custom
 * {@link MailProperties} binding.
 * <p>
 * The bean is wired by hand (rather than relying on Spring Boot's mail
 * auto-configuration) so that IDEs resolve the JavaMailSender bean even when
 * the mail settings live in application-dev.yml or another non-default
 * profile file.
 *
 * @author Ray
 * @since 2024/8/17
 */
@Configuration
@EnableConfigurationProperties(MailProperties.class)
public class MailConfig {

    private final MailProperties mailProperties;

    // constructor injection of the bound mail properties
    public MailConfig(MailProperties mailProperties) {
        this.mailProperties = mailProperties;
    }

    /**
     * Creates and configures the JavaMailSender bean: host, port, credentials,
     * plus SMTP auth and STARTTLS flags from the nested properties.
     *
     * @return configured JavaMailSender instance
     */
    @Bean
    public JavaMailSender javaMailSender() {
        JavaMailSenderImpl mailSender = new JavaMailSenderImpl();
        mailSender.setHost(mailProperties.getHost());
        mailSender.setPort(mailProperties.getPort());
        mailSender.setUsername(mailProperties.getUsername());
        mailSender.setPassword(mailProperties.getPassword());

        Properties properties = mailSender.getJavaMailProperties();
        properties.put("mail.smtp.auth", mailProperties.getProperties().getSmtp().isAuth());
        properties.put("mail.smtp.starttls.enable", mailProperties.getProperties().getSmtp().getStarttls().isEnable());

        return mailSender;
    }
}

+ 48 - 0
src/main/java/com/zsElectric/boot/config/MybatisConfig.java

@@ -0,0 +1,48 @@
package com.zsElectric.boot.config;

import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.config.GlobalConfig;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.DataPermissionInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import com.zsElectric.boot.plugin.mybatis.MyDataPermissionHandler;
import com.zsElectric.boot.plugin.mybatis.MyMetaObjectHandler;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.annotation.EnableTransactionManagement;

/**
 * MyBatis-Plus configuration: interceptor chain (data permission + pagination)
 * and automatic audit-field filling.
 *
 * @author Ray.Hao
 * @since 2022/7/2
 */
@Configuration
@EnableTransactionManagement
public class MybatisConfig {

    /**
     * Interceptor chain. Note: the data-permission interceptor is registered
     * BEFORE the pagination interceptor, as MyBatis-Plus recommends.
     */
    @Bean
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
        // data-permission row filtering
        interceptor.addInnerInterceptor(new DataPermissionInterceptor(new MyDataPermissionHandler()));
        // pagination plugin (MySQL dialect)
        interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));

        return interceptor;
    }

    /**
     * Auto-fills audit columns (creator, create time, updater, update time)
     * via {@link MyMetaObjectHandler}.
     */
    @Bean
    public GlobalConfig globalConfig() {
        GlobalConfig globalConfig = new GlobalConfig();
        globalConfig.setMetaObjectHandler(new MyMetaObjectHandler());
        return globalConfig;
    }

}

+ 106 - 0
src/main/java/com/zsElectric/boot/config/OpenApiConfig.java

@@ -0,0 +1,106 @@
package com.zsElectric.boot.config;

import cn.hutool.core.util.ArrayUtil;
import com.zsElectric.boot.config.property.SecurityProperties;
import io.swagger.v3.oas.models.Components;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Contact;
import io.swagger.v3.oas.models.info.Info;
import io.swagger.v3.oas.models.info.License;
import io.swagger.v3.oas.models.security.SecurityRequirement;
import io.swagger.v3.oas.models.security.SecurityScheme;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springdoc.core.customizers.GlobalOpenApiCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.http.HttpHeaders;
import org.springframework.util.AntPathMatcher;

import java.util.stream.Stream;

/**
 * OpenAPI / Swagger documentation configuration.
 *
 * @author Ray.Hao
 * @see <a href="https://doc.xiaominfo.com/docs/quick-start">knife4j quick start</a>
 * @since 2023/2/17
 */
@Configuration
@RequiredArgsConstructor
@Slf4j
public class OpenApiConfig {

    private final Environment environment;

    private final SecurityProperties securityProperties;

    /**
     * API document metadata (title, description, version, license, contact) and
     * the global Authorization security scheme.
     */
    @Bean
    public OpenAPI openApi() {

        // project version from build properties; defaults to 1.0.0
        String appVersion = environment.getProperty("project.version", "1.0.0");

        return new OpenAPI()
                .info(new Info()
                        .title("管理系统 API 文档")
                        .description("本文档涵盖管理系统的所有API接口,包括登录认证、用户管理、角色管理、部门管理等功能模块,提供详细的接口说明和使用指南。")
                        .version(appVersion)
                        .license(new License()
                                .name("Apache License 2.0")
                                .url("http://www.apache.org/licenses/LICENSE-2.0")
                        )
                        .contact(new Contact()
                                .name("youlai")
                                .email("youlaitech@163.com")
                                .url("https://www.youlai.tech")
                        )
                )
                // global auth parameter: the "Authorize" button sends a JWT in the Authorization header
                .components(new Components()
                        .addSecuritySchemes(HttpHeaders.AUTHORIZATION,
                                new SecurityScheme()
                                        .name(HttpHeaders.AUTHORIZATION)
                                        .type(SecurityScheme.Type.APIKEY)
                                        .in(SecurityScheme.In.HEADER)
                                        .scheme("Bearer")
                                        .bearerFormat("JWT")
                        )
                );
    }


    /**
     * Global customizer: attach the Authorization security requirement to every
     * operation except the paths white-listed in SecurityProperties.
     */
    @Bean
    public GlobalOpenApiCustomizer globalOpenApiCustomizer() {
        return openApi -> {
            // add the Authorization requirement to every documented path
            if (openApi.getPaths() != null) {
                openApi.getPaths().forEach((path, pathItem) -> {

                    // whitelisted (anonymous) endpoints do not need Authorization
                    String[] ignoreUrls = securityProperties.getIgnoreUrls();
                    if (ArrayUtil.isNotEmpty(ignoreUrls)) {
                        // Ant-style match against the ignore list; skip matches
                        AntPathMatcher antPathMatcher = new AntPathMatcher();
                        if (Stream.of(ignoreUrls).anyMatch(ignoreUrl -> antPathMatcher.match(ignoreUrl, path))) {
                            return;
                        }
                    }

                    // all remaining operations require the Authorization header
                    pathItem.readOperations()
                            .forEach(operation ->
                                    operation.addSecurityItem(new SecurityRequirement().addList(HttpHeaders.AUTHORIZATION))
                            );
                });
            }
        };
    }

}

+ 24 - 0
src/main/java/com/zsElectric/boot/config/PasswordEncoderConfig.java

@@ -0,0 +1,24 @@
+package com.zsElectric.boot.config;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
+import org.springframework.security.crypto.password.PasswordEncoder;
+
+/**
+ * 密码编码器
+ *
+ * @author Ray.Hao
+ * @since 2024/12/3
+ */
+@Configuration
+public class PasswordEncoderConfig {
+
+    /**
+     * 密码编码器
+     */
+    @Bean
+    public PasswordEncoder passwordEncoder() {
+        return new BCryptPasswordEncoder();
+    }
+}

+ 74 - 0
src/main/java/com/zsElectric/boot/config/RedisCacheConfig.java

@@ -0,0 +1,74 @@
package com.zsElectric.boot.config;

import org.springframework.boot.autoconfigure.cache.CacheProperties;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.cache.RedisCacheWriter;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.RedisSerializer;

/**
 * Redis cache configuration for the Spring Cache abstraction.
 * <p>
 * Replaces the default JDK serialization with String keys and JSON values,
 * and honours TTL / null-value / key-prefix settings from {@code spring.cache.redis.*}.
 *
 * @author Ray.Hao
 * @since 2023/12/4
 */
@EnableCaching
@EnableConfigurationProperties(CacheProperties.class)
@Configuration
@ConditionalOnProperty(name = "spring.cache.enabled") // auto-configured only when spring.cache.enabled=true (previous comment wrongly referenced xxl.job.enabled)
public class RedisCacheConfig {

    /**
     * Custom {@link RedisCacheManager}.
     * <p>
     * Overrides the Redis serialization strategy (default is JdkSerializationRedisSerializer).
     *
     * @param redisConnectionFactory {@link RedisConnectionFactory}
     * @param cacheProperties        {@link CacheProperties}
     * @return {@link RedisCacheManager}
     */
    @Bean
    public RedisCacheManager redisCacheManager(RedisConnectionFactory redisConnectionFactory, CacheProperties cacheProperties){
        return RedisCacheManager.builder(RedisCacheWriter.nonLockingRedisCacheWriter(redisConnectionFactory))
                .cacheDefaults(redisCacheConfiguration(cacheProperties))
                .build();
    }

    /**
     * Custom {@link RedisCacheConfiguration}: String keys, JSON values, plus
     * TTL / null-value / key-prefix behaviour taken from {@code spring.cache.redis.*}.
     *
     * @param cacheProperties {@link CacheProperties}
     * @return {@link RedisCacheConfiguration}
     */
    @Bean
    RedisCacheConfiguration redisCacheConfiguration(CacheProperties cacheProperties) {

        RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig();

        // Keys as plain strings, values as JSON (readable in redis-cli, no JDK serialization coupling)
        config = config.serializeKeysWith(RedisSerializationContext.SerializationPair.fromSerializer(RedisSerializer.string()));
        config = config.serializeValuesWith(RedisSerializationContext.SerializationPair.fromSerializer(RedisSerializer.json()));

        CacheProperties.Redis redisProperties = cacheProperties.getRedis();

        if (redisProperties.getTimeToLive() != null) {
            config = config.entryTtl(redisProperties.getTimeToLive());
        }
        if (!redisProperties.isCacheNullValues()) {
            config = config.disableCachingNullValues();
        }
        if (!redisProperties.isUseKeyPrefix()) {
            config = config.disableKeyPrefix();
        }
        // Override the default double-colon key prefix (see CacheKeyPrefix#prefixed) with a single colon
        config = config.computePrefixWith(name -> name + ":");
        return config;
    }

}

+ 42 - 0
src/main/java/com/zsElectric/boot/config/RedisConfig.java

@@ -0,0 +1,42 @@
+package com.zsElectric.boot.config;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.data.redis.connection.RedisConnectionFactory;
+import org.springframework.data.redis.core.RedisTemplate;
+import org.springframework.data.redis.serializer.RedisSerializer;
+
+/**
+ * Redis 配置
+ *
+ * @author Ray.Hao
+ * @since 2023/5/15
+ */
+@Configuration
+public class RedisConfig {
+
+    /**
+     * 自定义 RedisTemplate
+     * <p>
+     * 修改 Redis 序列化方式,默认 JdkSerializationRedisSerializer
+     *
+     * @param redisConnectionFactory {@link RedisConnectionFactory}
+     * @return {@link RedisTemplate}
+     */
+    @Bean
+    public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
+
+        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
+        redisTemplate.setConnectionFactory(redisConnectionFactory);
+
+        redisTemplate.setKeySerializer(RedisSerializer.string());
+        redisTemplate.setValueSerializer(RedisSerializer.json());
+
+        redisTemplate.setHashKeySerializer(RedisSerializer.string());
+        redisTemplate.setHashValueSerializer(RedisSerializer.json());
+
+        redisTemplate.afterPropertiesSet();
+        return redisTemplate;
+    }
+
+}

+ 168 - 0
src/main/java/com/zsElectric/boot/config/SecurityConfig.java

@@ -0,0 +1,168 @@
package com.zsElectric.boot.config;

import cn.binarywang.wx.miniapp.api.WxMaService;
import cn.hutool.captcha.generator.CodeGenerator;
import cn.hutool.core.util.ArrayUtil;
import com.zsElectric.boot.config.property.SecurityProperties;
import com.zsElectric.boot.core.filter.RateLimiterFilter;
import com.zsElectric.boot.security.filter.CaptchaValidationFilter;
import com.zsElectric.boot.security.filter.TokenAuthenticationFilter;
import com.zsElectric.boot.security.handler.MyAccessDeniedHandler;
import com.zsElectric.boot.security.handler.MyAuthenticationEntryPoint;
import com.zsElectric.boot.security.provider.SmsAuthenticationProvider;
import com.zsElectric.boot.security.provider.WxMiniAppCodeAuthenticationProvider;
import com.zsElectric.boot.security.provider.WxMiniAppPhoneAuthenticationProvider;
import com.zsElectric.boot.security.token.TokenManager;
import com.zsElectric.boot.security.service.SysUserDetailsService;
import com.zsElectric.boot.system.service.ConfigService;
import com.zsElectric.boot.system.service.UserService;
import lombok.RequiredArgsConstructor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.ProviderManager;
import org.springframework.security.authentication.dao.DaoAuthenticationProvider;
import org.springframework.security.config.annotation.method.configuration.EnableMethodSecurity;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityCustomizer;
import org.springframework.security.config.annotation.web.configurers.AbstractHttpConfigurer;
import org.springframework.security.config.annotation.web.configurers.HeadersConfigurer;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.web.SecurityFilterChain;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;

/**
 * Spring Security configuration.
 * <p>
 * Stateless, token-based setup for a separated front-end/back-end architecture:
 * public URLs from {@code SecurityProperties} are permitted, everything else
 * requires authentication. Registers rate-limiting, captcha-validation and
 * token-parsing filters, plus four authentication providers (password,
 * WeChat mini-app code, WeChat mini-app phone, SMS code).
 *
 * @author Ray.Hao
 * @since 2023/2/17
 */
@Configuration
@EnableWebSecurity
@EnableMethodSecurity
@RequiredArgsConstructor
public class SecurityConfig {

    private final RedisTemplate<String, Object> redisTemplate;
    private final PasswordEncoder passwordEncoder;

    private final TokenManager tokenManager;
    private final WxMaService wxMaService;
    private final UserService userService;
    private final SysUserDetailsService userDetailsService;

    private final CodeGenerator codeGenerator;
    private final ConfigService configService;
    private final SecurityProperties securityProperties;

    /**
     * Builds the {@link SecurityFilterChain}: URL authorization rules,
     * exception handlers, stateless session policy and custom filters.
     */
    @Bean
    public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {

        return http
                .authorizeHttpRequests(requestMatcherRegistry -> {
                            // Public endpoints that can be accessed without logging in
                            String[] ignoreUrls = securityProperties.getIgnoreUrls();
                            if (ArrayUtil.isNotEmpty(ignoreUrls)) {
                                requestMatcherRegistry.requestMatchers(ignoreUrls).permitAll();
                            }
                            // All other requests require an authenticated user
                            requestMatcherRegistry.anyRequest().authenticated();
                        }
                )
                .exceptionHandling(configurer ->
                        configurer
                                .authenticationEntryPoint(new MyAuthenticationEntryPoint()) // handler for unauthenticated access
                                .accessDeniedHandler(new MyAccessDeniedHandler()) // handler for access-denied (insufficient permissions)
                )

                // Disable default Spring Security features that do not fit a separated front-end/back-end architecture
                .sessionManagement(configurer ->
                        configurer.sessionCreationPolicy(SessionCreationPolicy.STATELESS) // stateless auth, no HTTP session
                )
                .csrf(AbstractHttpConfigurer::disable)      // CSRF protection not needed with token-based, cookie-less auth
                .formLogin(AbstractHttpConfigurer::disable) // no server-rendered login form; tokens are used instead
                .httpBasic(AbstractHttpConfigurer::disable) // no HTTP Basic (avoids browser popup login)
                // Drop X-Frame-Options so pages may be embedded in iframes
                .headers(headers -> headers.frameOptions(HeadersConfigurer.FrameOptionsConfig::disable))
                // Rate-limiting filter
                .addFilterBefore(new RateLimiterFilter(redisTemplate, configService), UsernamePasswordAuthenticationFilter.class)
                // Captcha validation filter
                .addFilterBefore(new CaptchaValidationFilter(redisTemplate, codeGenerator), UsernamePasswordAuthenticationFilter.class)
                // Token verification and parsing filter
                .addFilterBefore(new TokenAuthenticationFilter(tokenManager), UsernamePasswordAuthenticationFilter.class)
                .build();
    }

    /**
     * Web security customizer that excludes specific request paths from the
     * Spring Security filter chain entirely.
     * <p>
     * Typically used for static resources.
     */
    @Bean
    public WebSecurityCustomizer webSecurityCustomizer() {
        return (web) -> {
            String[] unsecuredUrls = securityProperties.getUnsecuredUrls();
            if (ArrayUtil.isNotEmpty(unsecuredUrls)) {
                web.ignoring().requestMatchers(unsecuredUrls);
            }
        };
    }

    /**
     * Default username/password authentication provider.
     */
    @Bean
    public DaoAuthenticationProvider daoAuthenticationProvider() {
        DaoAuthenticationProvider daoAuthenticationProvider = new DaoAuthenticationProvider(userDetailsService);
        daoAuthenticationProvider.setPasswordEncoder(passwordEncoder);
        return daoAuthenticationProvider;
    }

    /**
     * WeChat mini-app login-code authentication provider.
     */
    @Bean
    public WxMiniAppCodeAuthenticationProvider wxMiniAppCodeAuthenticationProvider() {
        return new WxMiniAppCodeAuthenticationProvider(userService, wxMaService);
    }

    /**
     * WeChat mini-app phone-number authentication provider.
     */
    @Bean
    public WxMiniAppPhoneAuthenticationProvider wxMiniAppPhoneAuthenticationProvider() {
        return new WxMiniAppPhoneAuthenticationProvider(userService, wxMaService);
    }

    /**
     * SMS verification-code authentication provider.
     */
    @Bean
    public SmsAuthenticationProvider smsAuthenticationProvider() {
        return new SmsAuthenticationProvider(userService, redisTemplate);
    }

    /**
     * Authentication manager aggregating all four providers; each provider
     * handles its own authentication token type.
     */
    @Bean
    public AuthenticationManager authenticationManager(
            DaoAuthenticationProvider daoAuthenticationProvider,
            WxMiniAppCodeAuthenticationProvider wxMiniAppCodeAuthenticationProvider,
            WxMiniAppPhoneAuthenticationProvider wxMiniAppPhoneAuthenticationProvider,
            SmsAuthenticationProvider smsAuthenticationProvider
    ) {
        return new ProviderManager(
                daoAuthenticationProvider,
                wxMiniAppCodeAuthenticationProvider,
                wxMiniAppPhoneAuthenticationProvider,
                smsAuthenticationProvider
        );
    }
}

+ 93 - 0
src/main/java/com/zsElectric/boot/config/WebMvcConfig.java

@@ -0,0 +1,93 @@
package com.zsElectric.boot.config;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.fasterxml.jackson.datatype.jsr310.deser.LocalDateTimeDeserializer;
import com.fasterxml.jackson.datatype.jsr310.ser.LocalDateTimeSerializer;
import jakarta.validation.Validation;
import jakarta.validation.Validator;
import jakarta.validation.ValidatorFactory;
import lombok.extern.slf4j.Slf4j;
import org.hibernate.validator.HibernateValidator;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.converter.HttpMessageConverter;
import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter;
import org.springframework.validation.beanvalidation.SpringConstraintValidatorFactory;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

import java.math.BigInteger;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;
import java.util.TimeZone;

/**
 * Web MVC configuration: JSON (de)serialization defaults and the Bean
 * Validation validator used for request parameter validation.
 *
 * @author Ray.Hao
 * @since 2020/10/16
 */
@Configuration
@Slf4j
public class WebMvcConfig implements WebMvcConfigurer {

    // Canonical date-time wire format for LocalDateTime fields
    private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    /**
     * Registers a customized Jackson message converter:
     * LocalDateTime as "yyyy-MM-dd HH:mm:ss", GMT+8 for legacy Date,
     * and Long/BigInteger serialized as strings to avoid JS precision loss.
     *
     * @param converters the converter list managed by Spring MVC
     */
    @Override
    public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
        MappingJackson2HttpMessageConverter jackson2HttpMessageConverter = new MappingJackson2HttpMessageConverter();
        ObjectMapper objectMapper = new ObjectMapper();

        // Register JavaTimeModule (instead of wiring LocalDateTimeSerializer by hand)
        JavaTimeModule javaTimeModule = new JavaTimeModule();
        // Serialize LocalDateTime using the fixed string pattern above
        javaTimeModule.addSerializer(LocalDateTime.class, new LocalDateTimeSerializer(DATE_TIME_FORMATTER));
        // Deserialize incoming values sent by the front end in the same pattern
        javaTimeModule.addDeserializer(LocalDateTime.class, new LocalDateTimeDeserializer(DATE_TIME_FORMATTER));
        objectMapper.registerModule(javaTimeModule);

        // Global date format and time zone for legacy java.util.Date
        objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
        objectMapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        objectMapper.setTimeZone(TimeZone.getTimeZone("GMT+8"));

        // Serialize Long/BigInteger as strings: JS Number loses precision beyond 2^53
        SimpleModule simpleModule = new SimpleModule();
        simpleModule.addSerializer(Long.class, ToStringSerializer.instance);
        simpleModule.addSerializer(BigInteger.class, ToStringSerializer.instance);
        objectMapper.registerModule(simpleModule);

        jackson2HttpMessageConverter.setObjectMapper(objectMapper);
        // NOTE(review): index 1 places this converter ahead of the default Jackson
        // converter but behind converters.get(0); the meaning of index 0 depends on
        // Spring's registration order — confirm this is intentional rather than add(0, ...)
        converters.add(1, jackson2HttpMessageConverter);
    }

    /**
     * Builds the Bean Validation {@link Validator} with fail-fast enabled and
     * Spring-managed ConstraintValidator instances (so validators can use DI).
     *
     * @param autowireCapableBeanFactory backs {@link SpringConstraintValidatorFactory}
     * @return Validator instance
     */
    @Bean
    public Validator validator(final AutowireCapableBeanFactory autowireCapableBeanFactory) {
        try (ValidatorFactory validatorFactory = Validation.byProvider(HibernateValidator.class)
                .configure()
                .failFast(true) // failFast=true: stop at the first violation; false would collect all violations
                .constraintValidatorFactory(new SpringConstraintValidatorFactory(autowireCapableBeanFactory))
                .buildValidatorFactory()) {

            // try-with-resources closes the ValidatorFactory here.
            // NOTE(review): the spec leaves Validator behavior after factory close
            // undefined; this works with Hibernate Validator in practice, but
            // consider keeping the factory open for the application's lifetime.
            return validatorFactory.getValidator();
        }
    }
}

+ 293 - 0
src/main/java/com/zsElectric/boot/config/WebSocketConfig.java

@@ -0,0 +1,293 @@
+package com.zsElectric.boot.config;
+
+import cn.hutool.core.util.StrUtil;
+import com.zsElectric.boot.security.model.SysUserDetails;
+import com.zsElectric.boot.security.token.TokenManager;
+import com.zsElectric.boot.system.service.WebSocketService;
+import lombok.extern.slf4j.Slf4j;
+import org.jetbrains.annotations.NotNull;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Lazy;
+import org.springframework.http.HttpHeaders;
+import org.springframework.messaging.Message;
+import org.springframework.messaging.MessageChannel;
+import org.springframework.messaging.MessagingException;
+import org.springframework.messaging.simp.config.ChannelRegistration;
+import org.springframework.messaging.simp.config.MessageBrokerRegistry;
+import org.springframework.messaging.simp.stomp.StompCommand;
+import org.springframework.messaging.simp.stomp.StompHeaderAccessor;
+import org.springframework.messaging.support.ChannelInterceptor;
+import org.springframework.messaging.support.MessageHeaderAccessor;
+import org.springframework.security.authentication.AuthenticationCredentialsNotFoundException;
+import org.springframework.security.authentication.BadCredentialsException;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.AuthenticationException;
+import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker;
+import org.springframework.web.socket.config.annotation.StompEndpointRegistry;
+import org.springframework.web.socket.config.annotation.WebSocketMessageBrokerConfigurer;
+
+/**
+ * WebSocket 配置类
+ * 
+ * 核心功能:
+ * - 配置 WebSocket 端点
+ * - 配置消息代理
+ * - 实现连接认证与授权
+ * - 管理用户会话生命周期
+ *
+ * @author Ray.Hao
+ * @since 3.0.0
+ */
+@EnableWebSocketMessageBroker
+@Configuration
+@Slf4j
+public class WebSocketConfig implements WebSocketMessageBrokerConfigurer {
+
+    private static final String WS_ENDPOINT = "/ws";
+    private static final String APP_DESTINATION_PREFIX = "/app";
+    private static final String USER_DESTINATION_PREFIX = "/user";
+    private static final String[] BROKER_DESTINATIONS = {"/topic", "/queue"};
+
+    private final TokenManager tokenManager;
+    private final WebSocketService webSocketService;
+
+    public WebSocketConfig(TokenManager tokenManager, @Lazy WebSocketService webSocketService) {
+        this.tokenManager = tokenManager;
+        this.webSocketService = webSocketService;
+        log.info("✓ WebSocket 配置已加载");
+    }
+
+    /**
+     * 注册 STOMP 端点
+     * 
+     * 客户端通过该端点建立 WebSocket 连接
+     */
+    @Override
+    public void registerStompEndpoints(StompEndpointRegistry registry) {
+        registry
+                .addEndpoint(WS_ENDPOINT)
+                .setAllowedOriginPatterns("*"); // 允许跨域(生产环境建议配置具体域名)
+
+        log.info("✓ STOMP 端点已注册: {}", WS_ENDPOINT);
+    }
+
+    /**
+     * 配置消息代理
+     * 
+     * - /app 前缀:客户端发送消息到服务端的前缀
+     * - /topic 前缀:用于广播消息
+     * - /queue 前缀:用于点对点消息
+     * - /user 前缀:服务端发送给特定用户的消息前缀
+     */
+    @Override
+    public void configureMessageBroker(MessageBrokerRegistry registry) {
+        // 客户端发送消息的请求前缀
+        registry.setApplicationDestinationPrefixes(APP_DESTINATION_PREFIX);
+
+        // 启用简单消息代理,处理 /topic 和 /queue 前缀的消息
+        registry.enableSimpleBroker(BROKER_DESTINATIONS);
+
+        // 服务端通知客户端的前缀
+        registry.setUserDestinationPrefix(USER_DESTINATION_PREFIX);
+
+        log.info("✓ 消息代理已配置: app={}, broker={}, user={}",
+                APP_DESTINATION_PREFIX, BROKER_DESTINATIONS, USER_DESTINATION_PREFIX);
+    }
+
+    /**
+     * 配置客户端入站通道拦截器
+     * 
+     * 核心功能:
+     * 1. 连接建立时:解析 JWT Token 并绑定用户身份
+     * 2. 连接关闭时:触发用户下线通知
+     * 3. 安全防护:拦截无效连接请求
+     */
+    @Override
+    public void configureClientInboundChannel(ChannelRegistration registration) {
+        registration.interceptors(new ChannelInterceptor() {
+            @Override
+            public Message<?> preSend(@NotNull Message<?> message, @NotNull MessageChannel channel) {
+                StompHeaderAccessor accessor = MessageHeaderAccessor.getAccessor(message, StompHeaderAccessor.class);
+
+                // 防御性检查:确保 accessor 不为空
+                if (accessor == null) {
+                    log.warn("⚠ 收到异常消息:无法获取 StompHeaderAccessor");
+                    return ChannelInterceptor.super.preSend(message, channel);
+                }
+
+                StompCommand command = accessor.getCommand();
+                if (command == null) {
+                    return ChannelInterceptor.super.preSend(message, channel);
+                }
+
+                try {
+                    switch (command) {
+                        case CONNECT:
+                            handleConnect(accessor);
+                            break;
+
+                        case DISCONNECT:
+                            handleDisconnect(accessor);
+                            break;
+
+                        case SUBSCRIBE:
+                            handleSubscribe(accessor);
+                            break;
+
+                        default:
+                            // 其他命令不需要特殊处理
+                            break;
+                    }
+                } catch (AuthenticationException ex) {
+                    // 认证失败时强制关闭连接
+                    log.error("❌ 连接认证失败: {}", ex.getMessage());
+                    throw ex;
+                } catch (Exception ex) {
+                    // 捕获其他未知异常
+                    log.error("❌ WebSocket 消息处理异常", ex);
+                    throw new MessagingException("消息处理失败: " + ex.getMessage());
+                }
+
+                return ChannelInterceptor.super.preSend(message, channel);
+            }
+        });
+
+        log.info("✓ 客户端入站通道拦截器已配置");
+    }
+
+    /**
+     * 处理客户端连接请求
+     * 
+     * 安全校验流程:
+     * 1. 提取 Authorization 头
+     * 2. 验证 Bearer Token 格式
+     * 3. 解析并验证 JWT 有效性
+     * 4. 绑定用户身份到当前会话
+     * 5. 记录用户上线状态
+     */
+    private void handleConnect(StompHeaderAccessor accessor) {
+        String authorization = accessor.getFirstNativeHeader(HttpHeaders.AUTHORIZATION);
+
+        // 安全检查:确保 Authorization 头存在且格式正确
+        if (StrUtil.isBlank(authorization)) {
+            log.warn("⚠ 非法连接请求:缺少 Authorization 头");
+            throw new AuthenticationCredentialsNotFoundException("缺少 Authorization 头");
+        }
+
+        if (!authorization.startsWith("Bearer ")) {
+            log.warn("⚠ 非法连接请求:Authorization 头格式错误");
+            throw new BadCredentialsException("Authorization 头格式错误");
+        }
+
+        // 提取 JWT Token(移除 "Bearer " 前缀)
+        String token = authorization.substring(7);
+
+        if (StrUtil.isBlank(token)) {
+            log.warn("⚠ 非法连接请求:Token 为空");
+            throw new BadCredentialsException("Token 为空");
+        }
+
+        // 解析并验证 Token
+        Authentication authentication;
+        try {
+            authentication = tokenManager.parseToken(token);
+        } catch (Exception ex) {
+            log.error("❌ Token 解析失败", ex);
+            throw new BadCredentialsException("Token 无效: " + ex.getMessage());
+        }
+
+        // 验证解析结果
+        if (authentication == null || !authentication.isAuthenticated()) {
+            log.warn("⚠ Token 解析失败:认证对象无效");
+            throw new BadCredentialsException("Token 解析失败");
+        }
+
+        // 获取用户详细信息
+        Object principal = authentication.getPrincipal();
+        if (!(principal instanceof SysUserDetails)) {
+            log.error("❌ 无效的用户凭证类型: {}", principal.getClass().getName());
+            throw new BadCredentialsException("用户凭证类型错误");
+        }
+
+        SysUserDetails userDetails = (SysUserDetails) principal;
+        String username = userDetails.getUsername();
+
+        if (StrUtil.isBlank(username)) {
+            log.warn("⚠ 用户名为空");
+            throw new BadCredentialsException("用户名为空");
+        }
+
+        // 绑定用户身份到当前会话(重要:用于 @SendToUser 等注解)
+        accessor.setUser(authentication);
+
+        // 获取会话 ID
+        String sessionId = accessor.getSessionId();
+        if (sessionId == null) {
+            log.warn("⚠ 会话 ID 为空,使用临时 ID");
+            sessionId = "temp-" + System.nanoTime();
+        }
+
+        // 记录用户上线状态
+        try {
+            webSocketService.userConnected(username, sessionId);
+            log.info("✓ WebSocket 连接建立成功: 用户[{}], 会话[{}]", username, sessionId);
+        } catch (Exception ex) {
+            log.error("❌ 记录用户上线状态失败: 用户[{}], 会话[{}]", username, sessionId, ex);
+            // 不抛出异常,允许连接继续
+        }
+    }
+
+    /**
+     * 处理客户端断开连接事件
+     * 
+     * 注意:
+     * - 只有成功建立过认证的连接才会触发下线事件
+     * - 防止未认证成功的连接产生脏数据
+     */
+    private void handleDisconnect(StompHeaderAccessor accessor) {
+        Authentication authentication = (Authentication) accessor.getUser();
+
+        // 防御性检查:只处理已认证的连接
+        if (authentication == null || !authentication.isAuthenticated()) {
+            log.debug("未认证的连接断开,跳过处理");
+            return;
+        }
+
+        Object principal = authentication.getPrincipal();
+        if (!(principal instanceof SysUserDetails)) {
+            log.warn("⚠ 断开连接时用户凭证类型异常");
+            return;
+        }
+
+        SysUserDetails userDetails = (SysUserDetails) principal;
+        String username = userDetails.getUsername();
+
+        if (StrUtil.isNotBlank(username)) {
+            try {
+                webSocketService.userDisconnected(username);
+                log.info("✓ WebSocket 连接断开: 用户[{}]", username);
+            } catch (Exception ex) {
+                log.error("❌ 记录用户下线状态失败: 用户[{}]", username, ex);
+            }
+        }
+    }
+
+    /**
+     * 处理客户端订阅事件(可选)
+     * 
+     * 用于记录订阅信息或实施订阅级别的权限控制
+     */
+    private void handleSubscribe(StompHeaderAccessor accessor) {
+        Authentication authentication = (Authentication) accessor.getUser();
+
+        if (authentication != null && authentication.isAuthenticated()) {
+            String destination = accessor.getDestination();
+            String username = authentication.getName();
+
+            log.debug("用户[{}]订阅主题: {}", username, destination);
+
+            // TODO: 这里可以实现订阅级别的权限控制
+            // 例如:检查用户是否有权限订阅某个主题
+        }
+    }
+}

+ 41 - 0
src/main/java/com/zsElectric/boot/config/WxMiniAppConfig.java

@@ -0,0 +1,41 @@
+package com.zsElectric.boot.config;
+
+import cn.binarywang.wx.miniapp.api.WxMaService;
+import cn.binarywang.wx.miniapp.api.impl.WxMaServiceImpl;
+import cn.binarywang.wx.miniapp.config.WxMaConfig;
+import cn.binarywang.wx.miniapp.config.impl.WxMaDefaultConfigImpl;
+import lombok.Setter;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * 配置微信 appId 和 appSecret
+ *
+ * @author wangtao
+ * @since 2024/11/26 17:28
+ */
+@Setter
+@ConfigurationProperties(prefix = "wx.miniapp")
+@Configuration
+public class WxMiniAppConfig {
+
+    private String appId;
+
+    private String appSecret;
+
+    @Bean
+    public WxMaConfig wxMaConfig() {
+        WxMaDefaultConfigImpl config = new WxMaDefaultConfigImpl();
+        config.setAppid(appId);
+        config.setSecret(appSecret);
+        return config;
+    }
+
+    @Bean
+    public WxMaService wxMaService(WxMaConfig wxMaConfig) {
+        WxMaService service = new WxMaServiceImpl();
+        service.setWxMaConfig(wxMaConfig);
+        return service;
+    }
+}

+ 61 - 0
src/main/java/com/zsElectric/boot/config/XxlJobConfig.java

@@ -0,0 +1,61 @@
+package com.zsElectric.boot.config;
+
+import com.xxl.job.core.executor.impl.XxlJobSpringExecutor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * xxl-job config
+ *
+ * @author xuxueli 2017-04-28
+ */
+@Configuration
+@ConditionalOnProperty(name = "xxl.job.enabled") // xxl.job.enabled = true 才会自动装配
+@Slf4j
+public class XxlJobConfig {
+
+    @Value("${xxl.job.admin.addresses}")
+    private String adminAddresses;
+
+    @Value("${xxl.job.accessToken}")
+    private String accessToken;
+
+    @Value("${xxl.job.executor.appname}")
+    private String appname;
+
+    @Value("${xxl.job.executor.address}")
+    private String address;
+
+    @Value("${xxl.job.executor.ip}")
+    private String ip;
+
+    @Value("${xxl.job.executor.port}")
+    private int port;
+
+    @Value("${xxl.job.executor.logpath}")
+    private String logPath;
+
+    @Value("${xxl.job.executor.logretentiondays}")
+    private int logRetentionDays;
+
+
+    @Bean
+    public XxlJobSpringExecutor xxlJobExecutor() {
+        log.info(">>>>>>>>>>> xxl-job config init.");
+        XxlJobSpringExecutor xxlJobSpringExecutor = new XxlJobSpringExecutor();
+        xxlJobSpringExecutor.setAdminAddresses(adminAddresses);
+        xxlJobSpringExecutor.setAppname(appname);
+        xxlJobSpringExecutor.setAddress(address);
+        xxlJobSpringExecutor.setIp(ip);
+        xxlJobSpringExecutor.setPort(port);
+        xxlJobSpringExecutor.setAccessToken(accessToken);
+        xxlJobSpringExecutor.setLogPath(logPath);
+        xxlJobSpringExecutor.setLogRetentionDays(logRetentionDays);
+
+        return xxlJobSpringExecutor;
+    }
+
+}

+ 50 - 0
src/main/java/com/zsElectric/boot/config/property/AliyunSmsProperties.java

@@ -0,0 +1,50 @@
package com.zsElectric.boot.config.property;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

import java.util.Map;

/**
 * Aliyun SMS configuration, bound from {@code sms.aliyun.*}.
 *
 * @author Ray
 * @since 2024/8/17
 */
@Configuration
@ConfigurationProperties(prefix = "sms.aliyun")
@Data
public class AliyunSmsProperties {

    /**
     * Aliyun account Access Key ID, used for API request authentication.
     */
    private String accessKeyId;

    /**
     * Aliyun account Access Key Secret, used for API request authentication.
     */
    private String accessKeySecret;

    /**
     * Aliyun SMS API endpoint domain, e.g. dysmsapi.aliyuncs.com
     */
    private String domain;

    /**
     * Aliyun region ID, e.g. cn-shanghai
     */
    private String regionId;

    /**
     * SMS signature; must already be registered and approved in the Aliyun SMS console.
     */
    private String signName;

    /**
     * SMS template map (logical template name -> Aliyun template code).
     */
    private Map<String, String> templates;

}

+ 92 - 0
src/main/java/com/zsElectric/boot/config/property/CaptchaProperties.java

@@ -0,0 +1,92 @@
+package com.zsElectric.boot.config.property;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+/**
+ * Captcha configuration properties (prefix: {@code captcha}).
+ *
+ * @author haoxr
+ * @since 2023/11/24
+ */
+@Component
+@ConfigurationProperties(prefix = "captcha")
+@Data
+public class CaptchaProperties {
+
+    /**
+     * Captcha type: circle (circle interference) | gif | line (line interference) | shear (distortion).
+     */
+    private String type;
+
+    /**
+     * Captcha image width in pixels.
+     */
+    private int width;
+    /**
+     * Captcha image height in pixels.
+     */
+    private int height;
+
+    /**
+     * Number of interference lines drawn over the image.
+     */
+    private int interfereCount;
+
+    /**
+     * Alpha (transparency) of the captcha text.
+     */
+    private Float textAlpha;
+
+    /**
+     * Captcha expiration time, in seconds.
+     */
+    private Long expireSeconds;
+
+    /**
+     * Captcha character configuration.
+     */
+    private CodeProperties code;
+
+    /**
+     * Captcha font configuration.
+     */
+    private FontProperties font;
+
+    /**
+     * Captcha character configuration.
+     */
+    @Data
+    public static class CodeProperties {
+        /**
+         * Character type: math (arithmetic expression) | random (random string).
+         */
+        private String type;
+        /**
+         * Character length; for type=math it is the digit count per operand
+         * (1: units, 2: tens), for type=random it is the number of characters.
+         */
+        private int length;
+    }
+
+    /**
+     * Captcha font configuration.
+     */
+    @Data
+    public static class FontProperties {
+        /**
+         * Font name.
+         */
+        private String name;
+        /**
+         * Font style: 0-plain | 1-bold | 2-italic.
+         */
+        private int weight;
+        /**
+         * Font size.
+         */
+        private int size;
+    }
+
+
+}

+ 96 - 0
src/main/java/com/zsElectric/boot/config/property/CodegenProperties.java

@@ -0,0 +1,96 @@
+package com.zsElectric.boot.config.property;
+
+import cn.hutool.core.io.file.FileNameUtil;
+import cn.hutool.core.map.MapUtil;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Code generator configuration properties (prefix: {@code codegen}).
+ *
+ * @author Ray
+ * @since 2.11.0
+ */
+@Component
+@ConfigurationProperties(prefix = "codegen")
+@Data
+public class CodegenProperties {
+
+
+    /**
+     * Default generation settings (author, module name).
+     */
+    private DefaultConfig defaultConfig ;
+
+    /**
+     * Template configurations keyed by template name.
+     * {@code MapUtil.newHashMap(true)} creates an order-preserving (linked) map,
+     * so templates are processed in configuration order.
+     */
+    private Map<String, TemplateConfig> templateConfigs = MapUtil.newHashMap(true);
+
+    /**
+     * Backend application (project) name.
+     */
+    private String backendAppName;
+
+    /**
+     * Frontend application (project) name.
+     */
+    private String frontendAppName;
+
+    /**
+     * File name used when downloading the generated code archive.
+     */
+    private String downloadFileName;
+
+    /**
+     * Database tables excluded from code generation.
+     */
+    private List<String> excludeTables;
+
+    /**
+     * Per-template configuration.
+     */
+    @Data
+    public static class TemplateConfig {
+
+        /**
+         * Template path (e.g. /templates/codegen/controller.java.vm)
+         */
+        private String templatePath;
+
+        /**
+         * Sub-package name (e.g. controller/service/mapper/model)
+         */
+        private String subpackageName;
+
+        /**
+         * Extension of the generated file, e.g. .java
+         */
+        private String extension = FileNameUtil.EXT_JAVA;
+
+    }
+
+    /**
+     * Default generation settings.
+     */
+    @Data
+    public static class DefaultConfig {
+
+        /**
+         * Author written into generated file headers (e.g. Ray)
+         */
+        private String author;
+
+        /**
+         * Default module name (e.g. system)
+         */
+        private String moduleName;
+
+    }
+
+
+}

+ 89 - 0
src/main/java/com/zsElectric/boot/config/property/MailProperties.java

@@ -0,0 +1,89 @@
+package com.zsElectric.boot.config.property;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+/**
+ * Mail configuration properties (prefix: {@code spring.mail}).
+ * <p>
+ * NOTE(review): this class is not annotated with {@code @Component} — presumably
+ * it is registered via {@code @EnableConfigurationProperties} or
+ * {@code @ConfigurationPropertiesScan} elsewhere; confirm the binding actually occurs.
+ *
+ * @author Ray
+ * @since 2024/8/17
+ */
+@ConfigurationProperties(prefix = "spring.mail")
+@Data
+public class MailProperties {
+
+    /**
+     * Mail server host name or IP address.
+     * Example: smtp.example.com
+     */
+    private String host;
+
+    /**
+     * Mail server port.
+     * Example: 587
+     */
+    private int port;
+
+    /**
+     * User name for connecting to the mail server.
+     * Example: your_email@example.com
+     */
+    private String username;
+
+    /**
+     * Password for connecting to the mail server.
+     * Must be stored securely; never hard-code it in source.
+     */
+    private String password;
+
+    /**
+     * Sender address used in outgoing mail.
+     */
+    private String from;
+
+    /**
+     * Additional mail server properties that further customize sending behavior.
+     */
+    private Properties properties = new Properties();
+
+    /**
+     * Nested holder for detailed mail server options (SMTP-related settings).
+     */
+    @Data
+    public static class Properties {
+
+        /**
+         * SMTP options: authentication, encryption and other protocol settings.
+         */
+        private Smtp smtp = new Smtp();
+
+        @Data
+        public static class Smtp {
+
+            /**
+             * Whether SMTP authentication is enabled.
+             * When {@code true}, a valid username/password pair is required.
+             */
+            private boolean auth;
+
+            /**
+             * STARTTLS encryption options.
+             */
+            private StartTls starttls = new StartTls();
+
+            @Data
+            public static class StartTls {
+
+                /**
+                 * Whether STARTTLS is enabled.
+                 * When {@code true}, mail is sent over a STARTTLS-encrypted channel.
+                 */
+                private boolean enable;
+            }
+        }
+    }
+}

+ 112 - 0
src/main/java/com/zsElectric/boot/config/property/SecurityProperties.java

@@ -0,0 +1,112 @@
+package com.zsElectric.boot.config.property;
+
+import jakarta.validation.constraints.Min;
+import jakarta.validation.constraints.NotEmpty;
+import jakarta.validation.constraints.NotNull;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+import org.springframework.validation.annotation.Validated;
+
+/**
+ * Security module configuration properties.
+ *
+ * <p>Maps the {@code security}-prefixed settings in application.yml; fields are
+ * bean-validated at startup via {@code @Validated}.</p>
+ *
+ * @author Ray.Hao
+ * @since 2024/4/18
+ */
+@Data
+@Component
+@Validated
+@ConfigurationProperties(prefix = "security")
+public class SecurityProperties {
+
+    /**
+     * Session management configuration.
+     */
+    private SessionConfig session;
+
+    /**
+     * Security whitelist paths (bypass the security filter chain entirely).
+     * <p>Example values: /api/v1/auth/login/**, /ws/**
+     */
+    @NotEmpty
+    private String[] ignoreUrls;
+
+    /**
+     * Unsecured endpoint paths (APIs that allow anonymous access).
+     * <p>Example values: /doc.html, /v3/api-docs/**
+     */
+    @NotEmpty
+    private String[] unsecuredUrls;
+
+    /**
+     * Nested session configuration.
+     */
+    @Data
+    public static class SessionConfig {
+        /**
+         * Authentication strategy type.
+         * <ul>
+         *   <li>jwt - stateless authentication based on JWT</li>
+         *   <li>redis-token - stateful authentication backed by Redis</li>
+         * </ul>
+         */
+        @NotNull
+        private String type;
+
+        /**
+         * Access token time-to-live, in seconds.
+         * <p>Default: 3600 (1 hour)</p>
+         * <p>-1 means the token never expires</p>
+         */
+        @Min(-1)
+        private Integer accessTokenTimeToLive = 3600;
+
+        /**
+         * Refresh token time-to-live, in seconds.
+         * <p>Default: 604800 (7 days)</p>
+         * <p>-1 means the token never expires</p>
+         */
+        @Min(-1)
+        private Integer refreshTokenTimeToLive = 604800;
+
+        /**
+         * JWT settings (used when type = jwt).
+         */
+        private JwtConfig jwt;
+
+        /**
+         * Redis token settings (used when type = redis-token).
+         */
+        private RedisTokenConfig redisToken;
+    }
+
+    /**
+     * Nested JWT configuration.
+     */
+    @Data
+    public static class JwtConfig {
+        /**
+         * JWT signing key.
+         * <p>HS256 requires at least 32 characters.</p>
+         * <p>Example: SecretKey012345678901234567890123456789</p>
+         */
+        @NotNull
+        private String secretKey;
+    }
+
+    /**
+     * Nested Redis token configuration.
+     */
+    @Data
+    public static class RedisTokenConfig {
+        /**
+         * Whether concurrent logins from multiple devices are allowed.
+         * <p>true - the same account may be logged in on several devices (default)</p>
+         * <p>false - a new login invalidates the previous token</p>
+         */
+        private Boolean allowMultiLogin = true;
+    }
+}

+ 232 - 0
src/main/java/com/zsElectric/boot/core/aspect/LogAspect.java

@@ -0,0 +1,232 @@
+package com.zsElectric.boot.core.aspect;
+
+import cn.hutool.core.date.DateUtil;
+import cn.hutool.core.date.TimeInterval;
+import cn.hutool.core.util.StrUtil;
+import cn.hutool.crypto.digest.DigestUtil;
+import cn.hutool.http.useragent.UserAgent;
+import cn.hutool.http.useragent.UserAgentUtil;
+import cn.hutool.json.JSONUtil;
+import com.zsElectric.boot.common.enums.LogModuleEnum;
+import com.zsElectric.boot.common.util.IPUtils;
+import com.zsElectric.boot.security.util.SecurityUtils;
+import com.zsElectric.boot.system.model.entity.Log;
+import com.zsElectric.boot.system.service.LogService;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.aspectj.lang.JoinPoint;
+import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.annotation.*;
+import org.springframework.cache.CacheManager;
+import org.springframework.http.HttpMethod;
+import org.springframework.stereotype.Component;
+import org.springframework.web.context.request.RequestContextHolder;
+import org.springframework.web.context.request.ServletRequestAttributes;
+import org.springframework.web.multipart.MultipartFile;
+import org.springframework.web.servlet.HandlerMapping;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Operation-log aspect.
+ * <p>
+ * Intercepts methods annotated with {@link com.zsElectric.boot.common.annotation.Log},
+ * measures execution time and persists an audit record (request, response, client
+ * IP/region, browser/OS) through {@link LogService}.
+ *
+ * @author Ray.Hao
+ * @since 2024/6/25
+ */
+@Slf4j
+@Aspect
+@Component
+@RequiredArgsConstructor
+public class LogAspect {
+    private final LogService logService;
+    private final HttpServletRequest request;
+    private final CacheManager cacheManager;
+
+    /**
+     * Pointcut: any method annotated with @Log.
+     */
+    @Pointcut("@annotation(com.zsElectric.boot.common.annotation.Log)")
+    public void logPointcut() {
+    }
+
+    /**
+     * Around advice: runs the target method and records the audit log afterwards,
+     * whether the call succeeded or threw.
+     *
+     * @param joinPoint     the intercepted join point
+     * @param logAnnotation the @Log annotation on the target method
+     * @return the target method's return value
+     * @throws Throwable rethrows whatever the target method threw
+     */
+    @Around("logPointcut() && @annotation(logAnnotation)")
+    public Object doAround(ProceedingJoinPoint joinPoint, com.zsElectric.boot.common.annotation.Log logAnnotation) throws Throwable {
+        // Capture the user ID before proceeding: the security context may be
+        // cleared during the call (e.g. logout), which would lose the ID.
+        Long userId = SecurityUtils.getUserId();
+
+        TimeInterval timer = DateUtil.timer();
+        Object result = null;
+        Exception exception = null;
+
+        try {
+            result = joinPoint.proceed();
+        } catch (Exception e) {
+            exception = e;
+            throw e;
+        } finally {
+            long executionTime = timer.interval(); // elapsed time in ms
+            this.saveLog(joinPoint, exception, result, logAnnotation, executionTime, userId);
+        }
+        return result;
+    }
+
+
+    /**
+     * Builds and persists one audit-log entity for the intercepted call.
+     *
+     * @param joinPoint     join point of the intercepted method
+     * @param e             exception thrown by the method, or null on success
+     * @param jsonResult    method return value (serialized when result logging is on)
+     * @param logAnnotation the @Log annotation (module, description, flags)
+     * @param executionTime execution time in milliseconds
+     * @param userId        ID of the authenticated user, captured before the call
+     */
+    private void saveLog(final JoinPoint joinPoint, final Exception e, Object jsonResult, com.zsElectric.boot.common.annotation.Log logAnnotation, long executionTime, Long userId) {
+        String requestURI = request.getRequestURI();
+        // Renamed from "log" to avoid shadowing the @Slf4j logger field.
+        Log operationLog = new Log();
+        operationLog.setExecutionTime(executionTime);
+        if (logAnnotation == null && e != null) {
+            operationLog.setModule(LogModuleEnum.EXCEPTION);
+            operationLog.setContent("系统发生异常");
+            this.setRequestParameters(joinPoint, operationLog);
+            operationLog.setResponseContent(JSONUtil.toJsonStr(e.getStackTrace()));
+        } else {
+            operationLog.setModule(logAnnotation.module());
+            operationLog.setContent(logAnnotation.value());
+            // Request parameters (only when enabled on the annotation)
+            if (logAnnotation.params()) {
+                this.setRequestParameters(joinPoint, operationLog);
+            }
+            // Response payload (only when enabled on the annotation)
+            if (logAnnotation.result() && jsonResult != null) {
+                operationLog.setResponseContent(JSONUtil.toJsonStr(jsonResult));
+            }
+        }
+        operationLog.setRequestUri(requestURI);
+        operationLog.setCreateBy(userId);
+        String ipAddr = IPUtils.getIpAddr(request);
+        if (StrUtil.isNotBlank(ipAddr)) {
+            operationLog.setIp(ipAddr);
+            String region = IPUtils.getRegion(ipAddr);
+            // Region format e.g. "中国|0|四川省|成都市|电信" -> province at [2], city at [3]
+            if (StrUtil.isNotBlank(region)) {
+                String[] regionArray = region.split("\\|");
+                // Guard index 3 too: a 3-element result previously threw
+                // ArrayIndexOutOfBoundsException when reading the city.
+                if (regionArray.length > 3) {
+                    operationLog.setProvince(regionArray[2]);
+                    operationLog.setCity(regionArray[3]);
+                } else if (regionArray.length > 2) {
+                    operationLog.setProvince(regionArray[2]);
+                }
+            }
+        }
+
+
+        // Browser and OS information from the User-Agent header
+        String userAgentString = request.getHeader("User-Agent");
+        UserAgent userAgent = resolveUserAgent(userAgentString);
+        if (Objects.nonNull(userAgent)) {
+            // Operating system
+            operationLog.setOs(userAgent.getOs().getName());
+            // Browser name and version
+            operationLog.setBrowser(userAgent.getBrowser().getName());
+            operationLog.setBrowserVersion(userAgent.getBrowser().getVersion(userAgentString));
+        }
+        // Intercepted method name
+        String methodName = joinPoint.getSignature().getName();
+        operationLog.setMethod(methodName);
+        // Persist the audit record
+        logService.save(operationLog);
+    }
+
+    /**
+     * Extracts the request parameters into the log entity.
+     * <p>
+     * Uses Spring's {@link HttpMethod} for the verb comparison (the original code
+     * accidentally imported {@code com.aliyun.oss.HttpMethod} from the OSS SDK).
+     *
+     * @param joinPoint join point (method arguments)
+     * @param log       audit-log entity being populated
+     */
+    private void setRequestParameters(JoinPoint joinPoint, Log log) {
+        String requestMethod = request.getMethod();
+        log.setRequestMethod(requestMethod);
+        if (HttpMethod.GET.name().equalsIgnoreCase(requestMethod) || HttpMethod.PUT.name().equalsIgnoreCase(requestMethod) || HttpMethod.POST.name().equalsIgnoreCase(requestMethod)) {
+            String params = convertArgumentsToString(joinPoint.getArgs());
+            // Truncate to the TEXT column limit
+            log.setRequestParams(StrUtil.sub(params, 0, 65535));
+        } else {
+            ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
+            Map<?, ?> paramsMap = null;
+            if (attributes != null) {
+                paramsMap = (Map<?, ?>) attributes.getRequest().getAttribute(HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE);
+            }
+            // The URI-template attribute may be absent; avoid the previous NPE on toString()
+            log.setRequestParams(paramsMap != null ? StrUtil.sub(paramsMap.toString(), 0, 65535) : "");
+        }
+    }
+
+    /**
+     * Serializes the argument array to a space-separated JSON string,
+     * skipping nulls and non-serializable web objects.
+     *
+     * @param paramsArray method arguments (may be null)
+     * @return concatenated JSON representation of the loggable arguments
+     */
+    private String convertArgumentsToString(Object[] paramsArray) {
+        StringBuilder params = new StringBuilder();
+        if (paramsArray != null) {
+            for (Object param : paramsArray) {
+                // Null-safe: controller arguments may legitimately be null,
+                // and shouldFilterObject() dereferences param.getClass().
+                if (param != null && !shouldFilterObject(param)) {
+                    params.append(JSONUtil.toJsonStr(param)).append(" ");
+                }
+            }
+        }
+        return params.toString().trim();
+    }
+
+    /**
+     * Decides whether an argument must be excluded from the serialized parameters.
+     *
+     * @param obj argument object (non-null)
+     * @return true if the object is (or contains) a MultipartFile, or is a raw
+     *         servlet request/response; false otherwise
+     */
+    private boolean shouldFilterObject(Object obj) {
+        Class<?> clazz = obj.getClass();
+        if (clazz.isArray()) {
+            return MultipartFile.class.isAssignableFrom(clazz.getComponentType());
+        } else if (Collection.class.isAssignableFrom(clazz)) {
+            Collection<?> collection = (Collection<?>) obj;
+            return collection.stream().anyMatch(item -> item instanceof MultipartFile);
+        } else if (Map.class.isAssignableFrom(clazz)) {
+            Map<?, ?> map = (Map<?, ?>) obj;
+            return map.values().stream().anyMatch(value -> value instanceof MultipartFile);
+        }
+        return obj instanceof MultipartFile || obj instanceof HttpServletRequest || obj instanceof HttpServletResponse;
+    }
+
+
+    /**
+     * Parses a User-Agent string, caching results keyed by the MD5 of the raw
+     * header (MD5 keeps the cache key short for very long UA strings).
+     *
+     * @param userAgentString raw User-Agent header, may be blank
+     * @return parsed UserAgent, or null when the header is blank
+     */
+    public UserAgent resolveUserAgent(String userAgentString) {
+        if (StrUtil.isBlank(userAgentString)) {
+            return null;
+        }
+        // Hash the UA string so the cache key stays short
+        String userAgentStringMD5 = DigestUtil.md5Hex(userAgentString);
+        // Cache hit?
+        UserAgent userAgent = Objects.requireNonNull(cacheManager.getCache("userAgent")).get(userAgentStringMD5, UserAgent.class);
+        if (userAgent != null) {
+            return userAgent;
+        }
+        userAgent = UserAgentUtil.parse(userAgentString);
+        Objects.requireNonNull(cacheManager.getCache("userAgent")).put(userAgentStringMD5, userAgent);
+        return userAgent;
+    }
+
+}

+ 102 - 0
src/main/java/com/zsElectric/boot/core/aspect/RepeatSubmitAspect.java

@@ -0,0 +1,102 @@
+package com.zsElectric.boot.core.aspect;
+
+import cn.hutool.core.util.StrUtil;
+import cn.hutool.crypto.digest.DigestUtil;
+import com.zsElectric.boot.common.constant.RedisConstants;
+import com.zsElectric.boot.common.constant.SecurityConstants;
+import com.zsElectric.boot.core.web.ResultCode;
+import com.zsElectric.boot.core.exception.BusinessException;
+import com.zsElectric.boot.common.annotation.RepeatSubmit;
+import com.zsElectric.boot.common.util.IPUtils;
+import jakarta.servlet.http.HttpServletRequest;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.annotation.Around;
+import org.aspectj.lang.annotation.Aspect;
+import org.aspectj.lang.annotation.Pointcut;
+import org.redisson.api.RLock;
+import org.redisson.api.RedissonClient;
+import org.springframework.http.HttpHeaders;
+import org.springframework.stereotype.Component;
+import org.springframework.web.context.request.RequestContextHolder;
+import org.springframework.web.context.request.ServletRequestAttributes;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Duplicate-submit prevention aspect.
+ * <p>
+ * Guards methods annotated with {@link RepeatSubmit} using a Redisson lock keyed
+ * by (user identity, HTTP method, URI). A second call within the lock's lease
+ * window is rejected with {@code USER_DUPLICATE_REQUEST}.
+ *
+ * @author Ray.Hao
+ * @since 2.3.0
+ */
+@Aspect
+@Component
+@RequiredArgsConstructor
+@Slf4j
+public class RepeatSubmitAspect {
+
+    private final RedissonClient redissonClient;
+
+    /**
+     * Pointcut: any method annotated with @RepeatSubmit (annotation bound as arg).
+     */
+    @Pointcut("@annotation(repeatSubmit)")
+    public void repeatSubmitPointCut(RepeatSubmit repeatSubmit) {
+    }
+
+    /**
+     * Around advice implementing the duplicate-submit check.
+     * <p>
+     * tryLock(0, expire, SECONDS) is a non-blocking attempt with a lease of
+     * {@code expire} seconds. The lock is intentionally NOT released after
+     * proceed(): holding it until the lease expires is what blocks duplicate
+     * submissions for the whole window.
+     *
+     * @throws BusinessException with USER_DUPLICATE_REQUEST when the lock is already held
+     */
+    @Around(value = "repeatSubmitPointCut(repeatSubmit)", argNames = "pjp,repeatSubmit")
+    public Object handleRepeatSubmit(ProceedingJoinPoint pjp, RepeatSubmit repeatSubmit) throws Throwable {
+        String lockKey = buildLockKey();
+
+        int expire = repeatSubmit.expire();
+        RLock lock = redissonClient.getLock(lockKey);
+
+        boolean locked = lock.tryLock(0, expire, TimeUnit.SECONDS);
+        if (!locked) {
+            throw new BusinessException(ResultCode.USER_DUPLICATE_REQUEST);
+        }
+        return pjp.proceed();
+    }
+
+    /**
+     * Builds the Redis lock key from the user identity and the request identity.
+     * <p>
+     * NOTE(review): getRequestAttributes() returns null outside a web request;
+     * this aspect presumably only ever runs on MVC endpoints — confirm before
+     * applying @RepeatSubmit to non-web code paths.
+     *
+     * @return the resubmit lock key
+     */
+    private String buildLockKey() {
+        HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
+        // User identity component of the key
+        String userIdentifier = getUserIdentifier(request);
+        // Request identity = HTTP method + URI (a stricter variant would also hash the body/params)
+        String requestIdentifier = StrUtil.join(":", request.getMethod(), request.getRequestURI());
+        return StrUtil.format(RedisConstants.Lock.RESUBMIT, userIdentifier, requestIdentifier);
+    }
+
+    /**
+     * Resolves a unique user identifier:
+     * 1. if a Bearer token is present, the SHA-256 of the token;
+     * 2. otherwise the client IP address.
+     *
+     * @param request current HTTP request
+     * @return unique user identifier
+     */
+    private String getUserIdentifier(HttpServletRequest request) {
+        // Unique identity of the caller
+        String userIdentifier;
+        // Read the Authorization header
+        String tokenHeader = request.getHeader(HttpHeaders.AUTHORIZATION);
+        if (StrUtil.isNotBlank(tokenHeader) && tokenHeader.startsWith(SecurityConstants.BEARER_TOKEN_PREFIX)) {
+            String rawToken = tokenHeader.substring(SecurityConstants.BEARER_TOKEN_PREFIX.length());  // strip the "Bearer " prefix
+            userIdentifier = DigestUtil.sha256Hex(rawToken); // SHA-256 of the token keeps the key short and opaque
+        } else {
+            userIdentifier = IPUtils.getIpAddr(request); // fall back to the client IP
+        }
+        return userIdentifier;
+    }
+
+
+}
+

+ 45 - 0
src/main/java/com/zsElectric/boot/core/exception/BusinessException.java

@@ -0,0 +1,45 @@
+package com.zsElectric.boot.core.exception;
+
+import com.zsElectric.boot.core.web.IResultCode;
+import lombok.Getter;
+import org.slf4j.helpers.MessageFormatter;
+
+/**
+ * Custom business exception carrying an optional machine-readable result code.
+ * <p>
+ * Handlers read the code via {@code getResultCode()} (Lombok @Getter) and must
+ * null-check it: message-only constructors leave it null.
+ *
+ * @author Ray
+ * @since 2022/7/31
+ */
+@Getter
+public class BusinessException extends RuntimeException {
+
+    /**
+     * Result code for the failure; null when the exception was built from a
+     * plain message/cause. Made private+final (was a public mutable field that
+     * duplicated the @Getter and broke encapsulation).
+     */
+    private final IResultCode resultCode;
+
+    /**
+     * @param errorCode result code; its default message becomes the exception message
+     */
+    public BusinessException(IResultCode errorCode) {
+        super(errorCode.getMsg());
+        this.resultCode = errorCode;
+    }
+
+    /**
+     * @param errorCode result code exposed to handlers
+     * @param message   human-readable message overriding the code's default
+     */
+    public BusinessException(IResultCode errorCode, String message) {
+        super(message);
+        this.resultCode = errorCode;
+    }
+
+    /**
+     * Message + cause; no result code is attached.
+     */
+    public BusinessException(String message, Throwable cause) {
+        super(message, cause);
+        this.resultCode = null;
+    }
+
+    /**
+     * Cause only; no result code is attached.
+     */
+    public BusinessException(Throwable cause) {
+        super(cause);
+        this.resultCode = null;
+    }
+
+    /**
+     * Builds the message with SLF4J-style {} placeholders,
+     * e.g. {@code new BusinessException("user {} not found", id)}.
+     * No result code is attached.
+     */
+    public BusinessException(String message, Object... args) {
+        super(formatMessage(message, args));
+        this.resultCode = null;
+    }
+
+    /**
+     * Formats {} placeholders using SLF4J's MessageFormatter.
+     */
+    private static String formatMessage(String message, Object... args) {
+        return MessageFormatter.arrayFormat(message, args).getMessage();
+    }
+}

+ 278 - 0
src/main/java/com/zsElectric/boot/core/exception/GlobalExceptionHandler.java

@@ -0,0 +1,278 @@
+package com.zsElectric.boot.core.exception;
+
+import cn.hutool.core.util.StrUtil;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.zsElectric.boot.core.web.Result;
+import com.zsElectric.boot.core.web.ResultCode;
+import jakarta.servlet.ServletException;
+import jakarta.validation.ConstraintViolation;
+import jakarta.validation.ConstraintViolationException;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.TypeMismatchException;
+import org.springframework.context.support.DefaultMessageSourceResolvable;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.converter.HttpMessageNotReadableException;
+import org.springframework.jdbc.BadSqlGrammarException;
+import org.springframework.security.access.AccessDeniedException;
+import org.springframework.security.core.AuthenticationException;
+import org.springframework.validation.BindException;
+import org.springframework.web.bind.MethodArgumentNotValidException;
+import org.springframework.web.bind.MissingServletRequestParameterException;
+import org.springframework.web.bind.annotation.ExceptionHandler;
+import org.springframework.web.bind.annotation.ResponseStatus;
+import org.springframework.web.bind.annotation.RestControllerAdvice;
+import org.springframework.web.method.annotation.MethodArgumentTypeMismatchException;
+import org.springframework.web.servlet.NoHandlerFoundException;
+
+import java.sql.SQLIntegrityConstraintViolationException;
+import java.sql.SQLSyntaxErrorException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+/**
+ * Global exception handler.
+ * <p>
+ * Maps each exception type to an HTTP status code and a unified {@link Result} body.
+ */
+@RestControllerAdvice
+@Slf4j
+public class GlobalExceptionHandler {
+
+    /**
+     * Handles bind exceptions.
+     * <p>
+     * Thrown when binding request parameters onto an object fails; all field
+     * error messages are joined with ";".
+     */
+    @ExceptionHandler(BindException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(BindException e) {
+        log.error("BindException:{}", e.getMessage());
+        String msg = e.getAllErrors().stream().map(DefaultMessageSourceResolvable::getDefaultMessage).collect(Collectors.joining(";"));
+        return Result.failed(ResultCode.USER_REQUEST_PARAMETER_ERROR, msg);
+    }
+
+    /**
+     * Handles @RequestParam constraint violations.
+     * <p>
+     * Thrown when a request parameter fails Bean Validation constraints
+     * (e.g. an invalid @RequestParam); violation messages are joined with ";".
+     */
+    @ExceptionHandler(ConstraintViolationException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(ConstraintViolationException e) {
+        log.error("ConstraintViolationException:{}", e.getMessage());
+        String msg = e.getConstraintViolations().stream().map(ConstraintViolation::getMessage).collect(Collectors.joining(";"));
+        return Result.failed(ResultCode.INVALID_USER_INPUT, msg);
+    }
+
+    /**
+     * Handles method-argument validation failures.
+     * <p>
+     * Thrown when a method argument annotated with @Valid or @Validated
+     * fails validation.
+     */
+    @ExceptionHandler(MethodArgumentNotValidException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(MethodArgumentNotValidException e) {
+        log.error("MethodArgumentNotValidException:{}", e.getMessage());
+        String msg = e.getBindingResult().getAllErrors().stream().map(DefaultMessageSourceResolvable::getDefaultMessage).collect(Collectors.joining(";"));
+        return Result.failed(ResultCode.INVALID_USER_INPUT, msg);
+    }
+
+    /**
+     * Handles requests to non-existent endpoints.
+     * <p>
+     * Thrown when the client requests a path with no registered handler.
+     */
+    @ExceptionHandler(NoHandlerFoundException.class)
+    @ResponseStatus(HttpStatus.NOT_FOUND)
+    public <T> Result<T> processException(NoHandlerFoundException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(ResultCode.INTERFACE_NOT_EXIST);
+    }
+
+    /**
+     * Handles missing required request parameters.
+     * <p>
+     * Thrown when a required request parameter is absent.
+     */
+    @ExceptionHandler(MissingServletRequestParameterException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(MissingServletRequestParameterException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(ResultCode.REQUEST_REQUIRED_PARAMETER_IS_EMPTY);
+    }
+
+    /**
+     * Handles method-argument type mismatches.
+     * <p>
+     * Thrown when a request parameter cannot be converted to the expected type.
+     */
+    @ExceptionHandler(MethodArgumentTypeMismatchException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(MethodArgumentTypeMismatchException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(ResultCode.PARAMETER_FORMAT_MISMATCH, "类型错误");
+    }
+
+    /**
+     * Handles servlet-level exceptions.
+     * <p>
+     * Thrown when the servlet layer fails while processing the request.
+     */
+    @ExceptionHandler(ServletException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(ServletException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(e.getMessage());
+    }
+
+    /**
+     * Handles illegal-argument exceptions.
+     * <p>
+     * Thrown when a method receives an invalid argument.
+     */
+    @ExceptionHandler(IllegalArgumentException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> handleIllegalArgumentException(IllegalArgumentException e) {
+        log.error("非法参数异常,异常原因:{}", e.getMessage(), e);
+        return Result.failed(e.getMessage());
+    }
+
+    /**
+     * Handles JSON processing errors.
+     * <p>
+     * Thrown when serializing or deserializing JSON fails.
+     */
+    @ExceptionHandler(JsonProcessingException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> handleJsonProcessingException(JsonProcessingException e) {
+        log.error("Json转换异常,异常原因:{}", e.getMessage(), e);
+        return Result.failed(e.getMessage());
+    }
+
+    /**
+     * Handles unreadable request bodies.
+     * <p>
+     * Thrown when the request body is missing or cannot be parsed; the cause
+     * (if any) is converted to a per-field error message via convertMessage().
+     */
+    @ExceptionHandler(HttpMessageNotReadableException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(HttpMessageNotReadableException e) {
+        log.error(e.getMessage(), e);
+        String errorMessage = "请求体不可为空";
+        Throwable cause = e.getCause();
+        if (cause != null) {
+            errorMessage = convertMessage(cause);
+        }
+        return Result.failed(errorMessage);
+    }
+
+    /**
+     * Handles generic type-mismatch exceptions.
+     * <p>
+     * Thrown when a value cannot be converted to the target type during binding.
+     */
+    @ExceptionHandler(TypeMismatchException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> processException(TypeMismatchException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(e.getMessage());
+    }
+
+    /**
+     * Handles SQL grammar errors raised by Spring JDBC.
+     * <p>
+     * NOTE(review): responds 403 FORBIDDEN — unusual for a server-side SQL error;
+     * presumably chosen to mask database details. Confirm this is intentional.
+     */
+    @ExceptionHandler(BadSqlGrammarException.class)
+    @ResponseStatus(HttpStatus.FORBIDDEN)
+    public <T> Result<T> handleBadSqlGrammarException(BadSqlGrammarException e) {
+        log.error(e.getMessage(), e);
+        String errorMsg = e.getMessage();
+        if (StrUtil.isNotBlank(errorMsg) && errorMsg.contains("denied to user")) {
+            return Result.failed(ResultCode.DATABASE_ACCESS_DENIED);
+        } else {
+            return Result.failed(e.getMessage());
+        }
+    }
+
+    /**
+     * Handles JDBC SQL syntax errors.
+     * <p>
+     * Thrown when the database rejects a statement for syntax reasons.
+     */
+    @ExceptionHandler(SQLSyntaxErrorException.class)
+    @ResponseStatus(HttpStatus.FORBIDDEN)
+    public <T> Result<T> processSQLSyntaxErrorException(SQLSyntaxErrorException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(ResultCode.DATABASE_EXECUTION_SYNTAX_ERROR);
+    }
+
+
+    /**
+     * Handles integrity-constraint violations.
+     * <p>
+     * Thrown when a statement violates a database integrity constraint
+     * (duplicate key, foreign key, etc.).
+     */
+    @ExceptionHandler(SQLIntegrityConstraintViolationException.class)
+    @ResponseStatus(HttpStatus.FORBIDDEN)
+    public <T> Result<T> handleSQLIntegrityConstraintViolationException(SQLIntegrityConstraintViolationException e) {
+        log.error(e.getMessage(), e);
+        return Result.failed(ResultCode.INTEGRITY_CONSTRAINT_VIOLATION);
+    }
+
+    /**
+     * Handles business exceptions.
+     * <p>
+     * Thrown by application code via {@link BusinessException}; the embedded
+     * result code (if present) is propagated to the response.
+     */
+    @ExceptionHandler(BusinessException.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> handleBizException(BusinessException e) {
+        log.error("biz exception", e);
+        if (e.getResultCode() != null) {
+            return Result.failed(e.getResultCode(), e.getMessage());
+        }
+        return Result.failed(e.getMessage());
+    }
+
+    /**
+     * Catch-all handler for any uncaught exception.
+     * <p>
+     * Spring Security exceptions are re-thrown so the framework's dedicated
+     * entry point / access-denied handlers can process them.
+     */
+    @ExceptionHandler(Exception.class)
+    @ResponseStatus(HttpStatus.BAD_REQUEST)
+    public <T> Result<T> handleException(Exception e) throws Exception {
+        // Re-throw Spring Security exceptions for the custom security handlers
+        if (e instanceof AccessDeniedException
+                || e instanceof AuthenticationException) {
+            throw e;
+        }
+        log.error("unknown exception", e);
+        return Result.failed(e.getLocalizedMessage());
+    }
+
+    /**
+     * Converts the cause of an unreadable-body error into a readable
+     * field-type message, extracting the offending field from the
+     * ["fieldName"] fragment of the exception text.
+     *
+     * @param throwable the cause of the HttpMessageNotReadableException
+     * @return the formatted error message (empty string when no field matched)
+     */
+    private String convertMessage(Throwable throwable) {
+        String error = throwable.toString();
+        String regulation = "\\[\"(.*?)\"]+";
+        Pattern pattern = Pattern.compile(regulation);
+        Matcher matcher = pattern.matcher(error);
+        String group = "";
+        if (matcher.find()) {
+            String matchString = matcher.group();
+            matchString = matchString.replace("[", "").replace("]", "");
+            matchString = "%s字段类型错误".formatted(matchString.replaceAll("\"", ""));
+            group += matchString;
+        }
+        return group;
+    }
+}

+ 98 - 0
src/main/java/com/zsElectric/boot/core/filter/RateLimiterFilter.java

@@ -0,0 +1,98 @@
+package com.zsElectric.boot.core.filter;
+
+import cn.hutool.core.convert.Convert;
+import cn.hutool.core.util.StrUtil;
+import com.zsElectric.boot.common.constant.RedisConstants;
+import com.zsElectric.boot.common.constant.SystemConstants;
+import com.zsElectric.boot.core.web.ResultCode;
+import com.zsElectric.boot.common.util.IPUtils;
+import com.zsElectric.boot.core.web.WebResponseHelper;
+import com.zsElectric.boot.system.service.ConfigService;
+import jakarta.servlet.FilterChain;
+import jakarta.servlet.ServletException;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import lombok.extern.slf4j.Slf4j;
+import org.jetbrains.annotations.NotNull;
+import org.springframework.data.redis.core.RedisTemplate;
+import org.springframework.web.filter.OncePerRequestFilter;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * IP 限流过滤器
+ *
+ * @author Theo
+ * @since 2024/08/10 14:38
+ */
+@Slf4j
+public class RateLimiterFilter extends OncePerRequestFilter {
+
+    private final RedisTemplate<String, Object> redisTemplate;
+    private final ConfigService configService;
+
+    private static final long DEFAULT_IP_LIMIT = 10L; // 默认 IP 限流阈值
+
+    public RateLimiterFilter(RedisTemplate<String, Object> redisTemplate, ConfigService configService) {
+        this.redisTemplate = redisTemplate;
+        this.configService = configService;
+    }
+
+    /**
+     * 判断 IP 是否触发限流
+     * 默认限制同一 IP 每秒最多请求 10 次,可通过系统配置调整。
+     * 如果系统未配置限流阈值,默认跳过限流。
+     *
+     * @param ip IP 地址
+     * @return 是否限流:true 表示限流;false 表示未限流
+     */
+    public boolean rateLimit(String ip) {
+        // 限流 Redis 键
+        String key = StrUtil.format(RedisConstants.RateLimiter.IP, ip);
+
+        // 自增请求计数
+        Long count = redisTemplate.opsForValue().increment(key);
+        if (count == null || count == 1) {
+            // 第一次访问时设置过期时间为 1 秒
+            redisTemplate.expire(key, 1, TimeUnit.SECONDS);
+        }
+
+        // 获取系统配置的限流阈值
+        Object systemConfig = configService.getSystemConfig(SystemConstants.SYSTEM_CONFIG_IP_QPS_LIMIT_KEY);
+        if (systemConfig == null) {
+            // 系统未配置限流,跳过限流逻辑
+            log.warn("系统未配置限流阈值,跳过限流");
+            return false;
+        }
+
+        // 转换系统配置为限流值,默认为 10
+        long limit = Convert.toLong(systemConfig, DEFAULT_IP_LIMIT);
+        return count != null && count > limit;
+    }
+
+    /**
+     * 执行 IP 限流逻辑
+     * 如果 IP 请求超出限制,直接返回限流响应;否则继续执行过滤器链。
+     *
+     * @param request     请求体
+     * @param response    响应体
+     * @param filterChain 过滤器链
+     */
+    @Override
+    protected void doFilterInternal(@NotNull HttpServletRequest request, @NotNull HttpServletResponse response,
+                                    @NotNull FilterChain filterChain) throws ServletException, IOException {
+        // 获取请求的 IP 地址
+        String ip = IPUtils.getIpAddr(request);
+
+        // 判断是否限流
+        if (rateLimit(ip)) {
+            // 返回限流错误信息
+            WebResponseHelper.writeError(response, ResultCode.REQUEST_CONCURRENCY_LIMIT_EXCEEDED);
+            return;
+        }
+
+        // 未触发限流,继续执行过滤器链
+        filterChain.doFilter(request, response);
+    }
+}

+ 38 - 0
src/main/java/com/zsElectric/boot/core/filter/RequestLogFilter.java

@@ -0,0 +1,38 @@
+package com.zsElectric.boot.core.filter;
+
+import com.zsElectric.boot.common.util.IPUtils;
+import jakarta.servlet.http.HttpServletRequest;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.web.filter.CommonsRequestLoggingFilter;
+
+/**
+ * 请求日志打印过滤器
+ *
+ * @author haoxr
+ * @since 2023/03/03
+ */
+@Configuration
+@Slf4j
+public class RequestLogFilter extends CommonsRequestLoggingFilter {
+
+    @Override
+    protected boolean shouldLog(HttpServletRequest request) {
+        // 设置日志输出级别,默认debug
+        return this.logger.isInfoEnabled();
+    }
+
+    @Override
+    protected void beforeRequest(HttpServletRequest request, String message) {
+        String requestURI = request.getRequestURI();
+        String ip = IPUtils.getIpAddr(request);
+        log.info("request,ip:{}, uri: {}", ip, requestURI);
+        super.beforeRequest(request, message);
+    }
+
+    @Override
+    protected void afterRequest(HttpServletRequest request, String message) {
+        super.afterRequest(request, message);
+    }
+
+}

+ 33 - 0
src/main/java/com/zsElectric/boot/core/validator/FieldValidator.java

@@ -0,0 +1,33 @@
+package com.zsElectric.boot.core.validator;
+
+import com.zsElectric.boot.common.annotation.ValidField;
+import jakarta.validation.ConstraintValidator;
+import jakarta.validation.ConstraintValidatorContext;
+
+import java.util.Arrays;
+
+/**
+ * 字段校验器
+ *
+ * @author Ray.Hao
+ * @since 2024/11/18
+ */
+public class FieldValidator implements ConstraintValidator<ValidField, String> {
+
+    private String[] allowedValues;
+
+    @Override
+    public void initialize(ValidField constraintAnnotation) {
+        // 初始化允许的值列表
+        this.allowedValues = constraintAnnotation.allowedValues();
+    }
+
+    @Override
+    public boolean isValid(String value, ConstraintValidatorContext context) {
+        if (value == null) {
+            return true; // 如果字段允许为空,可以返回 true
+        }
+        // 检查值是否在允许列表中
+        return Arrays.asList(allowedValues).contains(value);
+    }
+}

+ 43 - 0
src/main/java/com/zsElectric/boot/core/web/ExcelResult.java

@@ -0,0 +1,43 @@
+package com.zsElectric.boot.core.web;
+
+import lombok.Data;
+
+import java.util.ArrayList;
+import java.util.List;
+
/**
 * Excel import result payload.
 * <p>
 * Starts in the SUCCESS state with zero counts and an empty message list;
 * the import routine mutates it as rows are processed.
 *
 * @author Theo
 * @since 2025/1/14 11:46:08
 */
@Data
public class ExcelResult {

    /**
     * Result code indicating whether the import succeeded.
     */
    private String code;

    /**
     * Number of valid (successfully imported) rows.
     */
    private Integer validCount;

    /**
     * Number of invalid (rejected) rows.
     */
    private Integer invalidCount;

    /**
     * Per-row error messages accumulated during the import.
     */
    private List<String> messageList;

    public ExcelResult() {
        this.code = ResultCode.SUCCESS.getCode();
        this.validCount = 0;
        this.invalidCount = 0;
        this.messageList = new ArrayList<>();
    }
}

+ 15 - 0
src/main/java/com/zsElectric/boot/core/web/IResultCode.java

@@ -0,0 +1,15 @@
+package com.zsElectric.boot.core.web;
+
/**
 * Contract for response codes: a machine-readable code plus a
 * human-readable message. Implemented by {@code ResultCode} and usable by
 * custom business exceptions.
 *
 * @author Ray.Hao
 * @since 1.0.0
 **/
public interface IResultCode {

    /** Machine-readable response code (e.g. "00000", "A0001"). */
    String getCode();

    /** Human-readable description of the code. */
    String getMsg();

}

+ 46 - 0
src/main/java/com/zsElectric/boot/core/web/PageResult.java

@@ -0,0 +1,46 @@
+package com.zsElectric.boot.core.web;
+
+import com.baomidou.mybatisplus.core.metadata.IPage;
+import lombok.Data;
+
+import java.io.Serializable;
+import java.util.List;
+
+/**
+ * 分页响应结构体
+ *
+ * @author Ray
+ * @since 2022/2/18
+ */
+@Data
+public class PageResult<T> implements Serializable {
+
+    private String code;
+
+    private Data<T> data;
+
+    private String msg;
+
+    public static <T> PageResult<T> success(IPage<T> page) {
+        PageResult<T> result = new PageResult<>();
+        result.setCode(ResultCode.SUCCESS.getCode());
+
+        Data<T> data = new Data<>();
+        data.setList(page.getRecords());
+        data.setTotal(page.getTotal());
+
+        result.setData(data);
+        result.setMsg(ResultCode.SUCCESS.getMsg());
+        return result;
+    }
+
+    @lombok.Data
+    public static class Data<T> {
+
+        private List<T> list;
+
+        private long total;
+
+    }
+
+}

+ 74 - 0
src/main/java/com/zsElectric/boot/core/web/Result.java

@@ -0,0 +1,74 @@
+package com.zsElectric.boot.core.web;
+
+import cn.hutool.core.util.StrUtil;
+import lombok.Data;
+
+import java.io.Serializable;
+
+/**
+ * 统一响应结构体
+ *
+ * @author Ray
+ * @since 2022/1/30
+ **/
+@Data
+public class Result<T> implements Serializable {
+
+    private String code;
+
+    private T data;
+
+    private String msg;
+
+    public static <T> Result<T> success() {
+        return success(null);
+    }
+
+    public static <T> Result<T> success(T data) {
+        Result<T> result = new Result<>();
+        result.setCode(ResultCode.SUCCESS.getCode());
+        result.setMsg(ResultCode.SUCCESS.getMsg());
+        result.setData(data);
+        return result;
+    }
+
+    public static <T> Result<T> failed() {
+        return result(ResultCode.SYSTEM_ERROR.getCode(), ResultCode.SYSTEM_ERROR.getMsg(), null);
+    }
+
+    public static <T> Result<T> failed(String msg) {
+        return result(ResultCode.SYSTEM_ERROR.getCode(), msg, null);
+    }
+
+    public static <T> Result<T> judge(boolean status) {
+        if (status) {
+            return success();
+        } else {
+            return failed();
+        }
+    }
+
+    public static <T> Result<T> failed(IResultCode resultCode) {
+        return result(resultCode.getCode(), resultCode.getMsg(), null);
+    }
+
+    public static <T> Result<T> failed(IResultCode resultCode, String msg) {
+        return result(resultCode.getCode(), StrUtil.isNotBlank(msg) ? msg : resultCode.getMsg(), null);
+    }
+
+    private static <T> Result<T> result(IResultCode resultCode, T data) {
+        return result(resultCode.getCode(), resultCode.getMsg(), data);
+    }
+
+    private static <T> Result<T> result(String code, String msg, T data) {
+        Result<T> result = new Result<>();
+        result.setCode(code);
+        result.setData(data);
+        result.setMsg(msg);
+        return result;
+    }
+
+    public static boolean isSuccess(Result<?> result) {
+        return result != null && ResultCode.SUCCESS.getCode().equals(result.getCode());
+    }
+}

+ 300 - 0
src/main/java/com/zsElectric/boot/core/web/ResultCode.java

@@ -0,0 +1,300 @@
+package com.zsElectric.boot.core.web;
+
+import lombok.AllArgsConstructor;
+import lombok.NoArgsConstructor;
+
+import java.io.Serializable;
+
+/**
+ * 响应码枚举
+ * <p>
+ * 参考阿里巴巴开发手册响应码规范
+ * 00000 正常
+ * A**** 用户端错误
+ * B**** 系统执行出错
+ * C**** 调用第三方服务出错
+ *
+ * @author Ray.Hao
+ * @since 2020/6/23
+ **/
+@AllArgsConstructor
+@NoArgsConstructor
+public enum ResultCode implements IResultCode, Serializable {
+
+    SUCCESS("00000", "一切ok"),
+
+    /** 一级宏观错误码  */
+    USER_ERROR("A0001", "用户端错误"),
+
+    /** 二级宏观错误码  */
+    USER_REGISTRATION_ERROR("A0100", "用户注册错误"),
+    USER_NOT_AGREE_PRIVACY_AGREEMENT("A0101", "用户未同意隐私协议"),
+    REGISTRATION_COUNTRY_OR_REGION_RESTRICTED("A0102", "注册国家或地区受限"),
+
+    USERNAME_VERIFICATION_FAILED("A0110", "用户名校验失败"),
+    USERNAME_ALREADY_EXISTS("A0111", "用户名已存在"),
+    USERNAME_CONTAINS_SENSITIVE_WORDS("A0112", "用户名包含敏感词"),
+    USERNAME_CONTAINS_SPECIAL_CHARACTERS("A0113", "用户名包含特殊字符"),
+
+    PASSWORD_VERIFICATION_FAILED("A0120", "密码校验失败"),
+    PASSWORD_LENGTH_NOT_ENOUGH("A0121", "密码长度不够"),
+    PASSWORD_STRENGTH_NOT_ENOUGH("A0122", "密码强度不够"),
+
+    VERIFICATION_CODE_INPUT_ERROR("A0130", "校验码输入错误"),
+    SMS_VERIFICATION_CODE_INPUT_ERROR("A0131", "短信校验码输入错误"),
+    EMAIL_VERIFICATION_CODE_INPUT_ERROR("A0132", "邮件校验码输入错误"),
+    VOICE_VERIFICATION_CODE_INPUT_ERROR("A0133", "语音校验码输入错误"),
+
+    USER_CERTIFICATE_EXCEPTION("A0140", "用户证件异常"),
+    USER_CERTIFICATE_TYPE_NOT_SELECTED("A0141", "用户证件类型未选择"),
+    MAINLAND_ID_NUMBER_VERIFICATION_ILLEGAL("A0142", "大陆身份证编号校验非法"),
+
+    USER_BASIC_INFORMATION_VERIFICATION_FAILED("A0150", "用户基本信息校验失败"),
+    PHONE_FORMAT_VERIFICATION_FAILED("A0151", "手机格式校验失败"),
+    ADDRESS_FORMAT_VERIFICATION_FAILED("A0152", "地址格式校验失败"),
+    EMAIL_FORMAT_VERIFICATION_FAILED("A0153", "邮箱格式校验失败"),
+
+    /** 二级宏观错误码  */
+    USER_LOGIN_EXCEPTION("A0200", "用户登录异常"),
+    USER_ACCOUNT_FROZEN("A0201", "用户账户被冻结"),
+    USER_ACCOUNT_ABOLISHED("A0202", "用户账户已作废"),
+
+    USER_PASSWORD_ERROR("A0210", "用户名或密码错误"),
+    USER_INPUT_PASSWORD_ERROR_LIMIT_EXCEEDED("A0211", "用户输入密码错误次数超限"),
+    USER_NOT_EXIST("A0212", "用户不存在"),
+
+    USER_IDENTITY_VERIFICATION_FAILED("A0220", "用户身份校验失败"),
+    USER_FINGERPRINT_RECOGNITION_FAILED("A0221", "用户指纹识别失败"),
+    USER_FACE_RECOGNITION_FAILED("A0222", "用户面容识别失败"),
+    USER_NOT_AUTHORIZED_THIRD_PARTY_LOGIN("A0223", "用户未获得第三方登录授权"),
+
+    ACCESS_TOKEN_INVALID("A0230", "访问令牌无效或已过期"),
+    REFRESH_TOKEN_INVALID("A0231", "刷新令牌无效或已过期"),
+
+    // 验证码错误
+    USER_VERIFICATION_CODE_ERROR("A0240", "验证码错误"),
+    USER_VERIFICATION_CODE_ATTEMPT_LIMIT_EXCEEDED("A0241", "用户验证码尝试次数超限"),
+    USER_VERIFICATION_CODE_EXPIRED("A0242", "用户验证码过期"),
+
+    /** 二级宏观错误码  */
+    ACCESS_PERMISSION_EXCEPTION("A0300", "访问权限异常"),
+    ACCESS_UNAUTHORIZED("A0301", "访问未授权"),
+    AUTHORIZATION_IN_PROGRESS("A0302", "正在授权中"),
+    USER_AUTHORIZATION_APPLICATION_REJECTED("A0303", "用户授权申请被拒绝"),
+
+    ACCESS_OBJECT_PRIVACY_SETTINGS_BLOCKED("A0310", "因访问对象隐私设置被拦截"),
+    AUTHORIZATION_EXPIRED("A0311", "授权已过期"),
+    NO_PERMISSION_TO_USE_API("A0312", "无权限使用 API"),
+
+    USER_ACCESS_BLOCKED("A0320", "用户访问被拦截"),
+    BLACKLISTED_USER("A0321", "黑名单用户"),
+    ACCOUNT_FROZEN("A0322", "账号被冻结"),
+    ILLEGAL_IP_ADDRESS("A0323", "非法 IP 地址"),
+    GATEWAY_ACCESS_RESTRICTED("A0324", "网关访问受限"),
+    REGION_BLACKLIST("A0325", "地域黑名单"),
+
+    SERVICE_ARREARS("A0330", "服务已欠费"),
+
+    USER_SIGNATURE_EXCEPTION("A0340", "用户签名异常"),
+    RSA_SIGNATURE_ERROR("A0341", "RSA 签名错误"),
+
+    /** 二级宏观错误码  */
+    USER_REQUEST_PARAMETER_ERROR("A0400", "用户请求参数错误"),
+    CONTAINS_ILLEGAL_MALICIOUS_REDIRECT_LINK("A0401", "包含非法恶意跳转链接"),
+    INVALID_USER_INPUT("A0402", "无效的用户输入"),
+
+    REQUEST_REQUIRED_PARAMETER_IS_EMPTY("A0410", "请求必填参数为空"),
+
+    REQUEST_PARAMETER_VALUE_EXCEEDS_ALLOWED_RANGE("A0420", "请求参数值超出允许的范围"),
+    PARAMETER_FORMAT_MISMATCH("A0421", "参数格式不匹配"),
+
+    USER_INPUT_CONTENT_ILLEGAL("A0430", "用户输入内容非法"),
+    CONTAINS_PROHIBITED_SENSITIVE_WORDS("A0431", "包含违禁敏感词"),
+
+    USER_OPERATION_EXCEPTION("A0440", "用户操作异常"),
+
+    /** 二级宏观错误码  */
+    USER_REQUEST_SERVICE_EXCEPTION("A0500", "用户请求服务异常"),
+    REQUEST_LIMIT_EXCEEDED("A0501", "请求次数超出限制"),
+    REQUEST_CONCURRENCY_LIMIT_EXCEEDED("A0502", "请求并发数超出限制"),
+    USER_OPERATION_PLEASE_WAIT("A0503", "用户操作请等待"),
+    WEBSOCKET_CONNECTION_EXCEPTION("A0504", "WebSocket 连接异常"),
+    WEBSOCKET_CONNECTION_DISCONNECTED("A0505", "WebSocket 连接断开"),
+    USER_DUPLICATE_REQUEST("A0506", "请求过于频繁,请稍后再试。"),
+
+    /** 二级宏观错误码  */
+    USER_RESOURCE_EXCEPTION("A0600", "用户资源异常"),
+    ACCOUNT_BALANCE_INSUFFICIENT("A0601", "账户余额不足"),
+    USER_DISK_SPACE_INSUFFICIENT("A0602", "用户磁盘空间不足"),
+    USER_MEMORY_SPACE_INSUFFICIENT("A0603", "用户内存空间不足"),
+    USER_OSS_CAPACITY_INSUFFICIENT("A0604", "用户 OSS 容量不足"),
+    USER_QUOTA_EXHAUSTED("A0605", "用户配额已用光"),
+    USER_RESOURCE_NOT_FOUND("A0606", "用户资源不存在"),
+
+    /** 二级宏观错误码  */
+    UPLOAD_FILE_EXCEPTION("A0700", "上传文件异常"),
+    UPLOAD_FILE_TYPE_MISMATCH("A0701", "上传文件类型不匹配"),
+    UPLOAD_FILE_TOO_LARGE("A0702", "上传文件太大"),
+    UPLOAD_IMAGE_TOO_LARGE("A0703", "上传图片太大"),
+    UPLOAD_VIDEO_TOO_LARGE("A0704", "上传视频太大"),
+    UPLOAD_COMPRESSED_FILE_TOO_LARGE("A0705", "上传压缩文件太大"),
+
+    DELETE_FILE_EXCEPTION("A0710", "删除文件异常"),
+
+    /** 二级宏观错误码  */
+    USER_CURRENT_VERSION_EXCEPTION("A0800", "用户当前版本异常"),
+    USER_INSTALLED_VERSION_NOT_MATCH_SYSTEM("A0801", "用户安装版本与系统不匹配"),
+    USER_INSTALLED_VERSION_TOO_LOW("A0802", "用户安装版本过低"),
+    USER_INSTALLED_VERSION_TOO_HIGH("A0803", "用户安装版本过高"),
+    USER_INSTALLED_VERSION_EXPIRED("A0804", "用户安装版本已过期"),
+    USER_API_REQUEST_VERSION_NOT_MATCH("A0805", "用户 API 请求版本不匹配"),
+    USER_API_REQUEST_VERSION_TOO_HIGH("A0806", "用户 API 请求版本过高"),
+    USER_API_REQUEST_VERSION_TOO_LOW("A0807", "用户 API 请求版本过低"),
+
+    /** 二级宏观错误码  */
+    USER_PRIVACY_NOT_AUTHORIZED("A0900", "用户隐私未授权"),
+    USER_PRIVACY_NOT_SIGNED("A0901", "用户隐私未签署"),
+    USER_CAMERA_NOT_AUTHORIZED("A0903", "用户相机未授权"),
+    USER_PHOTO_LIBRARY_NOT_AUTHORIZED("A0904", "用户图片库未授权"),
+    USER_FILE_NOT_AUTHORIZED("A0905", "用户文件未授权"),
+    USER_LOCATION_INFORMATION_NOT_AUTHORIZED("A0906", "用户位置信息未授权"),
+    USER_CONTACTS_NOT_AUTHORIZED("A0907", "用户通讯录未授权"),
+
+    /** 二级宏观错误码  */
+    USER_DEVICE_EXCEPTION("A1000", "用户设备异常"),
+    USER_CAMERA_EXCEPTION("A1001", "用户相机异常"),
+    USER_MICROPHONE_EXCEPTION("A1002", "用户麦克风异常"),
+    USER_EARPIECE_EXCEPTION("A1003", "用户听筒异常"),
+    USER_SPEAKER_EXCEPTION("A1004", "用户扬声器异常"),
+    USER_GPS_POSITIONING_EXCEPTION("A1005", "用户 GPS 定位异常"),
+
+    /** 一级宏观错误码  */
+    SYSTEM_ERROR("B0001", "系统执行出错"),
+
+    /** 二级宏观错误码  */
+    SYSTEM_EXECUTION_TIMEOUT("B0100", "系统执行超时"),
+
+    /** 二级宏观错误码  */
+    SYSTEM_DISASTER_RECOVERY_FUNCTION_TRIGGERED("B0200", "系统容灾功能被触发"),
+
+    SYSTEM_RATE_LIMITING("B0210", "系统限流"),
+
+    SYSTEM_FUNCTION_DEGRADATION("B0220", "系统功能降级"),
+
+    /** 二级宏观错误码  */
+    SYSTEM_RESOURCE_EXCEPTION("B0300", "系统资源异常"),
+    SYSTEM_RESOURCE_EXHAUSTED("B0310", "系统资源耗尽"),
+    SYSTEM_DISK_SPACE_EXHAUSTED("B0311", "系统磁盘空间耗尽"),
+    SYSTEM_MEMORY_EXHAUSTED("B0312", "系统内存耗尽"),
+    FILE_HANDLE_EXHAUSTED("B0313", "文件句柄耗尽"),
+    SYSTEM_CONNECTION_POOL_EXHAUSTED("B0314", "系统连接池耗尽"),
+    SYSTEM_THREAD_POOL_EXHAUSTED("B0315", "系统线程池耗尽"),
+
+    SYSTEM_RESOURCE_ACCESS_EXCEPTION("B0320", "系统资源访问异常"),
+    SYSTEM_READ_DISK_FILE_FAILED("B0321", "系统读取磁盘文件失败"),
+
+
+    /** 一级宏观错误码  */
+    THIRD_PARTY_SERVICE_ERROR("C0001", "调用第三方服务出错"),
+
+    /** 二级宏观错误码  */
+    MIDDLEWARE_SERVICE_ERROR("C0100", "中间件服务出错"),
+
+    RPC_SERVICE_ERROR("C0110", "RPC 服务出错"),
+    RPC_SERVICE_NOT_FOUND("C0111", "RPC 服务未找到"),
+    RPC_SERVICE_NOT_REGISTERED("C0112", "RPC 服务未注册"),
+    INTERFACE_NOT_EXIST("C0113", "接口不存在"),
+
+    MESSAGE_SERVICE_ERROR("C0120", "消息服务出错"),
+    MESSAGE_DELIVERY_ERROR("C0121", "消息投递出错"),
+    MESSAGE_CONSUMPTION_ERROR("C0122", "消息消费出错"),
+    MESSAGE_SUBSCRIPTION_ERROR("C0123", "消息订阅出错"),
+    MESSAGE_GROUP_NOT_FOUND("C0124", "消息分组未查到"),
+
+    CACHE_SERVICE_ERROR("C0130", "缓存服务出错"),
+    KEY_LENGTH_EXCEEDS_LIMIT("C0131", "key 长度超过限制"),
+    VALUE_LENGTH_EXCEEDS_LIMIT("C0132", "value 长度超过限制"),
+    STORAGE_CAPACITY_FULL("C0133", "存储容量已满"),
+    UNSUPPORTED_DATA_FORMAT("C0134", "不支持的数据格式"),
+
+    CONFIGURATION_SERVICE_ERROR("C0140", "配置服务出错"),
+
+    NETWORK_RESOURCE_SERVICE_ERROR("C0150", "网络资源服务出错"),
+    VPN_SERVICE_ERROR("C0151", "VPN 服务出错"),
+    CDN_SERVICE_ERROR("C0152", "CDN 服务出错"),
+    DOMAIN_NAME_RESOLUTION_SERVICE_ERROR("C0153", "域名解析服务出错"),
+    GATEWAY_SERVICE_ERROR("C0154", "网关服务出错"),
+
+    /** 二级宏观错误码  */
+    THIRD_PARTY_SYSTEM_EXECUTION_TIMEOUT("C0200", "第三方系统执行超时"),
+
+    RPC_EXECUTION_TIMEOUT("C0210", "RPC 执行超时"),
+
+    MESSAGE_DELIVERY_TIMEOUT("C0220", "消息投递超时"),
+
+    CACHE_SERVICE_TIMEOUT("C0230", "缓存服务超时"),
+
+    CONFIGURATION_SERVICE_TIMEOUT("C0240", "配置服务超时"),
+
+    DATABASE_SERVICE_TIMEOUT("C0250", "数据库服务超时"),
+
+    /** 二级宏观错误码  */
+    DATABASE_SERVICE_ERROR("C0300", "数据库服务出错"),
+
+    TABLE_NOT_EXIST("C0311", "表不存在"),
+    COLUMN_NOT_EXIST("C0312", "列不存在"),
+    DATABASE_EXECUTION_SYNTAX_ERROR("C0313", "数据库执行语法错误"),
+
+    MULTIPLE_SAME_NAME_COLUMNS_IN_MULTI_TABLE_ASSOCIATION("C0321", "多表关联中存在多个相同名称的列"),
+
+    DATABASE_DEADLOCK("C0331", "数据库死锁"),
+
+    PRIMARY_KEY_CONFLICT("C0341", "主键冲突"),
+    INTEGRITY_CONSTRAINT_VIOLATION("C0342", "违反了完整性约束"),
+
+    DATABASE_ACCESS_DENIED("C0351", "演示环境已禁用数据库写入功能,请本地部署修改数据库链接或开启Mock模式进行体验"),
+
+    /** 二级宏观错误码  */
+    THIRD_PARTY_DISASTER_RECOVERY_SYSTEM_TRIGGERED("C0400", "第三方容灾系统被触发"),
+    THIRD_PARTY_SYSTEM_RATE_LIMITING("C0401", "第三方系统限流"),
+    THIRD_PARTY_FUNCTION_DEGRADATION("C0402", "第三方功能降级"),
+
+    /** 二级宏观错误码  */
+    NOTIFICATION_SERVICE_ERROR("C0500", "通知服务出错"),
+    SMS_REMINDER_SERVICE_FAILED("C0501", "短信提醒服务失败"),
+    VOICE_REMINDER_SERVICE_FAILED("C0502", "语音提醒服务失败"),
+    EMAIL_REMINDER_SERVICE_FAILED("C0503", "邮件提醒服务失败");
+
+
+    @Override
+    public String getCode() {
+        return code;
+    }
+
+    @Override
+    public String getMsg() {
+        return msg;
+    }
+
+    private String code;
+
+    private String msg;
+
+    @Override
+    public String toString() {
+        return "{" +
+                "\"code\":\"" + code + '\"' +
+                ", \"msg\":\"" + msg + '\"' +
+                '}';
+    }
+
+
+    public static ResultCode getValue(String code) {
+        for (ResultCode value : values()) {
+            if (value.getCode().equals(code)) {
+                return value;
+            }
+        }
+        return SYSTEM_ERROR; // 默认系统执行错误
+    }
+}

+ 77 - 0
src/main/java/com/zsElectric/boot/core/web/WebResponseHelper.java

@@ -0,0 +1,77 @@
+package com.zsElectric.boot.core.web;
+
+import cn.hutool.extra.servlet.JakartaServletUtil;
+import cn.hutool.json.JSONUtil;
+import jakarta.servlet.http.HttpServletResponse;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.MediaType;
+
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Web响应辅助类
+ * <p>
+ * 用于在过滤器、处理器等无法使用 @RestControllerAdvice 的场景中统一处理响应
+ *
+ * @author Ray.Hao
+ * @since 2.0.0
+ */
+@Slf4j
+public class WebResponseHelper {
+
+    /**
+     * 写入错误响应
+     *
+     * @param response   HttpServletResponse
+     * @param resultCode 响应结果码
+     */
+    public static void writeError(HttpServletResponse response, ResultCode resultCode) {
+        writeError(response, resultCode, null);
+    }
+
+    /**
+     * 写入错误响应(带自定义消息)
+     *
+     * @param response   HttpServletResponse
+     * @param resultCode 响应结果码
+     * @param message    自定义消息
+     */
+    public static void writeError(HttpServletResponse response, ResultCode resultCode, String message) {
+        try {
+            // 设置HTTP状态码
+            int httpStatus = mapHttpStatus(resultCode);
+            response.setStatus(httpStatus);
+            response.setCharacterEncoding(StandardCharsets.UTF_8.toString());
+            // 构建响应对象
+            Result<?> result = message == null
+                    ? Result.failed(resultCode)
+                    : Result.failed(resultCode, message);
+
+            // 写入响应
+            JakartaServletUtil.write(response,
+                    JSONUtil.toJsonStr(result),
+                    MediaType.APPLICATION_JSON_VALUE
+            );
+
+        } catch (Exception e) {
+            log.error("写入错误响应失败: resultCode={}, message={}", resultCode, message, e);
+        }
+    }
+
+    /**
+     * 根据业务结果码映射HTTP状态码
+     *
+     * @param resultCode 业务结果码
+     * @return HTTP状态码
+     */
+    private static int mapHttpStatus(ResultCode resultCode) {
+        return switch (resultCode) {
+            case ACCESS_UNAUTHORIZED,
+                    ACCESS_TOKEN_INVALID,
+                    REFRESH_TOKEN_INVALID -> HttpStatus.UNAUTHORIZED.value();
+            default -> HttpStatus.BAD_REQUEST.value();
+        };
+    }
+}
+

+ 113 - 0
src/main/java/com/zsElectric/boot/platform/ai/config/AiProperties.java

@@ -0,0 +1,113 @@
+package com.zsElectric.boot.platform.ai.config;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.stereotype.Component;
+
+import java.util.Map;
+
+/**
+ * AI 配置属性
+ * 
+ * 优势:
+ * 1. 统一管理所有提供商配置
+ * 2. 添加新提供商只需在 yml 中添加配置,无需修改代码
+ * 3. 类型安全,支持 IDE 提示
+ *
+ * @author Ray.Hao
+ */
+@Data
+@Component
+@ConfigurationProperties(prefix = "ai")
+public class AiProperties {
+
+    /**
+     * 是否启用 AI 功能
+     */
+    private Boolean enabled = false;
+
+    /**
+     * 当前使用的提供商(qwen、deepseek、openai 等)
+     */
+    private String provider = "qwen";
+
+    /**
+     * 所有提供商的配置
+     * Key: 提供商名称(qwen、deepseek、openai)
+     * Value: 提供商配置
+     */
+    private Map<String, ProviderConfig> providers;
+
+    /**
+     * 安全配置
+     */
+    private SecurityConfig security = new SecurityConfig();
+
+    /**
+     * 限流配置
+     */
+    private RateLimitConfig rateLimit = new RateLimitConfig();
+
+    /**
+     * 提供商配置
+     */
+    @Data
+    public static class ProviderConfig {
+        /**
+         * API Key
+         */
+        private String apiKey;
+
+        /**
+         * Base URL(统一命名,符合行业惯例)
+         */
+        private String baseUrl;
+
+        /**
+         * 模型名称
+         */
+        private String model;
+
+        /**
+         * 提供商显示名称(可选)
+         */
+        private String displayName;
+
+        /**
+         * 超时时间(秒)
+         */
+        private Integer timeout = 30;
+    }
+
+    /**
+     * 安全配置
+     */
+    @Data
+    public static class SecurityConfig {
+        private Boolean enableAudit = true;
+        private Boolean dangerousOperationsConfirm = true;
+        private java.util.List<String> functionWhitelist;
+        private java.util.List<String> sensitiveParams;
+    }
+
+    /**
+     * 限流配置
+     */
+    @Data
+    public static class RateLimitConfig {
+        private Integer maxExecutionsPerMinute = 10;
+        private Integer maxExecutionsPerDay = 100;
+    }
+
+    /**
+     * 获取当前提供商配置
+     */
+    public ProviderConfig getCurrentProviderConfig() {
+        if (providers == null || !providers.containsKey(provider)) {
+            throw new IllegalStateException("未找到提供商配置: " + provider);
+        }
+        return providers.get(provider);
+    }
+}
+
+

+ 103 - 0
src/main/java/com/zsElectric/boot/platform/ai/controller/AiCommandController.java

@@ -0,0 +1,103 @@
+package com.zsElectric.boot.platform.ai.controller;
+
+import com.zsElectric.boot.core.web.Result;
+import com.zsElectric.boot.platform.ai.model.dto.*;
+import com.zsElectric.boot.platform.ai.model.dto.AiCommandRequestDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiCommandResponseDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiExecuteRequestDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiExecuteResponseDTO;
+import com.zsElectric.boot.platform.ai.service.AiCommandService;
+import io.swagger.v3.oas.annotations.Operation;
+import io.swagger.v3.oas.annotations.Parameter;
+import io.swagger.v3.oas.annotations.tags.Tag;
+import jakarta.servlet.http.HttpServletRequest;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.web.bind.annotation.*;
+
+
/**
 * AI command controller.
 * <p>
 * Parses natural-language commands into function calls, executes them, and
 * exposes history / rollback endpoints. Note that parse/execute failures
 * are wrapped in a successful HTTP envelope with {@code success=false} in
 * the DTO, so frontends inspect the DTO rather than the HTTP status.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Tag(name = "AI命令接口")
@RestController
@RequestMapping("/api/v1/ai/command")
@RequiredArgsConstructor
@Slf4j
public class AiCommandController {

    private final AiCommandService aiCommandService;

    @Operation(summary = "解析自然语言命令")
    @PostMapping("/parse")
    public Result<AiCommandResponseDTO> parseCommand(
            @RequestBody AiCommandRequestDTO request,
            HttpServletRequest httpRequest
    ) {
        log.info("收到AI命令解析请求: {}", request.getCommand());

        try {
            AiCommandResponseDTO response = aiCommandService.parseCommand(request, httpRequest);
            return Result.success(response);
        } catch (Exception e) {
            log.error("命令解析失败", e);
            // Failure is reported inside the DTO (success=false), not via
            // the HTTP envelope.
            return Result.success(AiCommandResponseDTO.builder()
                    .success(false)
                    .error(e.getMessage())
                    .build());
        }
    }

    @Operation(summary = "执行已解析的命令")
    @PostMapping("/execute")
    public Result<AiExecuteResponseDTO> executeCommand(
            @RequestBody AiExecuteRequestDTO request,
            HttpServletRequest httpRequest
    ) {
        log.info("收到AI命令执行请求: {}", request.getFunctionCall().getName());

        try {
            AiExecuteResponseDTO response = aiCommandService.executeCommand(request, httpRequest);
            return Result.success(response);
        } catch (Exception e) {
            log.error("命令执行失败", e);
            // Same convention as parseCommand: error carried in the DTO.
            return Result.success(AiExecuteResponseDTO.builder()
                    .success(false)
                    .error(e.getMessage())
                    .build());
        }
    }

    @Operation(summary = "获取命令执行历史")
    @GetMapping("/history")
    public Result<?> getCommandHistory(
            @Parameter(description = "页码") @RequestParam(defaultValue = "1") Integer page,
            @Parameter(description = "每页数量") @RequestParam(defaultValue = "10") Integer size
    ) {
        return Result.success(aiCommandService.getCommandHistory(page, size));
    }

    @Operation(summary = "获取可用的函数列表")
    @GetMapping("/functions")
    public Result<?> getAvailableFunctions() {
        return Result.success(aiCommandService.getAvailableFunctions());
    }

    @Operation(summary = "撤销命令执行")
    @PostMapping("/rollback/{auditId}")
    public Result<?> rollbackCommand(
            @Parameter(description = "审计ID") @PathVariable String auditId
    ) {
        aiCommandService.rollbackCommand(auditId);
        return Result.success("撤销成功");
    }
}
+
+
+
+
+
+

+ 20 - 0
src/main/java/com/zsElectric/boot/platform/ai/mapper/AiCommandAuditMapper.java

@@ -0,0 +1,20 @@
+package com.zsElectric.boot.platform.ai.mapper;
+
+import com.baomidou.mybatisplus.core.mapper.BaseMapper;
+import com.zsElectric.boot.platform.ai.model.entity.AiCommandAudit;
+import org.apache.ibatis.annotations.Mapper;
+
/**
 * MyBatis-Plus mapper for AI command audit records
 * ({@code ai_command_audit} table).
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Mapper
public interface AiCommandAuditMapper extends BaseMapper<AiCommandAudit> {
}
+
+
+
+
+

+ 37 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiCommandRequestDTO.java

@@ -0,0 +1,37 @@
+package com.zsElectric.boot.platform.ai.model.dto;
+
+import lombok.Data;
+import java.util.Map;
+
/**
 * AI command request DTO: the raw natural-language command plus frontend
 * context used to aid parsing.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
public class AiCommandRequestDTO {

    /**
     * Natural-language command entered by the user.
     */
    private String command;

    /**
     * Current page route (for context).
     */
    private String currentRoute;

    /**
     * Name of the currently active component.
     */
    private String currentComponent;

    /**
     * Additional context information.
     */
    private Map<String, Object> context;
}
+
+
+

+ 53 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiCommandResponseDTO.java

@@ -0,0 +1,53 @@
+package com.zsElectric.boot.platform.ai.model.dto;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import java.util.List;
+
/**
 * AI command parse response DTO: function calls derived from a
 * natural-language command, plus explanation and diagnostics.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class AiCommandResponseDTO {

    /**
     * Whether parsing succeeded.
     */
    private Boolean success;

    /**
     * Parsed function calls.
     */
    private List<FunctionCallDTO> functionCalls;

    /**
     * The AI's interpretation / explanation of the command.
     */
    private String explanation;

    /**
     * Confidence score (0-1).
     */
    private Double confidence;

    /**
     * Error message (set when success is false).
     */
    private String error;

    /**
     * Raw LLM response (for debugging).
     */
    private String rawResponse;
}
+
+
+

+ 36 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiExecuteRequestDTO.java

@@ -0,0 +1,36 @@
+package com.zsElectric.boot.platform.ai.model.dto;
+
+import lombok.Data;
+
/**
 * AI command execution request DTO.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
public class AiExecuteRequestDTO {

    /**
     * Function call to execute.
     */
    private FunctionCallDTO functionCall;

    /**
     * Confirmation mode: auto = execute immediately,
     * manual = requires user confirmation.
     */
    private String confirmMode;

    /**
     * Whether the user has confirmed the operation.
     */
    private Boolean userConfirmed;

    /**
     * Idempotency token (prevents duplicate execution).
     */
    private String idempotencyKey;
}
+
+
+

+ 62 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/dto/AiExecuteResponseDTO.java

@@ -0,0 +1,62 @@
+package com.zsElectric.boot.platform.ai.model.dto;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
/**
 * AI command execution response DTO.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class AiExecuteResponseDTO {

    /**
     * Whether execution succeeded.
     */
    private Boolean success;

    /**
     * Execution result data.
     */
    private Object data;

    /**
     * Human-readable result description.
     */
    private String message;

    /**
     * Number of affected records.
     */
    private Integer affectedRows;

    /**
     * Error message (set when success is false).
     */
    private String error;

    /**
     * Audit ID (for traceability/rollback).
     */
    private String auditId;

    /**
     * Whether the operation still requires user confirmation.
     */
    private Boolean requiresConfirmation;

    /**
     * Prompt shown to the user when confirmation is required.
     */
    private String confirmationPrompt;
}
+
+
+

+ 38 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/dto/FunctionCallDTO.java

@@ -0,0 +1,38 @@
package com.zsElectric.boot.platform.ai.model.dto;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;

/**
 * A single structured function call extracted from a natural-language command.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class FunctionCallDTO {

    /**
     * Function name (e.g. "queryUsers", "deleteUser").
     */
    private String name;

    /**
     * Human-readable description of what the call does.
     */
    private String description;

    /**
     * Call arguments as a name → value map.
     */
    private Map<String, Object> arguments;
}
+
+
+

+ 121 - 0
src/main/java/com/zsElectric/boot/platform/ai/model/entity/AiCommandAudit.java

@@ -0,0 +1,121 @@
package com.zsElectric.boot.platform.ai.model.entity;

import com.baomidou.mybatisplus.annotation.*;
import lombok.Data;
import java.time.LocalDateTime;

/**
 * Audit record for AI command parsing/execution, mapped to table {@code ai_command_audit}.
 *
 * @author Ray.Hao
 * @since 3.0.0
 */
@Data
@TableName("ai_command_audit")
public class AiCommandAudit {

    /**
     * Primary key; a UUID assigned by MyBatis-Plus on insert.
     */
    @TableId(type = IdType.ASSIGN_UUID)
    private String id;

    /**
     * Id of the user who issued the command.
     */
    private Long userId;

    /**
     * Username of the issuer.
     */
    private String username;

    /**
     * Original natural-language command as typed by the user.
     */
    private String originalCommand;

    /**
     * Name of the function resolved from the command.
     */
    private String functionName;

    /**
     * Function arguments, serialized as JSON.
     */
    private String functionArguments;

    /**
     * Execution status: pending, success, failed.
     */
    private String executeStatus;

    /**
     * Execution result, serialized as JSON.
     */
    private String executeResult;

    /**
     * Error message when execution failed.
     */
    private String errorMessage;

    /**
     * Number of records affected by the execution.
     */
    private Integer affectedRows;

    /**
     * Whether the operation is classified as dangerous.
     */
    private Boolean isDangerous;

    /**
     * Whether the operation requires user confirmation.
     */
    private Boolean requiresConfirmation;

    /**
     * Whether the user confirmed the operation.
     */
    private Boolean userConfirmed;

    /**
     * Idempotency token guarding against duplicate execution.
     */
    private String idempotencyKey;

    /**
     * Client IP address.
     */
    private String ipAddress;

    /**
     * Client user agent.
     */
    private String userAgent;

    /**
     * Frontend route the user was on when issuing the command.
     */
    private String currentRoute;

    /**
     * Creation time; filled automatically on insert by the MyBatis-Plus meta handler.
     */
    @TableField(fill = FieldFill.INSERT)
    private LocalDateTime createTime;

    /**
     * Execution duration in milliseconds.
     */
    private Long executionTime;

    /**
     * Free-form remark.
     */
    private String remark;
}
+
+
+

+ 101 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/AbstractOpenAiCompatibleProvider.java

@@ -0,0 +1,101 @@
+package com.zsElectric.boot.platform.ai.provider;
+
+import cn.hutool.core.util.StrUtil;
+import cn.hutool.http.HttpRequest;
+import cn.hutool.http.HttpResponse;
+import cn.hutool.json.JSONObject;
+import cn.hutool.json.JSONUtil;
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * OpenAI 兼容协议的抽象提供商
+ * 
+ * 适用于:通义千问、DeepSeek、OpenAI、ChatGLM 等兼容 OpenAI API 的模型
+ *
+ * @author Ray.Hao
+ */
+@Slf4j
+public abstract class AbstractOpenAiCompatibleProvider implements AiProvider {
+
+    protected final AiProperties.ProviderConfig config;
+
+    public AbstractOpenAiCompatibleProvider(AiProperties.ProviderConfig config) {
+        this.config = config;
+    }
+
+    @Override
+    public String call(String systemPrompt, String userPrompt) {
+        if (!isConfigValid()) {
+            throw new IllegalStateException(getProviderName() + " 配置无效");
+        }
+
+        try {
+            // 构建请求体(OpenAI 标准格式)
+            JSONObject requestBody = JSONUtil.createObj()
+                    .set("model", config.getModel())
+                    .set("messages", JSONUtil.createArray()
+                            .put(JSONUtil.createObj()
+                                    .set("role", "system")
+                                    .set("content", systemPrompt))
+                            .put(JSONUtil.createObj()
+                                    .set("role", "user")
+                                    .set("content", userPrompt))
+                    )
+                    .set("temperature", 0.7);
+
+            log.info("📤 调用 {} API: {}/chat/completions", getProviderName(), config.getBaseUrl());
+            log.debug("请求参数: {}", requestBody);
+
+            // 发送 HTTP 请求
+            HttpResponse response = HttpRequest.post(config.getBaseUrl() + "/chat/completions")
+                    .header("Authorization", "Bearer " + config.getApiKey())
+                    .header("Content-Type", "application/json")
+                    .body(requestBody.toString())
+                    .timeout((int) TimeUnit.SECONDS.toMillis(config.getTimeout()))
+                    .execute();
+
+            // 检查响应状态
+            if (!response.isOk()) {
+                String errorMsg = String.format("%s API 调用失败: HTTP %d - %s",
+                        getProviderName(), response.getStatus(), response.body());
+                log.error(errorMsg);
+                throw new RuntimeException(errorMsg);
+            }
+
+            // 解析响应
+            JSONObject responseJson = JSONUtil.parseObj(response.body());
+            String content = responseJson.getByPath("choices[0].message.content", String.class);
+
+            // 记录 Token 使用情况
+            JSONObject usage = responseJson.getJSONObject("usage");
+            if (usage != null) {
+                Integer inputTokens = usage.getInt("prompt_tokens");
+                Integer outputTokens = usage.getInt("completion_tokens");
+                Integer totalTokens = usage.getInt("total_tokens");
+                log.info("✅ {} 响应成功,tokens: 输入={}, 输出={}, 总计={}",
+                        getProviderName(), inputTokens, outputTokens, totalTokens);
+            }
+
+            log.debug("📥 {} 返回内容: {}", getProviderName(), content);
+            return content;
+
+        } catch (Exception e) {
+            String errorMsg = String.format("%s API 调用失败: %s", getProviderName(), e.getMessage());
+            log.error(errorMsg, e);
+            throw new RuntimeException(errorMsg, e);
+        }
+    }
+
+    @Override
+    public boolean isConfigValid() {
+        return config != null
+                && StrUtil.isNotBlank(config.getApiKey())
+                && StrUtil.isNotBlank(config.getBaseUrl())
+                && StrUtil.isNotBlank(config.getModel());
+    }
+}
+
+

+ 32 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/AiProvider.java

@@ -0,0 +1,32 @@
package com.zsElectric.boot.platform.ai.provider;

/**
 * AI provider abstraction.
 *
 * <p>Strategy pattern: each vendor (Qwen, DeepSeek, OpenAI, ...) supplies its own
 * call logic behind this interface.
 *
 * @author Ray.Hao
 */
public interface AiProvider {

    /**
     * Calls the vendor's AI API.
     *
     * @param systemPrompt system prompt (role / instructions)
     * @param userPrompt   user prompt (the actual request)
     * @return raw response content returned by the model
     */
    String call(String systemPrompt, String userPrompt);

    /**
     * Human-readable provider name, used in logs and error messages.
     */
    String getProviderName();

    /**
     * Whether the provider's configuration (API key, endpoint, model) is complete.
     */
    boolean isConfigValid();
}
+
+

+ 51 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/AiProviderFactory.java

@@ -0,0 +1,51 @@
+package com.zsElectric.boot.platform.ai.provider;
+
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import lombok.RequiredArgsConstructor;
+import org.springframework.stereotype.Component;
+
+import java.util.Map;
+
+/**
+ * AI 提供商工厂
+ * 
+ * 职责:根据配置获取对应的提供商实例
+ *
+ * @author Ray.Hao
+ */
+@Component
+@RequiredArgsConstructor
+public class AiProviderFactory {
+
+    private final AiProperties aiProperties;
+    
+    /**
+     * Spring 自动注入所有 AiProvider 实现类
+     * Key: Bean 名称(qwen、deepseek、openai)
+     * Value: 提供商实例
+     */
+    private final Map<String, AiProvider> providers;
+
+    /**
+     * 获取当前配置的提供商
+     */
+    public AiProvider getCurrentProvider() {
+        String providerName = aiProperties.getProvider();
+        
+        if (!providers.containsKey(providerName)) {
+            throw new IllegalStateException("不支持的 AI 提供商: " + providerName 
+                    + ",可用提供商: " + providers.keySet());
+        }
+        
+        AiProvider provider = providers.get(providerName);
+        
+        if (!provider.isConfigValid()) {
+            throw new IllegalStateException(provider.getProviderName() 
+                    + " 配置无效,请检查 API Key、Base URL 和 Model 是否配置");
+        }
+        
+        return provider;
+    }
+}
+
+

+ 25 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/impl/DeepSeekProvider.java

@@ -0,0 +1,25 @@
+package com.zsElectric.boot.platform.ai.provider.impl;
+
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import com.zsElectric.boot.platform.ai.provider.AbstractOpenAiCompatibleProvider;
+import org.springframework.stereotype.Component;
+
+/**
+ * DeepSeek 提供商
+ *
+ * @author Ray.Hao
+ */
+@Component("deepseek")
+public class DeepSeekProvider extends AbstractOpenAiCompatibleProvider {
+
+    public DeepSeekProvider(AiProperties aiProperties) {
+        super(aiProperties.getProviders().get("deepseek"));
+    }
+
+    @Override
+    public String getProviderName() {
+        return config.getDisplayName() != null ? config.getDisplayName() : "DeepSeek";
+    }
+}
+
+

+ 30 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/impl/OpenAiProvider.java

@@ -0,0 +1,30 @@
+package com.zsElectric.boot.platform.ai.provider.impl;
+
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import com.zsElectric.boot.platform.ai.provider.AbstractOpenAiCompatibleProvider;
+import org.springframework.stereotype.Component;
+
+/**
+ * OpenAI 提供商(GPT-4、GPT-3.5 等)
+ * 
+ * 添加新提供商只需:
+ * 1. 继承 AbstractOpenAiCompatibleProvider
+ * 2. 实现 getProviderName()
+ * 3. 在配置文件中添加配置
+ *
+ * @author Ray.Hao
+ */
+@Component("openai")
+public class OpenAiProvider extends AbstractOpenAiCompatibleProvider {
+
+    public OpenAiProvider(AiProperties aiProperties) {
+        super(aiProperties.getProviders().get("openai"));
+    }
+
+    @Override
+    public String getProviderName() {
+        return config.getDisplayName() != null ? config.getDisplayName() : "OpenAI";
+    }
+}
+
+

+ 25 - 0
src/main/java/com/zsElectric/boot/platform/ai/provider/impl/QwenProvider.java

@@ -0,0 +1,25 @@
+package com.zsElectric.boot.platform.ai.provider.impl;
+
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import com.zsElectric.boot.platform.ai.provider.AbstractOpenAiCompatibleProvider;
+import org.springframework.stereotype.Component;
+
+/**
+ * 阿里通义千问提供商
+ *
+ * @author Ray.Hao
+ */
+@Component("qwen")
+public class QwenProvider extends AbstractOpenAiCompatibleProvider {
+
+    public QwenProvider(AiProperties aiProperties) {
+        super(aiProperties.getProviders().get("qwen"));
+    }
+
+    @Override
+    public String getProviderName() {
+        return config.getDisplayName() != null ? config.getDisplayName() : "阿里通义千问";
+    }
+}
+
+

+ 67 - 0
src/main/java/com/zsElectric/boot/platform/ai/service/AiCommandService.java

@@ -0,0 +1,67 @@
+package com.zsElectric.boot.platform.ai.service;
+
+import com.zsElectric.boot.platform.ai.model.dto.*;
+import com.zsElectric.boot.platform.ai.model.dto.AiCommandRequestDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiCommandResponseDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiExecuteRequestDTO;
+import com.zsElectric.boot.platform.ai.model.dto.AiExecuteResponseDTO;
+import jakarta.servlet.http.HttpServletRequest;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * AI 命令服务接口
+ *
+ * @author Ray.Hao
+ * @since 3.0.0
+ */
+public interface AiCommandService {
+
+    /**
+     * 解析自然语言命令
+     *
+     * @param request     命令请求
+     * @param httpRequest HTTP 请求
+     * @return 解析结果
+     */
+    AiCommandResponseDTO parseCommand(AiCommandRequestDTO request, HttpServletRequest httpRequest);
+
+    /**
+     * 执行已解析的命令
+     *
+     * @param request     执行请求
+     * @param httpRequest HTTP 请求
+     * @return 执行结果
+     */
+    AiExecuteResponseDTO executeCommand(AiExecuteRequestDTO request, HttpServletRequest httpRequest);
+
+    /**
+     * 获取命令执行历史
+     *
+     * @param page 页码
+     * @param size 每页数量
+     * @return 历史记录
+     */
+    Map<String, Object> getCommandHistory(Integer page, Integer size);
+
+    /**
+     * 获取可用的函数列表
+     *
+     * @return 函数列表
+     */
+    List<Map<String, Object>> getAvailableFunctions();
+
+    /**
+     * 撤销命令执行
+     *
+     * @param auditId 审计ID
+     */
+    void rollbackCommand(String auditId);
+}
+
+
+
+
+
+

+ 263 - 0
src/main/java/com/zsElectric/boot/platform/ai/service/impl/AiCommandServiceImpl.java

@@ -0,0 +1,263 @@
+package com.zsElectric.boot.platform.ai.service.impl;
+
+import cn.hutool.json.JSONArray;
+import cn.hutool.json.JSONObject;
+import cn.hutool.json.JSONUtil;
+import com.zsElectric.boot.platform.ai.config.AiProperties;
+import com.zsElectric.boot.platform.ai.model.dto.*;
+import com.zsElectric.boot.platform.ai.model.dto.*;
+import com.zsElectric.boot.platform.ai.model.entity.AiCommandAudit;
+import com.zsElectric.boot.platform.ai.provider.AiProvider;
+import com.zsElectric.boot.platform.ai.provider.AiProviderFactory;
+import com.zsElectric.boot.platform.ai.service.AiCommandService;
+import jakarta.servlet.http.HttpServletRequest;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.stereotype.Service;
+
+import java.util.*;
+
+/**
+ * AI 命令服务实现类(重构版)
+ * 
+ * 重构改进:
+ * 1. ✅ 使用策略模式 + 工厂模式管理提供商,消除 switch-case
+ * 2. ✅ 配置映射化,添加新提供商只需配置,无需修改代码
+ * 3. ✅ 统一命名为 base-url,符合行业惯例
+ * 4. ✅ Service 层直接返回 DTO,不包装 Result(由 Controller 统一处理)
+ * 5. ✅ 职责清晰,扩展性强
+ *
+ * @author Ray.Hao
+ * @since 3.0.0
+ */
+@Service
+@Slf4j
+@RequiredArgsConstructor
+public class AiCommandServiceImpl implements AiCommandService {
+
+    private final AiProperties aiProperties;
+    private final AiProviderFactory providerFactory;
+
+    // 审计日志存储(简化实现,实际应使用数据库)
+    private final Map<String, AiCommandAudit> auditStore = new HashMap<>();
+
+    /**
+     * 解析自然语言命令
+     * 
+     * 注意:直接返回 DTO,不包装 Result
+     * Controller 负责统一包装成 Result
+     */
+    @Override
+    public AiCommandResponseDTO parseCommand(AiCommandRequestDTO request, HttpServletRequest httpRequest) {
+        // 检查 AI 功能是否启用
+        if (!aiProperties.getEnabled()) {
+            throw new IllegalStateException("AI 功能未启用,请在配置文件中设置 ai.enabled=true");
+        }
+
+        try {
+            // 获取当前提供商(自动校验配置)
+            AiProvider provider = providerFactory.getCurrentProvider();
+            
+            log.info("📤 使用 {} 解析命令: {}", provider.getProviderName(), request.getCommand());
+
+            // 构建提示词
+            String systemPrompt = buildSystemPrompt();
+            String userPrompt = buildUserPrompt(request);
+
+            // 调用 AI API
+            String response = provider.call(systemPrompt, userPrompt);
+
+            // 解析响应
+            return parseAiResponse(response);
+
+        } catch (IllegalStateException e) {
+            // 配置错误,抛出让 Controller 处理
+            throw e;
+        } catch (Exception e) {
+            log.error("解析命令失败", e);
+            throw new RuntimeException("解析命令失败: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * 执行已解析的命令
+     */
+    @Override
+    public AiExecuteResponseDTO executeCommand(AiExecuteRequestDTO request, HttpServletRequest httpRequest) {
+        // TODO: 实现命令执行逻辑
+        throw new UnsupportedOperationException("待实现");
+    }
+
+    /**
+     * 获取命令执行历史
+     */
+    @Override
+    public Map<String, Object> getCommandHistory(Integer page, Integer size) {
+        List<AiCommandAudit> allAudits = new ArrayList<>(auditStore.values());
+        allAudits.sort(Comparator.comparing(AiCommandAudit::getCreateTime).reversed());
+
+        int total = allAudits.size();
+        int start = (page - 1) * size;
+        int end = Math.min(start + size, total);
+
+        List<AiCommandAudit> pageData = start < total ? allAudits.subList(start, end) : new ArrayList<>();
+
+        Map<String, Object> result = new HashMap<>();
+        result.put("list", pageData);
+        result.put("total", total);
+        result.put("page", page);
+        result.put("size", size);
+
+        return result;
+    }
+
+    /**
+     * 获取可用的函数列表
+     */
+    @Override
+    public List<Map<String, Object>> getAvailableFunctions() {
+        List<Map<String, Object>> functions = new ArrayList<>();
+
+        // 用户管理函数
+        functions.add(createFunctionDef(
+                "deleteUser",
+                "删除用户",
+                Map.of("name", "String - 用户姓名", "id", "Long - 用户ID(可选)")
+        ));
+
+        functions.add(createFunctionDef(
+                "updateUser",
+                "更新用户信息",
+                Map.of("id", "Long - 用户ID", "nickname", "String - 昵称", "status", "Integer - 状态")
+        ));
+
+        functions.add(createFunctionDef(
+                "queryUsers",
+                "查询用户列表",
+                Map.of("name", "String - 姓名(可选)", "status", "Integer - 状态(可选)")
+        ));
+
+        // 角色管理函数
+        functions.add(createFunctionDef(
+                "assignRole",
+                "分配角色给用户",
+                Map.of("userId", "Long - 用户ID", "roleIds", "List<Long> - 角色ID列表")
+        ));
+
+        return functions;
+    }
+
+    /**
+     * 撤销命令执行
+     */
+    @Override
+    public void rollbackCommand(String auditId) {
+        AiCommandAudit audit = auditStore.get(auditId);
+        if (audit == null) {
+            throw new RuntimeException("审计记录不存在");
+        }
+
+        if (!"success".equals(audit.getExecuteStatus())) {
+            throw new RuntimeException("只能撤销成功执行的命令");
+        }
+
+        // TODO: 实现具体的回滚逻辑
+        log.info("撤销命令执行: auditId={}, function={}", auditId, audit.getFunctionName());
+        throw new UnsupportedOperationException("回滚功能尚未实现");
+    }
+
+    // ==================== 私有方法 ====================
+
+    /**
+     * 构建系统提示词(包含可用函数定义)
+     */
+    private String buildSystemPrompt() {
+        return """
+                你是一个专业的命令解析助手。你的任务是将用户的自然语言命令转换为结构化的函数调用。
+                
+                可用函数:
+                1. queryUsers - 查询用户列表
+                   参数:keywords(搜索关键字), status(状态), deptId(部门ID)
+                   
+                2. deleteUser - 删除用户
+                   参数:userId(用户ID)
+                   
+                3. updateUser - 更新用户信息
+                   参数:userId(用户ID), nickname(昵称), mobile(手机号)
+                
+                请将命令解析为以下 JSON 格式:
+                {
+                  "functionCalls": [
+                    {
+                      "function": "函数名",
+                      "parameters": { "参数名": "参数值" },
+                      "description": "操作说明"
+                    }
+                  ]
+                }
+                """;
+    }
+
+    /**
+     * 构建用户提示词
+     */
+    private String buildUserPrompt(AiCommandRequestDTO request) {
+        return "请解析以下命令:" + request.getCommand();
+    }
+
+    /**
+     * 解析 AI 响应
+     */
+    private AiCommandResponseDTO parseAiResponse(String response) {
+        try {
+            // 提取 JSON
+            int jsonStart = response.indexOf("{");
+            int jsonEnd = response.lastIndexOf("}") + 1;
+
+            if (jsonStart == -1 || jsonEnd == 0) {
+                throw new IllegalArgumentException("AI 返回格式错误:未找到 JSON");
+            }
+
+            String jsonStr = response.substring(jsonStart, jsonEnd);
+            JSONObject json = JSONUtil.parseObj(jsonStr);
+
+            // 解析函数调用列表
+            List<FunctionCallDTO> functionCalls = new ArrayList<>();
+            JSONArray callsArray = json.getJSONArray("functionCalls");
+
+            if (callsArray != null) {
+                for (int i = 0; i < callsArray.size(); i++) {
+                    JSONObject call = callsArray.getJSONObject(i);
+                    functionCalls.add(FunctionCallDTO.builder()
+                            .name(call.getStr("function"))
+                            .arguments(call.getJSONObject("parameters") != null ? 
+                                call.getJSONObject("parameters").toBean(Map.class) : new HashMap<>())
+                            .description(call.getStr("description"))
+                            .build());
+                }
+            }
+
+            return AiCommandResponseDTO.builder()
+                    .success(true)
+                    .functionCalls(functionCalls)
+                    .rawResponse(response)
+                    .build();
+
+        } catch (Exception e) {
+            log.error("解析 AI 响应失败", e);
+            throw new RuntimeException("解析响应失败: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * 创建函数定义
+     */
+    private Map<String, Object> createFunctionDef(String name, String description, Map<String, String> parameters) {
+        Map<String, Object> func = new HashMap<>();
+        func.put("name", name);
+        func.put("description", description);
+        func.put("parameters", parameters);
+        return func;
+    }
+}
+

+ 111 - 0
src/main/java/com/zsElectric/boot/platform/codegen/controller/CodegenController.java

@@ -0,0 +1,111 @@
package com.zsElectric.boot.platform.codegen.controller;

import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.zsElectric.boot.core.web.PageResult;
import com.zsElectric.boot.core.web.Result;
import com.zsElectric.boot.config.property.CodegenProperties;
import com.zsElectric.boot.common.enums.LogModuleEnum;
import com.zsElectric.boot.platform.codegen.service.CodegenService;
import com.zsElectric.boot.platform.codegen.model.form.GenConfigForm;
import com.zsElectric.boot.platform.codegen.model.query.TablePageQuery;
import com.zsElectric.boot.platform.codegen.model.vo.CodegenPreviewVO;
import com.zsElectric.boot.platform.codegen.model.vo.TablePageVO;
import com.zsElectric.boot.common.annotation.Log;
import com.zsElectric.boot.platform.codegen.service.GenConfigService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.servlet.ServletOutputStream;
import jakarta.servlet.http.HttpServletResponse;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;

import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.List;

/**
 * Code generator REST controller.
 *
 * @author Ray
 * @since 2.10.0
 */
@Tag(name = "11.代码生成")
@RestController
@RequestMapping("/api/v1/codegen")
@RequiredArgsConstructor
@Slf4j
public class CodegenController {

    private final CodegenService codegenService;
    private final GenConfigService genConfigService;
    private final CodegenProperties codegenProperties;

    /**
     * Paginated list of database tables eligible for code generation.
     */
    @Operation(summary = "获取数据表分页列表")
    @GetMapping("/table/page")
    @Log(value = "代码生成分页列表", module = LogModuleEnum.OTHER)
    public PageResult<TablePageVO> getTablePage(
            TablePageQuery queryParams
    ) {
        Page<TablePageVO> result = codegenService.getTablePage(queryParams);
        return PageResult.success(result);
    }

    /**
     * Loads the generation config form for a table.
     */
    @Operation(summary = "获取代码生成配置")
    @GetMapping("/{tableName}/config")
    public Result<GenConfigForm> getGenConfigFormData(
            @Parameter(description = "表名", example = "sys_user") @PathVariable String tableName
    ) {
        GenConfigForm formData = genConfigService.getGenConfigFormData(tableName);
        return Result.success(formData);
    }

    /**
     * Saves the generation config for a table.
     *
     * <p>NOTE(review): the path declares {tableName} but the handler never binds it —
     * presumably the table name comes from formData; confirm. The @Log label
     * "生成代码" also looks copy-pasted (this endpoint saves config) — confirm.
     */
    @Operation(summary = "保存代码生成配置")
    @PostMapping("/{tableName}/config")
    @Log(value = "生成代码", module = LogModuleEnum.OTHER)
    public Result<?> saveGenConfig(@RequestBody GenConfigForm formData) {
        genConfigService.saveGenConfig(formData);
        return Result.success();
    }

    /**
     * Deletes the generation config for a table.
     */
    @Operation(summary = "删除代码生成配置")
    @DeleteMapping("/{tableName}/config")
    public Result<?> deleteGenConfig(
            @Parameter(description = "表名", example = "sys_user") @PathVariable String tableName
    ) {
        genConfigService.deleteGenConfig(tableName);
        return Result.success();
    }

    /**
     * Renders the generated code for preview without writing any files.
     */
    @Operation(summary = "获取预览生成代码")
    @GetMapping("/{tableName}/preview")
    @Log(value = "预览生成代码", module = LogModuleEnum.OTHER)
    public Result<List<CodegenPreviewVO>> getTablePreviewData(@PathVariable String tableName,
                                                              @RequestParam(value = "pageType", required = false, defaultValue = "classic") String pageType) {
        List<CodegenPreviewVO> list = codegenService.getCodegenPreviewData(tableName, pageType);
        return Result.success(list);
    }

    /**
     * Streams the generated code for one or more tables (comma-separated in the
     * path) as a zip attachment.
     */
    @Operation(summary = "下载代码")
    @GetMapping("/{tableName}/download")
    @Log(value = "下载代码", module = LogModuleEnum.OTHER)
    public void downloadZip(HttpServletResponse response, @PathVariable String tableName,
                            @RequestParam(value = "pageType", required = false, defaultValue = "classic") String pageType) {
        String[] tableNames = tableName.split(",");
        byte[] data = codegenService.downloadCode(tableNames, pageType);

        // reset() clears anything already buffered so the zip bytes go out clean.
        response.reset();
        response.setHeader("Content-Disposition", "attachment; filename=" + URLEncoder.encode(codegenProperties.getDownloadFileName(), StandardCharsets.UTF_8));
        response.setContentType("application/octet-stream; charset=UTF-8");

        try (ServletOutputStream outputStream = response.getOutputStream()) {
            outputStream.write(data);
            outputStream.flush();
        } catch (IOException e) {
            log.error("Error while writing the zip file to response", e);
            throw new RuntimeException("Failed to write the zip file to response", e);
        }
    }
}

+ 41 - 0
src/main/java/com/zsElectric/boot/platform/codegen/converter/CodegenConverter.java

@@ -0,0 +1,41 @@
package com.zsElectric.boot.platform.codegen.converter;

import com.zsElectric.boot.platform.codegen.model.entity.GenConfig;
import com.zsElectric.boot.platform.codegen.model.entity.GenFieldConfig;
import com.zsElectric.boot.platform.codegen.model.form.GenConfigForm;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;

import java.util.List;

/**
 * MapStruct converter between code-generation entities and the config form.
 *
 * <p>The explicit {@code @Mapping} entries qualify each target with the
 * {@code genConfig} source parameter because this method has two sources.
 *
 * @author Ray
 * @since 2.10.0
 */
@Mapper(componentModel = "spring")
public interface CodegenConverter {

    /** Merges a GenConfig entity and its field configs into one form object. */
    @Mapping(source = "genConfig.tableName", target = "tableName")
    @Mapping(source = "genConfig.businessName", target = "businessName")
    @Mapping(source = "genConfig.moduleName", target = "moduleName")
    @Mapping(source = "genConfig.packageName", target = "packageName")
    @Mapping(source = "genConfig.entityName", target = "entityName")
    @Mapping(source = "genConfig.author", target = "author")
    @Mapping(source = "genConfig.pageType", target = "pageType")
    @Mapping(source = "genConfig.removeTablePrefix", target = "removeTablePrefix")
    @Mapping(source = "fieldConfigs", target = "fieldConfigs")
    GenConfigForm toGenConfigForm(GenConfig genConfig, List<GenFieldConfig> fieldConfigs);

    /** Entity field configs -> form field configs (list). */
    List<GenConfigForm.FieldConfig> toGenFieldConfigForm(List<GenFieldConfig> fieldConfigs);

    /** Entity field config -> form field config (single). */
    GenConfigForm.FieldConfig toGenFieldConfigForm(GenFieldConfig genFieldConfig);

    /** Form -> GenConfig entity. */
    GenConfig toGenConfig(GenConfigForm formData);

    /** Form field configs -> entity field configs (list). */
    List<GenFieldConfig> toGenFieldConfig(List<GenConfigForm.FieldConfig> fieldConfigs);

    /** Form field config -> entity field config (single). */
    GenFieldConfig toGenFieldConfig(GenConfigForm.FieldConfig fieldConfig);

}

+ 89 - 0
src/main/java/com/zsElectric/boot/platform/codegen/enums/FormTypeEnum.java

@@ -0,0 +1,89 @@
package com.zsElectric.boot.platform.codegen.enums;

import com.baomidou.mybatisplus.annotation.EnumValue;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonValue;
import com.zsElectric.boot.common.base.IBaseEnum;
import lombok.Getter;
import lombok.RequiredArgsConstructor;

/**
 * Form widget type enum for the code generator.
 *
 * <p>Javadoc fix: the comments on DATE and DATE_TIME were swapped relative to
 * their labels in the original.
 *
 * @author Ray
 * @since 2.10.0
 */
@Getter
@RequiredArgsConstructor
public enum FormTypeEnum implements IBaseEnum<Integer> {

    /**
     * Text input
     */
    INPUT(1, "输入框"),

    /**
     * Dropdown select
     */
    SELECT(2, "下拉框"),

    /**
     * Radio button
     */
    RADIO(3, "单选框"),

    /**
     * Checkbox
     */
    CHECK_BOX(4, "复选框"),

    /**
     * Number input
     */
    INPUT_NUMBER(5, "数字输入框"),

    /**
     * Switch toggle
     */
    SWITCH(6, "开关"),

    /**
     * Text area
     */
    TEXT_AREA(7, "文本域"),

    /**
     * Date picker
     */
    DATE(8, "日期框"),

    /**
     * Date-time picker
     */
    DATE_TIME(9, "日期时间框"),

    /**
     * Hidden field
     */
    HIDDEN(10, "隐藏域");


    // MyBatis-Plus: this value is what gets stored in the database column.
    @EnumValue
    @JsonValue
    private final Integer value;

    // Display label (the @JsonValue above means serialization emits `value`, not this).
    private final String label;


    /**
     * Deserializes an integer back into the enum constant.
     *
     * @throws IllegalArgumentException when no constant has the given value
     */
    @JsonCreator
    public static FormTypeEnum fromValue(Integer value) {
        for (FormTypeEnum type : FormTypeEnum.values()) {
            if (type.getValue().equals(value)) {
                return type;
            }
        }
        throw new IllegalArgumentException("No enum constant with value " + value);
    }
}

+ 85 - 0
src/main/java/com/zsElectric/boot/platform/codegen/enums/JavaTypeEnum.java

@@ -0,0 +1,85 @@
+package com.zsElectric.boot.platform.codegen.enums;
+
+import lombok.Getter;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * 表单类型枚举
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@Getter
+public enum JavaTypeEnum {
+
+    VARCHAR("varchar", "String", "string"),
+    CHAR("char", "String", "string"),
+    BLOB("blob", "byte[]", "Uint8Array"),
+    TEXT("text", "String", "string"),
+    JSON("json", "String", "any"),
+    INTEGER("int", "Integer", "number"),
+    TINYINT("tinyint", "Integer", "number"),
+    SMALLINT("smallint", "Integer", "number"),
+    MEDIUMINT("mediumint", "Integer", "number"),
+    BIGINT("bigint", "Long", "number"),
+    FLOAT("float", "Float", "number"),
+    DOUBLE("double", "Double", "number"),
+    DECIMAL("decimal", "BigDecimal", "number"),
+    DATE("date", "LocalDate", "Date"),
+    DATETIME("datetime", "LocalDateTime", "Date"),
+    TIMESTAMP("timestamp", "LocalDateTime", "Date");
+
+    // 数据库类型
+    private final String dbType;
+    // Java类型
+    private final String javaType;
+    // TypeScript类型
+    private final String tsType;
+
+    // 数据库类型和Java类型的映射
+    private static final Map<String, JavaTypeEnum> typeMap = new HashMap<>();
+
+    // 初始化映射关系
+    static {
+        for (JavaTypeEnum javaTypeEnum : JavaTypeEnum.values()) {
+            typeMap.put(javaTypeEnum.getDbType(), javaTypeEnum);
+        }
+    }
+
+    JavaTypeEnum(String dbType, String javaType, String tsType) {
+        this.dbType = dbType;
+        this.javaType = javaType;
+        this.tsType = tsType;
+    }
+
+    /**
+     * 根据数据库类型获取对应的Java类型
+     *
+     * @param columnType 列类型
+     * @return 对应的Java类型
+     */
+    public static String getJavaTypeByColumnType(String columnType) {
+        JavaTypeEnum javaTypeEnum = typeMap.get(columnType);
+        if (javaTypeEnum != null) {
+            return javaTypeEnum.getJavaType();
+        }
+        return null;
+    }
+
+    /**
+     * 根据Java类型获取对应的TypeScript类型
+     *
+     * @param javaType Java类型
+     * @return 对应的TypeScript类型
+     */
+    public static String getTsTypeByJavaType(String javaType) {
+        for (JavaTypeEnum javaTypeEnum : JavaTypeEnum.values()) {
+            if (javaTypeEnum.getJavaType().equals(javaType)) {
+                return javaTypeEnum.getTsType();
+            }
+        }
+        return null;
+    }
+}

+ 73 - 0
src/main/java/com/zsElectric/boot/platform/codegen/enums/QueryTypeEnum.java

@@ -0,0 +1,73 @@
+package com.zsElectric.boot.platform.codegen.enums;
+
+import com.baomidou.mybatisplus.annotation.EnumValue;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
+import com.zsElectric.boot.common.base.IBaseEnum;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+
+/**
+ * 查询类型枚举
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@Getter
+@RequiredArgsConstructor
+public enum QueryTypeEnum implements IBaseEnum<Integer> {
+
+    /** 等于 */
+    EQ(1, "="),
+
+    /** 模糊匹配 */
+    LIKE(2, "LIKE '%s%'"),
+
+    /** 包含 */
+    IN(3, "IN"),
+
+    /** 范围 */
+    BETWEEN(4, "BETWEEN"),
+
+    /** 大于 */
+    GT(5, ">"),
+
+    /** 大于等于 */
+    GE(6, ">="),
+
+    /** 小于 */
+    LT(7, "<"),
+
+    /** 小于等于 */
+    LE(8, "<="),
+
+    /** 不等于 */
+    NE(9, "!="),
+
+    /** 左模糊匹配 */
+    LIKE_LEFT(10, "LIKE '%s'"),
+
+    /** 右模糊匹配 */
+    LIKE_RIGHT(11, "LIKE 's%'");
+
+
+    // 存储在数据库中的枚举属性值
+    @EnumValue
+    @JsonValue
+    private final Integer value;
+
+    // 序列化成 JSON 时的属性值
+    private final String label;
+
+
+    @JsonCreator
+    public static QueryTypeEnum fromValue(Integer value) {
+        for (QueryTypeEnum type : QueryTypeEnum.values()) {
+            if (type.getValue().equals(value)) {
+                return type;
+            }
+        }
+        throw new IllegalArgumentException("No enum constant with value " + value);
+    }
+
+}

+ 47 - 0
src/main/java/com/zsElectric/boot/platform/codegen/mapper/DatabaseMapper.java

@@ -0,0 +1,47 @@
package com.zsElectric.boot.platform.codegen.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.zsElectric.boot.platform.codegen.model.bo.ColumnMetaData;
import com.zsElectric.boot.platform.codegen.model.bo.TableMetaData;
import com.zsElectric.boot.platform.codegen.model.query.TablePageQuery;
import com.zsElectric.boot.platform.codegen.model.vo.TablePageVO;
import org.apache.ibatis.annotations.Mapper;

import java.util.List;


/**
 * Database metadata access layer used by the code generator to inspect
 * tables and columns of the current schema.
 *
 * NOTE(review): extends the raw type {@code BaseMapper} (no entity type
 * parameter). If the inherited CRUD methods are never used, consider
 * dropping the extends clause or parameterizing it — confirm no callers
 * rely on the inherited methods before changing.
 *
 * @author Ray
 * @since 2.9.0
 */
@Mapper
public interface DatabaseMapper extends BaseMapper {

    /**
     * Returns a page of tables matching the query parameters.
     *
     * @param page        MyBatis-Plus pagination object carrying page number/size
     * @param queryParams filter conditions (e.g. table name keywords)
     * @return page of table summaries
     */
    Page<TablePageVO> getTablePage(Page<TablePageVO> page, TablePageQuery queryParams);

    /**
     * Returns the column metadata of the given table.
     *
     * @param tableName name of the table to inspect
     * @return list of column metadata entries
     */
    List<ColumnMetaData> getTableColumns(String tableName);

    /**
     * Returns the table-level metadata (comment, engine, charset, ...).
     *
     * @param tableName name of the table to inspect
     * @return table metadata
     */
    TableMetaData getTableMetadata(String tableName);
}

+ 20 - 0
src/main/java/com/zsElectric/boot/platform/codegen/mapper/GenConfigMapper.java

@@ -0,0 +1,20 @@
package com.zsElectric.boot.platform.codegen.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.zsElectric.boot.platform.codegen.model.entity.GenConfig;
import org.apache.ibatis.annotations.Mapper;

/**
 * Data access layer for the code-generation base configuration
 * ({@code gen_config} table); inherits standard CRUD from MyBatis-Plus.
 *
 * @author Ray
 * @since 2.10.0
 */
@Mapper
public interface GenConfigMapper extends BaseMapper<GenConfig> {

}
+
+
+
+

+ 20 - 0
src/main/java/com/zsElectric/boot/platform/codegen/mapper/GenFieldConfigMapper.java

@@ -0,0 +1,20 @@
package com.zsElectric.boot.platform.codegen.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.zsElectric.boot.platform.codegen.model.entity.GenFieldConfig;
import org.apache.ibatis.annotations.Mapper;

/**
 * Data access layer for per-field code-generation configuration
 * ({@code gen_field_config} table); inherits standard CRUD from MyBatis-Plus.
 *
 * @author Ray
 * @since 2.10.0
 */
@Mapper
public interface GenFieldConfigMapper extends BaseMapper<GenFieldConfig> {

}
+
+
+
+

+ 50 - 0
src/main/java/com/zsElectric/boot/platform/codegen/model/bo/ColumnMetaData.java

@@ -0,0 +1,50 @@
+package com.zsElectric.boot.platform.codegen.model.bo;
+
+import io.swagger.v3.oas.annotations.media.Schema;
+import lombok.Data;
+
+@Schema(description = "数据表字段VO")
+@Data
+public class ColumnMetaData {
+
+        /**
+         * 字段名称
+         */
+        private String columnName;
+
+        /**
+         * 字段类型
+         */
+        private String dataType;
+
+        /**
+         * 字段描述
+         */
+        private String columnComment;
+
+        /**
+         * 字段长度
+         */
+        private Long characterMaximumLength;
+
+        /**
+         * 是否主键(1-是 0-否)
+         */
+        private Integer isPrimaryKey;
+
+        /**
+         * 是否可为空(1-是 0-否)
+         */
+        private String isNullable;
+
+        /**
+         * 字符集
+         */
+        private String characterSetName;
+
+        /**
+         * 排序规则
+         */
+        private String collationName;
+
+}

+ 45 - 0
src/main/java/com/zsElectric/boot/platform/codegen/model/bo/TableMetaData.java

@@ -0,0 +1,45 @@
+package com.zsElectric.boot.platform.codegen.model.bo;
+
+import lombok.Data;
+
+
+/**
+ * 数据表元数据
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@Data
+public class TableMetaData {
+
+    /**
+     * 表名称
+     */
+    private String tableName;
+
+    /**
+     * 表描述
+     */
+    private String tableComment;
+
+    /**
+     * 排序规则
+     */
+    private String tableCollation;
+
+    /**
+     * 存储引擎
+     */
+    private String engine;
+
+    /**
+     * 字符集
+     */
+    private String charset;
+
+    /**
+     * 创建时间
+     */
+    private String createTime;
+
+}

+ 64 - 0
src/main/java/com/zsElectric/boot/platform/codegen/model/entity/GenConfig.java

@@ -0,0 +1,64 @@
+package com.zsElectric.boot.platform.codegen.model.entity;
+
+import com.baomidou.mybatisplus.annotation.*;
+
+import com.zsElectric.boot.common.base.BaseEntity;
+import lombok.Getter;
+import lombok.Setter;
+
+/**
+ * 代码生成基础配置
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@TableName(value = "gen_config")
+@Getter
+@Setter
+public class GenConfig extends BaseEntity {
+
+    /**
+     * 表名
+     */
+    private String tableName;
+
+    /**
+     * 包名
+     */
+    private String packageName;
+
+    /**
+     * 模块名
+     */
+    private String moduleName;
+
+    /**
+     * 实体类名
+     */
+    private String entityName;
+
+    /**
+     * 业务名
+     */
+    private String businessName;
+
+    /**
+     * 父菜单ID
+     */
+    private Long parentMenuId;
+
+    /**
+     * 作者
+     */
+    private String author;
+
+    /**
+     * 页面类型 classic|curd
+     */
+    private String pageType;
+
+    /**
+     * 要移除的表前缀,如: sys_
+     */
+    private String removeTablePrefix;
+}

+ 106 - 0
src/main/java/com/zsElectric/boot/platform/codegen/model/entity/GenFieldConfig.java

@@ -0,0 +1,106 @@
+package com.zsElectric.boot.platform.codegen.model.entity;
+
+import com.baomidou.mybatisplus.annotation.TableField;
+import com.baomidou.mybatisplus.annotation.TableName;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.zsElectric.boot.common.base.BaseEntity;
+import com.zsElectric.boot.platform.codegen.enums.FormTypeEnum;
+import com.zsElectric.boot.platform.codegen.enums.QueryTypeEnum;
+import lombok.Getter;
+import lombok.Setter;
+
+/**
+ * 字段生成配置实体
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@TableName(value = "gen_field_config")
+@Getter
+@Setter
+public class GenFieldConfig extends BaseEntity {
+
+
+    /**
+     * 关联的配置ID
+     */
+    private Long configId;
+
+    /**
+     * 列名
+     */
+    private String columnName;
+
+    /**
+     * 列类型
+     */
+    private String columnType;
+
+    /**
+     * 字段长度
+     */
+    private Long maxLength;
+
+    /**
+     * 字段名称
+     */
+    private String fieldName;
+
+    /**
+     * 字段排序
+     */
+    private Integer fieldSort;
+
+    /**
+     * 字段类型
+     */
+    private String fieldType;
+
+    /**
+     * 字段描述
+     */
+    private String fieldComment;
+
+    /**
+     * 表单类型
+     */
+    private FormTypeEnum formType;
+
+    /**
+     * 查询方式
+     */
+    private QueryTypeEnum queryType;
+
+    /**
+     * 是否在列表显示
+     */
+    private Integer isShowInList;
+
+    /**
+     * 是否在表单显示
+     */
+    private Integer isShowInForm;
+
+    /**
+     * 是否在查询条件显示
+     */
+    private Integer isShowInQuery;
+
+    /**
+     * 是否必填
+     */
+    private Integer isRequired;
+
+    /**
+     * TypeScript类型
+     */
+    @TableField(exist = false)
+    @JsonIgnore
+    private String tsType;
+
+    /**
+     * 字典类型
+     */
+    private String dictType;
+}

+ 109 - 0
src/main/java/com/zsElectric/boot/platform/codegen/model/form/GenConfigForm.java

@@ -0,0 +1,109 @@
+package com.zsElectric.boot.platform.codegen.model.form;
+
+import com.zsElectric.boot.platform.codegen.enums.FormTypeEnum;
+import com.zsElectric.boot.platform.codegen.enums.QueryTypeEnum;
+import io.swagger.v3.oas.annotations.media.Schema;
+import lombok.Data;
+
+import java.util.List;
+
+/**
+ * 代码生成配置表单
+ *
+ * @author Ray
+ * @since 2.10.0
+ */
+@Schema(description = "代码生成配置表单")
+@Data
+public class GenConfigForm {
+
+    @Schema(description = "主键",example = "1")
+    private Long id;
+
+    @Schema(description = "表名",example = "sys_user")
+    private String tableName;
+
+    @Schema(description = "业务名",example = "用户")
+    private String businessName;
+
+    @Schema(description = "模块名",example = "system")
+    private String moduleName;
+
+    @Schema(description = "包名",example = "com.youlai")
+    private String packageName;
+
+    @Schema(description = "实体名",example = "User")
+    private String entityName;
+
+    @Schema(description = "作者",example = "youlaitech")
+    private String author;
+
+    @Schema(description = "上级菜单ID",example = "1")
+    private Long parentMenuId;
+
+    @Schema(description = "字段配置列表")
+    private List<FieldConfig> fieldConfigs;
+
+    @Schema(description = "后端应用名")
+    private String backendAppName;
+
+    @Schema(description = "前端应用名")
+    private String frontendAppName;
+
+    @Schema(description = "页面类型 classic|curd", example = "classic")
+    private String pageType;
+
+    @Schema(description = "要移除的表前缀,如: sys_", example = "sys_")
+    private String removeTablePrefix;
+
+    @Schema(description = "字段配置")
+    @Data
+    public static class FieldConfig {
+
+        @Schema(description = "主键")
+        private Long id;
+
+        @Schema(description = "列名")
+        private String columnName;
+
+        @Schema(description = "列类型")
+        private String columnType;
+
+        @Schema(description = "字段名")
+        private String fieldName;
+
+        @Schema(description = "字段排序")
+        private Integer fieldSort;
+
+        @Schema(description = "字段类型")
+        private String fieldType;
+
+        @Schema(description = "字段描述")
+        private String fieldComment;
+
+        @Schema(description = "是否在列表显示")
+        private Integer isShowInList;
+
+        @Schema(description = "是否在表单显示")
+        private Integer isShowInForm;
+
+        @Schema(description = "是否在查询条件显示")
+        private Integer isShowInQuery;
+
+        @Schema(description = "是否必填")
+        private Integer isRequired;
+
+        @Schema(description = "最大长度")
+        private Integer maxLength;
+
+        @Schema(description = "表单类型")
+        private FormTypeEnum formType;
+
+        @Schema(description = "查询类型")
+        private QueryTypeEnum queryType;
+
+        @Schema(description = "字典类型")
+        private String dictType;
+
+    }
+}

Деякі файли не було показано, через те що забагато файлів було змінено