feat: adjust deployment scripts

This commit is contained in:
zerosaturation 2026-01-08 20:47:24 +08:00
parent 3793cc3252
commit 37a3355649
32 changed files with 3468 additions and 8879 deletions

docker/.gitignore vendored

@ -11,7 +11,6 @@ logs/
# 生成的配置文件
docker-compose.yml
configs/nginx.conf
configs/application-docker.yml
configs/my.cnf
# Docker相关


@ -172,17 +172,7 @@ docker/
### 步骤1环境配置
#### 1.1 创建环境配置文件
```bash
# 复制配置模板
cp environments/.env.example environments/.env.development
# 编辑配置文件
vim environments/.env.development
```
#### 1.2 关键配置项说明
#### 1.1 关键配置项说明
```bash
# 基础环境配置


@ -1,6 +1,6 @@
# 多阶段构建 Dockerfile for 若依后端应用
# Stage 1: 构建阶段 - 使用Maven构建应用
FROM maven:3.8-openjdk-8 AS builder
FROM maven:3.9-eclipse-temurin-8 AS builder
# 设置工作目录
WORKDIR /app
@ -47,10 +47,11 @@ COPY ruoyi-monitor/src ruoyi-monitor/src
RUN mvn clean package -DskipTests -B
# Stage 2: 运行阶段 - 使用轻量级JRE镜像
FROM openjdk:8-jre-alpine
FROM eclipse-temurin:8-jre-alpine
# FROM openjdk:8-jre-alpine
# 设置时区并安装必要工具
RUN apk add --no-cache tzdata netcat-openbsd && \
RUN apk add --no-cache tzdata netcat-openbsd curl bash && \
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
echo "Asia/Shanghai" > /etc/timezone && \
apk del tzdata
@ -65,15 +66,20 @@ WORKDIR /app
# 从构建阶段复制JAR文件
COPY --from=builder /app/ruoyi-admin/target/ruoyi-admin.jar app.jar
# 复制Docker环境配置文件和脚本
COPY docker/configs/application-docker.yml /app/config/application-docker.yml
COPY docker/configs/backend-env.sh /app/scripts/backend-env.sh
COPY docker/configs/startup.sh /app/scripts/startup.sh
# 创建日志目录、配置目录和脚本目录
RUN mkdir -p /app/logs /app/config /app/uploadPath /app/scripts && \
chown -R anxin:anxin /app && \
chmod +x /app/scripts/backend-env.sh /app/scripts/startup.sh
mkdir -p /home/ruoyi && \
ln -sf /app/logs /home/ruoyi/logs
# 复制Docker环境配置文件和脚本
COPY docker/configs/backend-env.sh /app/scripts/backend-env.sh
COPY docker/configs/startup.sh /app/scripts/startup.sh
COPY docker/configs/logback-spring.xml /app/config/logback-spring.xml
COPY docker/configs/application-docker.yml /app/config/application-docker.yml
# 设置脚本权限并修改所有者
RUN chmod +x /app/scripts/backend-env.sh /app/scripts/startup.sh && \
chown -R anxin:anxin /app /home/ruoyi
# 切换到应用用户
USER anxin
@ -81,12 +87,12 @@ USER anxin
# 暴露应用端口
EXPOSE 8080
# 设置环境变量 - 数据库连接配置
# 设置环境变量 - 数据库连接配置默认值可被docker-compose覆盖
ENV DB_HOST=anxin-mysql \
DB_PORT=3306 \
DB_NAME=anxin \
DB_USER=anxin_user \
DB_PASSWORD=anxin_password
DB_NAME=anxin_prod \
DB_USER=anxin_prod \
DB_PASSWORD=prod_password
# 设置环境变量 - 应用端口和网络配置
ENV SERVER_PORT=8080 \
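These ENV values are only image-level fallbacks; below is a minimal, hedged sketch of overriding them at run time (image and variable names come from this Dockerfile, the password is a placeholder, and in normal use docker-compose.production.yml supplies the same variables plus the shared network):

```bash
# Hedged example: start the backend by hand with the defaults overridden.
# DB_PASSWORD here is a placeholder, not a real credential.
docker run -d --name anxin-backend-prod \
  -e SPRING_PROFILES_ACTIVE=prod \
  -e DB_HOST=anxin-mysql \
  -e DB_NAME=anxin_prod \
  -e DB_USER=anxin_prod \
  -e DB_PASSWORD='change-me' \
  -e SERVER_PORT=8080 \
  -p 8080:8080 \
  anxin-backend:prod
```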

docker/build.sh Normal file → Executable file

@ -24,7 +24,7 @@ VERBOSE=false
# 镜像配置
FRONTEND_IMAGE="anxin-frontend"
BACKEND_IMAGE="anxin-backend"
MYSQL_IMAGE="mysql:8.0"
MYSQL_IMAGE="mysql:8.0.36"
# ===========================================
# 颜色定义
@ -334,25 +334,36 @@ pull_latest_code() {
prepare_build() {
log_step "准备构建环境..."
# 创建必要的目录
# 从环境变量获取路径,如果没有则使用默认值
local mysql_data_path=${MYSQL_DATA_PATH:-"./data/${ENVIRONMENT}/mysql"}
local mysql_log_path=${MYSQL_LOG_PATH:-"./data/${ENVIRONMENT}/mysql-logs"}
local backend_log_path=${BACKEND_LOG_PATH:-"./data/${ENVIRONMENT}/backend-logs"}
local backend_upload_path=${BACKEND_UPLOAD_PATH:-"./data/${ENVIRONMENT}/uploads"}
local frontend_log_path=${FRONTEND_LOG_PATH:-"./data/${ENVIRONMENT}/nginx-logs"}
local data_dirs=(
"${DOCKER_DIR}/data/mysql"
"${DOCKER_DIR}/data/mysql-logs"
"${DOCKER_DIR}/data/backend-logs"
"${DOCKER_DIR}/data/uploads"
"${DOCKER_DIR}/data/nginx-logs"
"$mysql_data_path"
"$mysql_log_path"
"$backend_log_path"
"$backend_upload_path"
"$frontend_log_path"
)
# 创建必要的目录
for dir in "${data_dirs[@]}"; do
if [[ ! -d "$dir" ]]; then
log_debug "创建目录: $dir"
mkdir -p "$dir"
# 转换相对路径为绝对路径
local abs_dir="${DOCKER_DIR}/${dir#./}"
if [[ ! -d "$abs_dir" ]]; then
log_debug "创建目录: $abs_dir"
mkdir -p "$abs_dir"
fi
done
# 设置目录权限
log_debug "设置目录权限..."
chmod -R 755 "${DOCKER_DIR}/data"
local base_data_dir="${DOCKER_DIR}/data/${ENVIRONMENT}"
if [[ -d "$base_data_dir" ]]; then
chmod -R 755 "$base_data_dir"
fi
# 清理构建缓存 (如果指定)
if [[ "$CLEAN_BUILD" == "true" ]]; then
@ -398,20 +409,28 @@ clean_build_cache() {
build_frontend_image() {
log_step "构建前端镜像..."
local image_name="${FRONTEND_IMAGE}:${TAG}"
# 根据环境设置镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
local image_name="${FRONTEND_IMAGE}:${env_tag}"
local dockerfile_path="${DOCKER_DIR}/frontend/Dockerfile"
local build_args=""
# 根据环境设置构建参数
case $ENVIRONMENT in
production)
build_args="--build-arg NODE_ENV=production --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080}"
build_args="--build-arg NODE_ENV=production --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080} --build-arg ENVIRONMENT=prod"
;;
staging)
build_args="--build-arg NODE_ENV=production --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080}"
build_args="--build-arg NODE_ENV=production --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080} --build-arg ENVIRONMENT=staging"
;;
development)
build_args="--build-arg NODE_ENV=development --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080}"
build_args="--build-arg NODE_ENV=development --build-arg API_BASE_URL=${API_BASE_URL:-http://localhost:8080} --build-arg ENVIRONMENT=dev"
;;
esac
@ -428,6 +447,13 @@ build_frontend_image() {
log_debug "构建命令: $build_cmd"
if eval "$build_cmd"; then
# 如果指定了自定义标签,也打上自定义标签
if [[ "$TAG" != "latest" && "$TAG" != "$env_tag" ]]; then
local custom_image_name="${FRONTEND_IMAGE}:${TAG}"
log_info "添加自定义标签: $custom_image_name"
docker tag "$image_name" "$custom_image_name"
fi
log_success "前端镜像构建成功: $image_name"
# 显示镜像信息
@ -444,7 +470,15 @@ build_frontend_image() {
build_backend_image() {
log_step "构建后端镜像..."
local image_name="${BACKEND_IMAGE}:${TAG}"
# 根据环境设置镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
local image_name="${BACKEND_IMAGE}:${env_tag}"
local dockerfile_path="${DOCKER_DIR}/backend/Dockerfile"
local build_args=""
@ -474,6 +508,13 @@ build_backend_image() {
log_debug "构建命令: $build_cmd"
if eval "$build_cmd"; then
# 如果指定了自定义标签,也打上自定义标签
if [[ "$TAG" != "latest" && "$TAG" != "$env_tag" ]]; then
local custom_image_name="${BACKEND_IMAGE}:${TAG}"
log_info "添加自定义标签: $custom_image_name"
docker tag "$image_name" "$custom_image_name"
fi
log_success "后端镜像构建成功: $image_name"
# 显示镜像信息
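The tagging change above means every build is tagged with its environment tag first, and the -t value only becomes an extra alias; a hedged usage sketch (the -e and -t flags are the ones referenced by the scripts' own log messages):

```bash
# Build with the environment tag only:
./build.sh -e production            # -> anxin-frontend:prod, anxin-backend:prod

# Build with an additional custom tag on top of the env tag:
./build.sh -e production -t v1.0.0  # -> :prod plus :v1.0.0 aliases

# Inspect the result:
docker images | grep anxin-
```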


@ -0,0 +1,211 @@
# Docker环境专用配置文件
# 用于覆盖默认配置适配Docker容器环境
server:
port: 8080
servlet:
context-path: /
tomcat:
uri-encoding: UTF-8
accept-count: 1000
threads:
max: 800
min-spare: 100
# 数据源配置
spring:
datasource:
type: com.alibaba.druid.pool.DruidDataSource
driverClassName: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://${DB_HOST:anxin-mysql}:${DB_PORT:3306}/${DB_NAME:anxin_dev}?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
username: ${DB_USER:anxin_dev}
password: ${DB_PASSWORD:dev_password}
druid:
# 初始连接数
initialSize: 5
# 最小连接池数量
minIdle: 10
# 最大连接池数量
maxActive: 20
# 配置获取连接等待超时的时间
maxWait: 60000
# 配置连接超时时间
connectTimeout: 30000
# 配置网络超时时间
socketTimeout: 60000
# 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒
timeBetweenEvictionRunsMillis: 60000
# 配置一个连接在池中最小生存的时间,单位是毫秒
minEvictableIdleTimeMillis: 300000
# 配置一个连接在池中最大生存的时间,单位是毫秒
maxEvictableIdleTimeMillis: 900000
# 配置检测连接是否有效
validationQuery: SELECT 1 FROM DUAL
testWhileIdle: true
testOnBorrow: false
testOnReturn: false
# 连接池关闭时等待时间
timeBetweenConnectErrorMillis: 30000
# 连接泄漏检测
removeAbandoned: true
removeAbandonedTimeout: 1800
logAbandoned: true
# 防止连接池耗尽
breakAfterAcquireFailure: true
connectionErrorRetryAttempts: 3
webStatFilter:
enabled: true
statViewServlet:
enabled: true
# 设置白名单,不填则允许所有访问
allow:
url-pattern: /druid/*
# 控制台管理用户名和密码
login-username: ${DRUID_USERNAME:admin}
login-password: ${DRUID_PASSWORD:admin123}
filter:
stat:
enabled: true
# 慢SQL记录
log-slow-sql: true
slow-sql-millis: 1000
merge-sql: true
wall:
config:
multi-statement-allow: true
# Redis配置
redis:
host: ${REDIS_HOST:anxin-redis}
port: ${REDIS_PORT:6379}
password: ${REDIS_PASSWORD:}
database: ${REDIS_DATABASE:0}
timeout: 10s
lettuce:
pool:
# 连接池最大连接数
max-active: 200
# 连接池最大阻塞等待时间(使用负值表示没有限制)
max-wait: -1ms
# 连接池中的最大空闲连接
max-idle: 10
# 连接池中的最小空闲连接
min-idle: 0
# 日志配置
logging:
config: /app/config/logback-spring.xml
level:
com.ruoyi: ${LOG_LEVEL:DEBUG}
root: ${LOG_LEVEL:INFO}
file:
path: ${LOG_PATH:/app/logs}
name: ${LOG_PATH:/app/logs}/application.log
# 若依配置
ruoyi:
# 名称
name: 安信数字信贷系统
# 版本
version: 1.0.0
# 版权年份
copyrightYear: 2024
# 实例演示开关
demoEnabled: true
# 文件路径 示例( Windows配置D:/ruoyi/uploadPath,Linux配置 /home/ruoyi/uploadPath)
profile: ${UPLOAD_PATH:/app/uploadPath}
# 获取ip地址开关
addressEnabled: false
# 验证码类型 math 数组计算 char 字符验证
captchaType: math
# token配置
token:
# 令牌自定义标识
header: Authorization
# 令牌密钥
secret: ${JWT_SECRET:abcdefghijklmnopqrstuvwxyz}
# 令牌有效期默认30分钟
expireTime: ${JWT_EXPIRE_TIME:30}
# MyBatis配置
mybatis:
# 搜索指定包别名
typeAliasesPackage: com.ruoyi.**.domain
# 配置mapper的扫描找到所有的mapper.xml映射文件
mapperLocations: classpath*:mapper/**/*Mapper.xml
# 加载全局的配置文件
configLocation: classpath:mybatis/mybatis-config.xml
# PageHelper分页插件
pagehelper:
helperDialect: mysql
supportMethodsArguments: true
params: count=countSql
# Swagger配置
swagger:
# 是否开启swagger
enabled: ${SWAGGER_ENABLED:true}
# 请求前缀
pathMapping: /dev-api
# 防止XSS攻击
xss:
# 过滤开关
enabled: true
# 排除链接(多个用逗号分隔)
excludes: /system/notice
# 匹配链接
urlPatterns: /system/*,/monitor/*,/tool/*
# 代码生成
gen:
# 作者
author: ruoyi
# 默认生成包路径 system 需改成自己的模块名称 如 system monitor tool
packageName: com.ruoyi.system
# 自动去除表前缀默认是false
autoRemovePre: false
# 表前缀(生成类名不会包含表前缀,多个用逗号分隔)
tablePrefix: sys_
# 监控配置
management:
endpoints:
web:
exposure:
include: ${MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE:health,info,metrics}
endpoint:
health:
show-details: always
# Redisson配置
redisson:
# 线程池数量
threads: 16
# Netty线程池数量
nettyThreads: 32
# 单节点配置
single-server-config:
address: redis://${REDIS_HOST:anxin-redis}:${REDIS_PORT:6379}
password: ${REDIS_PASSWORD:}
database: ${REDIS_DATABASE:0}
# 连接超时,单位:毫秒
connectTimeout: 10000
# 命令等待超时,单位:毫秒
timeout: 3000
# 命令失败重试次数
retryAttempts: 3
# 命令重试发送时间间隔,单位:毫秒
retryInterval: 1500
# 连接池大小
connectionPoolSize: 64
# 最小空闲连接数
connectionMinimumIdleSize: 10
# 空闲连接超时,单位:毫秒
idleConnectionTimeout: 10000
# ping连接间隔
pingTimeout: 1000
# keep alive
keepAlive: true

docker/configs/backend-env.sh Normal file → Executable file

@ -8,9 +8,9 @@ set -e
# 默认环境变量配置
export DB_HOST=${DB_HOST:-"anxin-mysql"}
export DB_PORT=${DB_PORT:-"3306"}
export DB_NAME=${DB_NAME:-"anxin"}
export DB_USER=${DB_USER:-"anxin_user"}
export DB_PASSWORD=${DB_PASSWORD:-"anxin_password"}
export DB_NAME=${DB_NAME:-"anxin_prod"}
export DB_USER=${DB_USER:-"anxin_prod"}
export DB_PASSWORD=${DB_PASSWORD:-"prod_password"}
# 应用端口和网络配置
export SERVER_PORT=${SERVER_PORT:-"8080"}
@ -113,7 +113,7 @@ load_environment() {
export JAVA_OPTS="-Xms512m -Xmx1024m -Djava.security.egd=file:/dev/./urandom"
echo "已加载预发布环境配置"
;;
"development")
"development"|"dev")
export LOG_LEVEL="DEBUG"
export SWAGGER_ENABLED="true"
export JAVA_OPTS="-Xms256m -Xmx512m -Djava.security.egd=file:/dev/./urandom"
@ -125,6 +125,11 @@ load_environment() {
esac
}
# 自动加载当前环境配置
if [[ -n "$SPRING_PROFILES_ACTIVE" ]]; then
load_environment "$SPRING_PROFILES_ACTIVE"
fi
# 主函数
main() {
local command=${1:-"show"}


@ -0,0 +1,110 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- 定义日志文件路径 -->
<property name="LOG_PATH" value="${LOG_PATH:-/app/logs}" />
<property name="LOG_LEVEL" value="${LOG_LEVEL:-INFO}" />
<!-- 控制台输出 -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - [%method,%line] - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- 系统日志文件 -->
<appender name="SYS_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_PATH}/sys.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - [%method,%line] - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/sys.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>15</maxHistory>
</rollingPolicy>
</appender>
<!-- 错误日志文件 -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_PATH}/sys-error.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - [%method,%line] - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/sys-error.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>15</maxHistory>
</rollingPolicy>
</appender>
<!-- 用户操作日志 -->
<appender name="USER_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_PATH}/sys-user.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - [%method,%line] - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/sys-user.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>15</maxHistory>
</rollingPolicy>
</appender>
<!-- 若依框架日志 -->
<logger name="com.ruoyi" level="${LOG_LEVEL}" additivity="false">
<appender-ref ref="CONSOLE" />
<appender-ref ref="SYS_FILE" />
<appender-ref ref="ERROR_FILE" />
</logger>
<!-- 用户操作日志 -->
<logger name="sys-user" level="INFO" additivity="false">
<appender-ref ref="USER_FILE" />
</logger>
<!-- 根日志级别 -->
<root level="${LOG_LEVEL}">
<appender-ref ref="CONSOLE" />
<appender-ref ref="SYS_FILE" />
<appender-ref ref="ERROR_FILE" />
</root>
<!-- 开发环境配置 -->
<springProfile name="dev">
<logger name="com.ruoyi" level="DEBUG" additivity="false">
<appender-ref ref="CONSOLE" />
<appender-ref ref="SYS_FILE" />
</logger>
<root level="INFO">
<appender-ref ref="CONSOLE" />
<appender-ref ref="SYS_FILE" />
</root>
</springProfile>
<!-- 生产环境配置 -->
<springProfile name="prod">
<logger name="com.ruoyi" level="WARN" additivity="false">
<appender-ref ref="SYS_FILE" />
<appender-ref ref="ERROR_FILE" />
</logger>
<root level="WARN">
<appender-ref ref="SYS_FILE" />
<appender-ref ref="ERROR_FILE" />
</root>
</springProfile>
</configuration>
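A short, hedged sketch of where this configuration writes at run time (the container name is taken from docker-compose.production.yml; LOG_PATH defaults to /app/logs per the property above and is exported by startup.sh):

```bash
# List the rolling log files produced by the SYS_FILE/ERROR_FILE/USER_FILE appenders.
docker exec anxin-backend-prod ls -lh /app/logs
# Tail only the ERROR-filtered appender's output.
docker exec anxin-backend-prod tail -n 50 /app/logs/sys-error.log
```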

docker/configs/my.cnf.dev Normal file

@ -0,0 +1,75 @@
# MySQL配置文件 - 开发环境
# 针对开发环境优化的MySQL配置
[mysqld]
# 基础配置
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
# 字符集配置
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
init_connect = 'SET NAMES utf8mb4'
# 网络配置
bind-address = 0.0.0.0
max_connections = 100
max_connect_errors = 10000
max_allowed_packet = 32M
interactive_timeout = 300
wait_timeout = 300
# 缓存配置 - 开发环境轻量化
key_buffer_size = 64M
max_allowed_packet = 32M
table_open_cache = 1000
sort_buffer_size = 2M
read_buffer_size = 1M
read_rnd_buffer_size = 4M
myisam_sort_buffer_size = 64M
thread_cache_size = 20
# query_cache已在MySQL 8.0中移除
# query_cache_type = 1
# query_cache_size = 64M
# query_cache_limit = 1M
# InnoDB配置 - 开发环境轻量化
innodb_buffer_pool_size = 256M
innodb_log_file_size = 64M
innodb_log_buffer_size = 8M
innodb_flush_log_at_trx_commit = 2 # 开发环境可以牺牲一些安全性换取性能
innodb_lock_wait_timeout = 50
innodb_file_per_table = 1
# 日志配置 - 开发环境详细日志
log_error = /var/log/mysql/error.log
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1 # 开发环境更严格的慢查询阈值
log_queries_not_using_indexes = 1
# 二进制日志配置 - 开发环境可选
# log_bin = /var/log/mysql/mysql-bin.log
# binlog_format = ROW
# binlog_expire_logs_seconds = 86400 # 1天
# max_binlog_size = 50M
# 安全配置 - MySQL 8.0兼容
sql_mode = STRICT_TRANS_TABLES,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
# 开发环境特殊配置
general_log = 1 # 开发环境启用通用日志
general_log_file = /var/log/mysql/general.log
[mysql]
default-character-set = utf8mb4
[client]
default-character-set = utf8mb4


@ -35,9 +35,10 @@ read_buffer_size = 2M
read_rnd_buffer_size = 8M
myisam_sort_buffer_size = 128M
thread_cache_size = 50
query_cache_type = 1
query_cache_size = 128M
query_cache_limit = 2M
# query_cache已在MySQL 8.0中移除
# query_cache_type = 1
# query_cache_size = 128M
# query_cache_limit = 2M
# InnoDB配置 - 生产环境优化
innodb_buffer_pool_size = 512M
@ -58,11 +59,11 @@ log_queries_not_using_indexes = 1
# 二进制日志配置 - 生产环境启用
log_bin = /var/log/mysql/mysql-bin.log
binlog_format = ROW
expire_logs_days = 7
binlog_expire_logs_seconds = 604800 # 7天 (7*24*60*60)
max_binlog_size = 100M
# 安全配置
sql_mode = STRICT_TRANS_TABLES,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
# 安全配置 - MySQL 8.0兼容
sql_mode = STRICT_TRANS_TABLES,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
[mysql]
default-character-set = utf8mb4
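The query cache was removed in MySQL 8.0 and expire_logs_days is superseded there by binlog_expire_logs_seconds, which is what the edits above account for. A hedged way to confirm the container picked up the new value (container name from docker-compose.production.yml; the root password is read from the sourced environment file):

```bash
# Expect binlog_expire_logs_seconds = 604800 (7 days) once this my.cnf is mounted.
source environments/.env.production
docker exec anxin-mysql-prod \
  mysql -uroot -p"$MYSQL_ROOT_PASSWORD" \
  -e "SHOW VARIABLES LIKE 'binlog_expire_logs_seconds';"
```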


@ -0,0 +1,72 @@
# MySQL配置文件 - 测试环境
# 针对测试环境优化的MySQL配置
[mysqld]
# 基础配置
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
# 字符集配置
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
init_connect = 'SET NAMES utf8mb4'
# 网络配置
bind-address = 0.0.0.0
max_connections = 150
max_connect_errors = 50000
max_allowed_packet = 48M
interactive_timeout = 450
wait_timeout = 450
# 缓存配置 - 测试环境中等配置
key_buffer_size = 128M
max_allowed_packet = 48M
table_open_cache = 2000
sort_buffer_size = 3M
read_buffer_size = 1.5M
read_rnd_buffer_size = 6M
myisam_sort_buffer_size = 96M
thread_cache_size = 30
# query_cache已在MySQL 8.0中移除
# query_cache_type = 1
# query_cache_size = 96M
# query_cache_limit = 1.5M
# InnoDB配置 - 测试环境中等配置
innodb_buffer_pool_size = 384M
innodb_log_file_size = 96M
innodb_log_buffer_size = 12M
innodb_flush_log_at_trx_commit = 1
innodb_lock_wait_timeout = 50
innodb_file_per_table = 1
innodb_flush_method = O_DIRECT
# 日志配置
log_error = /var/log/mysql/error.log
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1.5
log_queries_not_using_indexes = 1
# 二进制日志配置 - 测试环境启用
log_bin = /var/log/mysql/mysql-bin.log
binlog_format = ROW
binlog_expire_logs_seconds = 259200 # 3天
max_binlog_size = 75M
# 安全配置 - MySQL 8.0兼容
sql_mode = STRICT_TRANS_TABLES,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
[mysql]
default-character-set = utf8mb4
[client]
default-character-set = utf8mb4


@ -1,34 +1,9 @@
# Nginx配置文件 - 生产环境
# Nginx配置文件 - 生产环境 (HTTP版本)
# 用于Vue3前端静态文件服务和API代理
# HTTP服务器 - 重定向到HTTPS
server {
listen 80;
server_name anxin.com www.anxin.com;
# 强制重定向到HTTPS
return 301 https://$server_name$request_uri;
}
# HTTPS服务器 - 主要配置
server {
listen 443 ssl http2;
server_name anxin.com www.anxin.com;
# SSL配置
ssl_certificate /etc/nginx/ssl/anxin.com.crt;
ssl_certificate_key /etc/nginx/ssl/anxin.com.key;
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# 现代SSL配置
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# HSTS配置
add_header Strict-Transport-Security "max-age=63072000" always;
server_name _;
# 生产环境日志配置
access_log /var/log/nginx/access.log;
@ -39,7 +14,6 @@ server {
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Referrer-Policy "strict-origin-when-cross-origin";
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' https://api.anxin.com;";
# Gzip压缩配置
gzip on;

docker/configs/startup.sh Normal file → Executable file

@ -46,16 +46,20 @@ fi
# 创建必要的目录
mkdir -p "$LOG_PATH" "$UPLOAD_PATH"
# 设置Spring Boot配置文件路径
export SPRING_CONFIG_LOCATION="classpath:/application.yml,file:/app/config/application-docker.yml"
# 设置Spring Boot配置文件路径使用Docker专用配置
export SPRING_CONFIG_LOCATION="classpath:/application.yml,/app/config/application-docker.yml"
# 构建完整的Java启动命令
JAVA_CMD="java $JAVA_OPTS \
-Dserver.port=$SERVER_PORT \
-Dspring.profiles.active=$SPRING_PROFILES_ACTIVE \
-Dspring.config.location=$SPRING_CONFIG_LOCATION \
-Dlogging.level.com.ruoyi=$LOG_LEVEL \
-Dlogging.file.name=$LOG_PATH/anxin.log \
-Dlogging.config=/app/config/logback-spring.xml \
-Dlog.path=$LOG_PATH \
-DLOG_PATH=$LOG_PATH \
-DLOG_LEVEL=$LOG_LEVEL \
-Dlogging.path=$LOG_PATH \
-Dlogging.file.path=$LOG_PATH \
-Druoyi.profile=$UPLOAD_PATH \
-Dspring.datasource.url=jdbc:mysql://$DB_HOST:$DB_PORT/$DB_NAME?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true \
-Dspring.datasource.username=$DB_USER \
-Dspring.datasource.password=$DB_PASSWORD \


@ -6,7 +6,7 @@ version: '3.8'
services:
# MySQL数据库服务
anxin-mysql:
image: mysql:8.0
image: mysql:8.0.36
container_name: ${COMPOSE_PROJECT_NAME:-anxin}-mysql
restart: unless-stopped

File diff suppressed because one or more lines are too long

docker/deploy.sh Normal file → Executable file

@ -19,6 +19,8 @@ FOLLOW_LOGS=false
VERBOSE=false
FORCE_RECREATE=false
TIMEOUT=300
AUTO_CLEAN=false
IMAGE_DIR=""
# 服务配置
SERVICES=("anxin-mysql" "anxin-backend" "anxin-frontend")
@ -83,6 +85,7 @@ show_help() {
ps 查看运行中的容器
down 停止并删除所有容器、网络和卷
up 启动所有服务 (等同于start all)
load-images 加载Docker镜像文件 (从tar文件)
服务:
all 所有服务 (默认)
@ -110,6 +113,10 @@ show_help() {
--tail LINES 显示最后N行日志 [默认: 100]
--since TIME 显示指定时间后的日志 (如: 2h, 30m)
镜像加载选项:
--image-dir DIR 指定镜像文件目录 [默认: 当前目录]
--auto-clean 加载后自动删除镜像文件
示例:
$0 # 查看服务状态 (开发环境)
$0 start # 启动所有服务
@ -120,6 +127,8 @@ show_help() {
$0 logs backend -f # 跟踪后端服务日志
$0 logs --tail 50 --since 1h # 查看最近1小时的50行日志
$0 status -e staging # 查看测试环境状态
$0 load-images -e production # 加载生产环境镜像
$0 load-images --image-dir /tmp # 从指定目录加载镜像
Requirements Coverage:
5.4 - 日志输出用于问题排查
@ -136,7 +145,7 @@ parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
start|stop|restart|status|health|logs|ps|down|up)
start|stop|restart|status|health|logs|ps|down|up|load-images)
ACTION="$1"
shift
;;
@ -144,6 +153,14 @@ parse_args() {
ENVIRONMENT="$2"
shift 2
;;
--image-dir)
IMAGE_DIR="$2"
shift 2
;;
--auto-clean)
AUTO_CLEAN=true
shift
;;
-f|--follow)
FOLLOW_LOGS=true
shift
@ -206,6 +223,7 @@ parse_args() {
# 设置默认值
ACTION=${ACTION:-$DEFAULT_ACTION}
ENVIRONMENT=${ENVIRONMENT:-$DEFAULT_ENVIRONMENT}
IMAGE_DIR=${IMAGE_DIR:-"."}
HEALTH_RETRY_COUNT=${HEALTH_RETRY_COUNT:-10}
HEALTH_CHECK_INTERVAL=${HEALTH_CHECK_INTERVAL:-30}
LOG_TAIL_LINES=${LOG_TAIL_LINES:-100}
@ -326,8 +344,11 @@ start_services() {
# 创建必要的数据目录
create_data_directories
# 检查必需的镜像是否存在
check_required_images
# 构建启动命令
local compose_args=("up" "-d")
local compose_args=("up" "-d" "--no-build") # 添加 --no-build 参数
if [[ "$FORCE_RECREATE" == "true" ]]; then
compose_args+=("--force-recreate")
@ -365,23 +386,80 @@ start_services() {
create_data_directories() {
log_debug "创建必要的数据目录..."
# 从环境变量获取路径,如果没有则使用默认值
local mysql_data_path=${MYSQL_DATA_PATH:-"./data/${ENVIRONMENT}/mysql"}
local mysql_log_path=${MYSQL_LOG_PATH:-"./data/${ENVIRONMENT}/mysql-logs"}
local backend_log_path=${BACKEND_LOG_PATH:-"./data/${ENVIRONMENT}/backend-logs"}
local backend_upload_path=${BACKEND_UPLOAD_PATH:-"./data/${ENVIRONMENT}/uploads"}
local frontend_log_path=${FRONTEND_LOG_PATH:-"./data/${ENVIRONMENT}/nginx-logs"}
local data_dirs=(
"${DOCKER_DIR}/data/${ENVIRONMENT}/mysql"
"${DOCKER_DIR}/data/${ENVIRONMENT}/mysql-logs"
"${DOCKER_DIR}/data/${ENVIRONMENT}/backend-logs"
"${DOCKER_DIR}/data/${ENVIRONMENT}/uploads"
"${DOCKER_DIR}/data/${ENVIRONMENT}/nginx-logs"
"$mysql_data_path"
"$mysql_log_path"
"$backend_log_path"
"$backend_upload_path"
"$frontend_log_path"
)
for dir in "${data_dirs[@]}"; do
if [[ ! -d "$dir" ]]; then
log_debug "创建目录: $dir"
mkdir -p "$dir"
# 转换相对路径为绝对路径
local abs_dir="${DOCKER_DIR}/${dir#./}"
if [[ ! -d "$abs_dir" ]]; then
log_debug "创建目录: $abs_dir"
mkdir -p "$abs_dir"
fi
done
# 设置目录权限
chmod -R 755 "${DOCKER_DIR}/data/${ENVIRONMENT}"
local base_data_dir="${DOCKER_DIR}/data/${ENVIRONMENT}"
if [[ -d "$base_data_dir" ]]; then
chmod -R 755 "$base_data_dir"
fi
}
# 检查必需的镜像是否存在
check_required_images() {
log_debug "检查必需的Docker镜像..."
# 根据环境确定镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
local required_images=(
"anxin-frontend:${env_tag}"
"anxin-backend:${env_tag}"
"mysql:8.0.36"
)
local missing_images=()
for image in "${required_images[@]}"; do
if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "^$image$"; then
missing_images+=("$image")
else
log_debug "✓ 镜像存在: $image"
fi
done
if [[ ${#missing_images[@]} -gt 0 ]]; then
log_error "以下必需的镜像不存在:"
for image in "${missing_images[@]}"; do
log_error "$image"
done
log_info "解决方案:"
log_info "1. 运行构建脚本: ./build.sh -e $ENVIRONMENT"
log_info "2. 或者从远程服务器拉取镜像"
log_info "3. 或者使用推送脚本从其他机器传输镜像"
exit 1
else
log_success "所有必需的镜像都已存在"
fi
}
# ===========================================
@ -734,6 +812,119 @@ show_services_logs() {
compose_cmd "${compose_args[@]}"
}
# ===========================================
# 镜像加载功能
# ===========================================
load_docker_images() {
log_step "加载Docker镜像..."
# 检查镜像目录是否存在
if [[ ! -d "$IMAGE_DIR" ]]; then
log_error "镜像目录不存在: $IMAGE_DIR"
exit 1
fi
log_info "搜索镜像目录: $IMAGE_DIR"
# 根据环境确定镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
# 查找镜像文件
local image_files=()
local expected_images=(
"anxin-frontend_${env_tag}.tar"
"anxin-backend_${env_tag}.tar"
)
# 检查预期的镜像文件
for expected_image in "${expected_images[@]}"; do
local image_path="${IMAGE_DIR}/${expected_image}"
if [[ -f "$image_path" ]]; then
image_files+=("$image_path")
log_info "找到镜像文件: $expected_image"
else
log_warn "未找到预期镜像文件: $expected_image"
fi
done
# 查找其他可能的镜像文件
while IFS= read -r -d '' file; do
local filename=$(basename "$file")
if [[ "$filename" == anxin-*.tar && ! " ${image_files[*]} " =~ " ${file} " ]]; then
image_files+=("$file")
log_info "找到额外镜像文件: $filename"
fi
done < <(find "$IMAGE_DIR" -name "anxin-*.tar" -type f -print0 2>/dev/null)
# 检查是否找到镜像文件
if [[ ${#image_files[@]} -eq 0 ]]; then
log_error "在目录 $IMAGE_DIR 中未找到任何镜像文件"
log_info "预期的镜像文件格式: anxin-frontend_${env_tag}.tar, anxin-backend_${env_tag}.tar"
exit 1
fi
log_info "找到 ${#image_files[@]} 个镜像文件"
# 加载镜像
local loaded_count=0
local failed_count=0
for image_file in "${image_files[@]}"; do
local filename=$(basename "$image_file")
log_info "正在加载镜像: $filename"
# 检查文件大小
local file_size=$(stat -f%z "$image_file" 2>/dev/null || stat -c%s "$image_file" 2>/dev/null || echo "0")
local size_mb=$((file_size / 1024 / 1024))
log_info "文件大小: ${size_mb}MB"
# 加载镜像
if docker load -i "$image_file"; then
log_success "镜像加载成功: $filename"
((loaded_count++))
# 如果启用自动清理,删除镜像文件
if [[ "$AUTO_CLEAN" == "true" ]]; then
log_info "删除镜像文件: $filename"
rm -f "$image_file"
log_success "镜像文件已删除: $filename"
fi
else
log_error "镜像加载失败: $filename"
((failed_count++))
fi
echo ""
done
# 显示加载结果
echo "========================================"
log_info "镜像加载结果:"
log_success "成功加载: $loaded_count 个镜像"
if [[ $failed_count -gt 0 ]]; then
log_error "加载失败: $failed_count 个镜像"
fi
# 显示当前镜像列表
log_info "当前Docker镜像列表:"
echo "----------------------------------------"
docker images --format "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}" | grep -E "(anxin-|REPOSITORY)" || echo "未找到相关镜像"
echo "----------------------------------------"
if [[ $failed_count -gt 0 ]]; then
return 1
else
log_success "所有镜像加载完成!"
return 0
fi
}
# ===========================================
# 完整清理功能
# ===========================================
@ -865,6 +1056,9 @@ main() {
down)
down_all_services
;;
load-images)
load_docker_images
;;
*)
log_error "未知动作: $ACTION"
show_help
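A hedged end-to-end sketch of the new load-images flow (the tar file names follow the anxin-<service>_<env_tag>.tar pattern this function searches for, which is also what push.sh produces via docker save; host and paths are illustrative):

```bash
# On the build machine: export the production-tagged images.
docker save -o anxin-frontend_prod.tar anxin-frontend:prod
docker save -o anxin-backend_prod.tar  anxin-backend:prod

# Copy them to the server, then load and clean up in one step:
scp anxin-*_prod.tar user@server:/tmp/
./deploy.sh load-images -e production --image-dir /tmp --auto-clean

# check_required_images now passes, and start uses --no-build, so nothing is rebuilt locally.
./deploy.sh start -e production
```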


@ -5,9 +5,36 @@
version: '3.8'
services:
# Redis缓存服务 - 开发环境配置
anxin-redis:
image: redis:7.2-alpine
container_name: anxin-redis-dev
restart: unless-stopped
environment:
TZ: Asia/Shanghai
ports:
- "${REDIS_PORT:-6379}:6379"
volumes:
- redis-data-dev:/data
- redis-logs-dev:/var/log/redis
networks:
- anxin-dev-network
deploy:
resources:
limits:
memory: ${REDIS_MEMORY_LIMIT:-128M}
cpus: '${REDIS_CPU_LIMIT:-0.25}'
healthcheck:
test: ["CMD", "redis-cli", "ping"]
timeout: 10s
retries: 5
interval: 30s
start_period: 30s
command: redis-server --appendonly yes --maxmemory 100mb --maxmemory-policy allkeys-lru
# MySQL数据库服务 - 开发环境配置
anxin-mysql:
image: mysql:8.0
image: mysql:8.0.36
container_name: anxin-mysql-dev
restart: unless-stopped
environment:
@ -21,7 +48,7 @@ services:
volumes:
- mysql-data-dev:/var/lib/mysql
- ./database/init:/docker-entrypoint-initdb.d:ro
- ./configs/my.cnf:/etc/mysql/conf.d/my.cnf:ro
- ./configs/my.cnf.dev:/etc/mysql/conf.d/my.cnf:ro
- mysql-logs-dev:/var/log/mysql
networks:
- anxin-dev-network
@ -73,6 +100,8 @@ services:
depends_on:
anxin-mysql:
condition: service_healthy
anxin-redis:
condition: service_healthy
deploy:
resources:
limits:
@ -138,13 +167,29 @@ networks:
# 卷配置 - 开发环境
volumes:
# Redis数据持久化卷
redis-data-dev:
driver: local
driver_opts:
type: none
o: bind
device: ${REDIS_DATA_PATH:-./data/development/redis}
# Redis日志卷
redis-logs-dev:
driver: local
driver_opts:
type: none
o: bind
device: ${REDIS_LOG_PATH:-./data/development/redis-logs}
# 数据库数据持久化卷
mysql-data-dev:
driver: local
driver_opts:
type: none
o: bind
device: ${MYSQL_DATA_PATH:-./data/dev/mysql}
device: ${MYSQL_DATA_PATH:-./data/development/mysql}
# 数据库日志卷
mysql-logs-dev:
@ -152,7 +197,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${MYSQL_LOG_PATH:-./data/dev/mysql-logs}
device: ${MYSQL_LOG_PATH:-./data/development/mysql-logs}
# 后端应用日志卷
backend-logs-dev:
@ -160,7 +205,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${BACKEND_LOG_PATH:-./data/dev/backend-logs}
device: ${BACKEND_LOG_PATH:-./data/development/backend-logs}
# 后端文件上传卷
backend-uploads-dev:
@ -168,7 +213,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${BACKEND_UPLOAD_PATH:-./data/dev/uploads}
device: ${BACKEND_UPLOAD_PATH:-./data/development/uploads}
# 前端Nginx日志卷
frontend-logs-dev:
@ -176,4 +221,4 @@ volumes:
driver_opts:
type: none
o: bind
device: ${FRONTEND_LOG_PATH:-./data/dev/nginx-logs}
device: ${FRONTEND_LOG_PATH:-./data/development/nginx-logs}


@ -5,9 +5,46 @@
version: '3.8'
services:
# Redis缓存服务 - 生产环境配置
anxin-redis:
image: redis:7.2-alpine
container_name: anxin-redis-prod
restart: always
environment:
TZ: Asia/Shanghai
ports:
- "127.0.0.1:${REDIS_PORT_EXPORT:-6379}:6379" # 生产环境仅绑定本地接口
volumes:
- redis-data-prod:/data
- redis-logs-prod:/var/log/redis
networks:
- anxin-prod-network
deploy:
resources:
limits:
memory: ${REDIS_MEMORY_LIMIT:-256M}
cpus: '${REDIS_CPU_LIMIT:-0.5}'
reservations:
memory: 128M
cpus: '0.25'
healthcheck:
test: ["CMD", "redis-cli", "ping"]
timeout: 10s
retries: 5
interval: 30s
start_period: 30s
command: redis-server --appendonly yes --maxmemory 200mb --maxmemory-policy allkeys-lru --requirepass ${REDIS_PASSWORD:-}
logging:
driver: "json-file"
options:
max-size: "${LOG_MAX_SIZE:-200m}"
max-file: "${LOG_MAX_FILES:-15}"
security_opt:
- no-new-privileges:true
# MySQL数据库服务 - 生产环境配置
anxin-mysql:
image: mysql:8.0
image: mysql:8.0.36
container_name: anxin-mysql-prod
restart: always
environment:
@ -17,7 +54,7 @@ services:
MYSQL_PASSWORD: ${DB_PASSWORD}
TZ: Asia/Shanghai
ports:
- "127.0.0.1:${DB_PORT:-3306}:3306" # 生产环境仅绑定本地接
- "${DB_PORT:-3306}:3306" # 允许外部访问,使用环境变量指定的端
volumes:
- mysql-data-prod:/var/lib/mysql
- ./database/init:/docker-entrypoint-initdb.d:ro
@ -61,12 +98,30 @@ services:
container_name: anxin-backend-prod
restart: always
environment:
# 数据库连接配置 - 使用与启动脚本一致的环境变量名
DB_HOST: anxin-mysql
DB_PORT: 3306
DB_NAME: ${DB_NAME:-anxin_prod}
DB_USER: ${DB_USER:-anxin_prod}
DB_PASSWORD: ${DB_PASSWORD}
# Redis配置
REDIS_HOST: ${REDIS_HOST:-anxin-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_PASSWORD: ${REDIS_PASSWORD:-}
REDIS_DATABASE: ${REDIS_DATABASE:-0}
# Spring Boot配置
SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-prod}
SPRING_DATASOURCE_URL: jdbc:mysql://anxin-mysql:3306/${DB_NAME:-anxin_prod}?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&requireSSL=true
SPRING_DATASOURCE_USERNAME: ${DB_USER:-anxin_prod}
SPRING_DATASOURCE_PASSWORD: ${DB_PASSWORD}
SERVER_PORT: 8080
# JVM配置
JAVA_OPTS: ${JAVA_OPTS:--Xms1024m -Xmx2048m -Djava.security.egd=file:/dev/./urandom -XX:+UseG1GC -XX:+UseStringDeduplication}
# 日志配置
LOG_LEVEL: ${LOG_LEVEL:-WARN}
LOG_PATH: /app/logs
LOG_MAX_SIZE: ${LOG_MAX_SIZE:-200MB}
LOG_MAX_FILES: ${LOG_MAX_FILES:-15}
# 文件上传路径
UPLOAD_PATH: /app/uploadPath
# 系统配置
TZ: Asia/Shanghai
# 生产环境特有配置
MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE: health,info,metrics
@ -87,6 +142,8 @@ services:
depends_on:
anxin-mysql:
condition: service_healthy
anxin-redis:
condition: service_healthy
deploy:
resources:
limits:
@ -120,7 +177,7 @@ services:
dockerfile: docker/frontend/Dockerfile
target: production
args:
API_BASE_URL: ${API_BASE_URL:-https://api.anxin.com}
API_BASE_URL: ${API_BASE_URL:-http://localhost:8080}
NODE_ENV: production
image: anxin-frontend:prod
container_name: anxin-frontend-prod
@ -130,11 +187,9 @@ services:
NODE_ENV: production
ports:
- "${FRONTEND_PORT:-80}:80"
- "${FRONTEND_SSL_PORT:-443}:443"
volumes:
- frontend-logs-prod:/var/log/nginx
- ./configs/nginx.conf.prod:/etc/nginx/conf.d/default.conf:ro
- ./configs/ssl:/etc/nginx/ssl:ro # SSL证书
networks:
- anxin-prod-network
depends_on:
@ -163,70 +218,6 @@ services:
security_opt:
- no-new-privileges:true
# 生产环境专用服务 - 数据库备份服务
anxin-db-backup:
image: mysql:8.0
container_name: anxin-db-backup-prod
restart: always
environment:
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
BACKUP_SCHEDULE: ${BACKUP_SCHEDULE:-0 2 * * *} # 每天凌晨2点备份
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-30}
volumes:
- mysql-data-prod:/var/lib/mysql:ro
- backup-data-prod:/backup
- ./scripts/backup-prod.sh:/backup.sh:ro
networks:
- anxin-prod-network
depends_on:
anxin-mysql:
condition: service_healthy
command: >
sh -c "
echo 'Starting database backup service for production environment...'
echo '${BACKUP_SCHEDULE} /backup.sh' | crontab -
crond -f
"
deploy:
resources:
limits:
memory: 256M
cpus: '0.2'
logging:
driver: "json-file"
options:
max-size: "50m"
max-file: "5"
# 生产环境专用服务 - 监控服务
anxin-monitor:
image: prom/node-exporter:latest
container_name: anxin-monitor-prod
restart: always
ports:
- "127.0.0.1:9100:9100"
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
networks:
- anxin-prod-network
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
deploy:
resources:
limits:
memory: 128M
cpus: '0.1'
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# 网络配置 - 生产环境
networks:
anxin-prod-network:
@ -242,13 +233,29 @@ networks:
# 卷配置 - 生产环境
volumes:
# Redis数据持久化卷
redis-data-prod:
driver: local
driver_opts:
type: none
o: bind
device: ${REDIS_DATA_PATH:-./data/production/redis}
# Redis日志卷
redis-logs-prod:
driver: local
driver_opts:
type: none
o: bind
device: ${REDIS_LOG_PATH:-./log/production/redis}
# 数据库数据持久化卷
mysql-data-prod:
driver: local
driver_opts:
type: none
o: bind
device: ${MYSQL_DATA_PATH:-/var/lib/anxin/mysql}
device: ${MYSQL_DATA_PATH:-./data/mysql}
# 数据库日志卷
mysql-logs-prod:
@ -256,7 +263,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${MYSQL_LOG_PATH:-/var/log/anxin/mysql}
device: ${MYSQL_LOG_PATH:-./log/mysql}
# 后端应用日志卷
backend-logs-prod:
@ -264,7 +271,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${BACKEND_LOG_PATH:-/var/log/anxin/backend}
device: ${BACKEND_LOG_PATH:-./log/backend}
# 后端文件上传卷
backend-uploads-prod:
@ -272,7 +279,7 @@ volumes:
driver_opts:
type: none
o: bind
device: ${BACKEND_UPLOAD_PATH:-/var/lib/anxin/uploads}
device: ${BACKEND_UPLOAD_PATH:-./data/uploads}
# 前端Nginx日志卷
frontend-logs-prod:
@ -280,12 +287,4 @@ volumes:
driver_opts:
type: none
o: bind
device: ${FRONTEND_LOG_PATH:-/var/log/anxin/nginx}
# 数据库备份卷
backup-data-prod:
driver: local
driver_opts:
type: none
o: bind
device: ${BACKUP_DATA_PATH:-/var/lib/anxin/backups}
device: ${FRONTEND_LOG_PATH:-./log/nginx}
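Because the Redis service now starts with --requirepass taken from REDIS_PASSWORD, a quick hedged check that the credential wiring works end to end (container name from the service definition above; the env file is sourced only to reuse its value in the shell):

```bash
source environments/.env.production
# Expect PONG; redis-cli warns about passing the password on the command line, which is fine here.
docker exec anxin-redis-prod redis-cli -a "$REDIS_PASSWORD" ping
```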


@ -7,7 +7,7 @@ version: '3.8'
services:
# MySQL数据库服务 - 测试环境配置
anxin-mysql:
image: mysql:8.0
image: mysql:8.0.36
container_name: anxin-mysql-staging
restart: unless-stopped
environment:
@ -21,7 +21,7 @@ services:
volumes:
- mysql-data-staging:/var/lib/mysql
- ./database/init:/docker-entrypoint-initdb.d:ro
- ./configs/my.cnf:/etc/mysql/conf.d/my.cnf:ro
- ./configs/my.cnf.staging:/etc/mysql/conf.d/my.cnf:ro
- mysql-logs-staging:/var/log/mysql
networks:
- anxin-staging-network
@ -135,7 +135,7 @@ services:
# 测试环境专用服务 - 数据库备份服务
anxin-db-backup:
image: mysql:8.0
image: mysql:8.0.36
container_name: anxin-db-backup-staging
restart: "no"
environment:


@ -10,23 +10,31 @@ DB_USER=anxin_dev
DB_PASSWORD=dev_password
MYSQL_ROOT_PASSWORD=dev_root_password
# Redis配置
REDIS_HOST=anxin-redis
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DATABASE=0
# 后端服务配置
BACKEND_PORT=8080
SPRING_PROFILES_ACTIVE=dev
JAVA_OPTS=-Xms256m -Xmx512m
JAVA_OPTS="-Xms256m -Xmx512m"
# 前端服务配置
FRONTEND_PORT=3000
API_BASE_URL=http://localhost:8080
# 容器资源配置 (开发环境使用较少资源)
FRONTEND_MEMORY_LIMIT=128
BACKEND_MEMORY_LIMIT=512
DATABASE_MEMORY_LIMIT=256
FRONTEND_MEMORY_LIMIT=128M
BACKEND_MEMORY_LIMIT=512M
DATABASE_MEMORY_LIMIT=256M
REDIS_MEMORY_LIMIT=128M
FRONTEND_CPU_LIMIT=0.25
BACKEND_CPU_LIMIT=0.5
DATABASE_CPU_LIMIT=0.25
REDIS_CPU_LIMIT=0.25
# 日志配置 (开发环境使用DEBUG级别)
LOG_LEVEL=DEBUG
@ -39,6 +47,10 @@ NETWORK_NAME=anxin-dev-network
SUBNET=172.21.0.0/16
# 卷配置
MYSQL_DATA_PATH=./data/dev/mysql
LOG_DATA_PATH=./data/dev/logs
CONFIG_DATA_PATH=./data/dev/configs
MYSQL_DATA_PATH=./data/development/mysql
MYSQL_LOG_PATH=./data/development/mysql-logs
BACKEND_LOG_PATH=./data/development/backend-logs
BACKEND_UPLOAD_PATH=./data/development/uploads
FRONTEND_LOG_PATH=./data/development/nginx-logs
REDIS_DATA_PATH=./data/development/redis
REDIS_LOG_PATH=./data/development/redis-logs


@ -4,25 +4,36 @@ COMPOSE_PROJECT_NAME=anxin-prod
# 数据库配置
DB_HOST=anxin-mysql
DB_PORT=3306
DB_PORT=33068
DB_NAME=anxin_prod
DB_USER=anxin_prod
DB_PASSWORD=CHANGE_ME_PRODUCTION_PASSWORD
MYSQL_ROOT_PASSWORD=CHANGE_ME_ROOT_PASSWORD
DB_PASSWORD="anxin_prod@123"
MYSQL_ROOT_PASSWORD=6r1fJtceNbV@%0mN=
# Redis配置
REDIS_HOST=anxin-redis
REDIS_PORT=6379
REDIS_PORT_EXPORT=16379
REDIS_PASSWORD=123456
REDIS_DATABASE=0
REDIS_MEMORY_LIMIT=256M
REDIS_CPU_LIMIT=0.5
REDIS_DATA_PATH=./data/production/redis
REDIS_LOG_PATH=./log/production/redis
# 后端服务配置
BACKEND_PORT=8080
BACKEND_PORT=7989
SPRING_PROFILES_ACTIVE=prod
JAVA_OPTS=-Xms1024m -Xmx2048m
JAVA_OPTS="-Xms1024m -Xmx2048m"
# 前端服务配置
FRONTEND_PORT=80
API_BASE_URL=https://api.anxin.com
FRONTEND_PORT=7988
API_BASE_URL=http://liantu.tech:7989
# 容器资源配置 (生产环境使用更多资源)
FRONTEND_MEMORY_LIMIT=512
BACKEND_MEMORY_LIMIT=2048
DATABASE_MEMORY_LIMIT=1024
FRONTEND_MEMORY_LIMIT=512M
BACKEND_MEMORY_LIMIT=2048M
DATABASE_MEMORY_LIMIT=1024M
FRONTEND_CPU_LIMIT=1.0
BACKEND_CPU_LIMIT=2.0
@ -39,6 +50,8 @@ NETWORK_NAME=anxin-prod-network
SUBNET=172.23.0.0/16
# 卷配置
MYSQL_DATA_PATH=./data/prod/mysql
LOG_DATA_PATH=./data/prod/logs
CONFIG_DATA_PATH=./data/prod/configs
MYSQL_DATA_PATH=./data/production/mysql
MYSQL_LOG_PATH=./data/production/mysql-logs
BACKEND_LOG_PATH=./data/production/backend-logs
BACKEND_UPLOAD_PATH=./data/production/uploads
FRONTEND_LOG_PATH=./data/production/nginx-logs


@ -13,16 +13,16 @@ MYSQL_ROOT_PASSWORD=staging_root_password
# 后端服务配置
BACKEND_PORT=8080
SPRING_PROFILES_ACTIVE=staging
JAVA_OPTS=-Xms512m -Xmx768m
JAVA_OPTS="-Xms512m -Xmx768m"
# 前端服务配置
FRONTEND_PORT=80
API_BASE_URL=http://staging-api.anxin.com
# 容器资源配置 (测试环境使用中等资源)
FRONTEND_MEMORY_LIMIT=192
BACKEND_MEMORY_LIMIT=768
DATABASE_MEMORY_LIMIT=384
FRONTEND_MEMORY_LIMIT=192M
BACKEND_MEMORY_LIMIT=768M
DATABASE_MEMORY_LIMIT=384M
FRONTEND_CPU_LIMIT=0.5
BACKEND_CPU_LIMIT=0.75
@ -40,5 +40,7 @@ SUBNET=172.22.0.0/16
# 卷配置
MYSQL_DATA_PATH=./data/staging/mysql
LOG_DATA_PATH=./data/staging/logs
CONFIG_DATA_PATH=./data/staging/configs
MYSQL_LOG_PATH=./data/staging/mysql-logs
BACKEND_LOG_PATH=./data/staging/backend-logs
BACKEND_UPLOAD_PATH=./data/staging/uploads
FRONTEND_LOG_PATH=./data/staging/nginx-logs


@ -21,6 +21,9 @@ RUN npm run build:prod
# 第二阶段:运行阶段
FROM nginx:alpine AS production-stage
# 定义构建参数
ARG ENVIRONMENT=development
# 安装 tzdata 用于时区设置
RUN apk add --no-cache tzdata
@ -30,8 +33,8 @@ ENV TZ=Asia/Shanghai
# 从构建阶段复制构建产物到 nginx 默认目录
COPY --from=build-stage /app/dist /usr/share/nginx/html
# 复制 nginx 配置文件
COPY docker/configs/nginx.conf.template /etc/nginx/conf.d/default.conf
# 根据环境复制对应的 nginx 配置文件
COPY docker/configs/nginx.conf.${ENVIRONMENT} /etc/nginx/conf.d/default.conf
# 创建日志目录
RUN mkdir -p /var/log/nginx
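The practical effect of the new ENVIRONMENT build argument, as a hedged sketch: build.sh passes it automatically, and the manual command below assumes the build context is the repository root, as the docker/... COPY paths suggest.

```bash
# ENVIRONMENT=prod makes the image ship docker/configs/nginx.conf.prod as default.conf.
docker build \
  -f docker/frontend/Dockerfile \
  --build-arg ENVIRONMENT=prod \
  --build-arg NODE_ENV=production \
  --build-arg API_BASE_URL=http://localhost:8080 \
  -t anxin-frontend:prod .
```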

docker/push.sh Executable file

@ -0,0 +1,609 @@
#!/bin/bash
# Docker镜像和配置文件推送脚本
# 将生产环境所需的文件和镜像推送到远程服务器
set -e
# ===========================================
# 脚本配置
# ===========================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# 默认配置
DEFAULT_REMOTE_HOST=""
DEFAULT_REMOTE_USER="root"
DEFAULT_REMOTE_PATH="/root/product/anxin"
DEFAULT_SSH_PORT="22"
# 镜像配置
FRONTEND_IMAGE="anxin-frontend"
BACKEND_IMAGE="anxin-backend"
DEFAULT_TAG="latest"
# 临时目录
TEMP_DIR="/tmp/anxin-deploy-$(date +%s)"
# ===========================================
# 颜色定义
# ===========================================
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# ===========================================
# 日志函数
# ===========================================
log_info() {
echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
}
log_step() {
echo -e "${CYAN}[STEP]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1"
}
# ===========================================
# 帮助信息
# ===========================================
show_help() {
cat << EOF
若依框架Docker部署 - 推送脚本
用法: $0 [选项]
选项:
-h, --host HOST 远程服务器地址 (必需)
-u, --user USER 远程服务器用户名 [默认: root]
-p, --port PORT SSH端口 [默认: 22]
-d, --dest PATH 远程服务器目标路径 [默认: /opt/anxin]
-e, --env ENV 环境 (development|staging|production) [默认: production]
-t, --tag TAG 镜像标签 [默认: latest会自动使用环境标签]
--skip-images 跳过镜像推送,仅推送配置文件
--skip-files 跳过配置文件推送,仅推送镜像
--help 显示此帮助信息
示例:
$0 -h 192.168.1.100 # 推送生产环境到指定服务器
$0 -h 192.168.1.100 -e staging # 推送测试环境
$0 -h 192.168.1.100 -u deploy -p 2222 # 指定用户和端口
$0 -h 192.168.1.100 -d /home/deploy/anxin # 指定目标路径
$0 -h 192.168.1.100 -t v1.0.0 # 指定镜像标签
$0 -h 192.168.1.100 --skip-images # 仅推送配置文件
推送内容:
配置文件:
- docker-compose.{ENVIRONMENT}.yml
- deploy.sh
- environments/ 目录
- configs/ 目录 (如果存在)
- database/ 目录 (如果存在)
Docker镜像:
- anxin-frontend:{ENV_TAG}
- anxin-backend:{ENV_TAG}
注意:
- ENV_TAG 根据环境自动确定: dev/staging/prod
- 如果指定 -t 参数,将使用自定义标签
- 推送前请确保已运行 ./build.sh -e {ENVIRONMENT} 构建镜像
EOF
}
# ===========================================
# 参数解析
# ===========================================
parse_args() {
SKIP_IMAGES=false
SKIP_FILES=false
while [[ $# -gt 0 ]]; do
case $1 in
-h|--host)
REMOTE_HOST="$2"
shift 2
;;
-u|--user)
REMOTE_USER="$2"
shift 2
;;
-p|--port)
SSH_PORT="$2"
shift 2
;;
-d|--dest)
REMOTE_PATH="$2"
shift 2
;;
-e|--env)
ENVIRONMENT="$2"
shift 2
;;
-t|--tag)
TAG="$2"
shift 2
;;
--skip-images)
SKIP_IMAGES=true
shift
;;
--skip-files)
SKIP_FILES=true
shift
;;
--help)
show_help
exit 0
;;
*)
log_error "未知参数: $1"
show_help
exit 1
;;
esac
done
# 设置默认值
REMOTE_HOST=${REMOTE_HOST:-$DEFAULT_REMOTE_HOST}
REMOTE_USER=${REMOTE_USER:-$DEFAULT_REMOTE_USER}
SSH_PORT=${SSH_PORT:-$DEFAULT_SSH_PORT}
REMOTE_PATH=${REMOTE_PATH:-$DEFAULT_REMOTE_PATH}
ENVIRONMENT=${ENVIRONMENT:-"production"} # 推送脚本默认为生产环境
TAG=${TAG:-$DEFAULT_TAG}
# 验证必需参数
if [[ -z "$REMOTE_HOST" ]]; then
log_error "必须指定远程服务器地址 (-h|--host)"
show_help
exit 1
fi
}
# ===========================================
# 系统检查
# ===========================================
check_prerequisites() {
log_step "检查系统依赖..."
# 检查SSH
if ! command -v ssh &> /dev/null; then
log_error "SSH未安装或不在PATH中"
exit 1
fi
# 检查SCP
if ! command -v scp &> /dev/null; then
log_error "SCP未安装或不在PATH中"
exit 1
fi
# 检查Docker (如果需要推送镜像)
if [[ "$SKIP_IMAGES" != "true" ]] && ! command -v docker &> /dev/null; then
log_error "Docker未安装或不在PATH中"
exit 1
fi
log_success "系统依赖检查通过"
}
# ===========================================
# SSH执行函数
# ===========================================
# ===========================================
# SSH连接管理
# ===========================================
SSH_CONTROL_PATH=""
SSH_MASTER_STARTED=false
# 启动SSH主连接
start_ssh_master() {
if [[ "$SSH_MASTER_STARTED" == "true" ]]; then
return 0
fi
SSH_CONTROL_PATH="/tmp/ssh-anxin-deploy-$$"
log_info "启动SSH主连接 (连接复用)..."
# 启动SSH主连接
ssh -M -S "$SSH_CONTROL_PATH" -f -N -p "$SSH_PORT" \
-o ControlPersist=600 \
-o ConnectTimeout=30 \
-o ServerAliveInterval=60 \
-o ServerAliveCountMax=3 \
"$REMOTE_USER@$REMOTE_HOST"
if [[ $? -eq 0 ]]; then
SSH_MASTER_STARTED=true
log_success "SSH主连接启动成功"
else
log_error "SSH主连接启动失败"
return 1
fi
}
# 停止SSH主连接
stop_ssh_master() {
if [[ "$SSH_MASTER_STARTED" == "true" && -n "$SSH_CONTROL_PATH" ]]; then
log_info "关闭SSH主连接..."
ssh -S "$SSH_CONTROL_PATH" -O exit "$REMOTE_USER@$REMOTE_HOST" 2>/dev/null || true
SSH_MASTER_STARTED=false
fi
}
# 通用SCP传输函数支持大文件传输和连接复用
scp_transfer() {
local source="$1"
local destination="$2"
local is_recursive=${3:-false}
local scp_options="-P $SSH_PORT -o ConnectTimeout=30 -o ServerAliveInterval=60 -o ServerAliveCountMax=3"
# 如果SSH主连接已启动使用连接复用
if [[ "$SSH_MASTER_STARTED" == "true" && -n "$SSH_CONTROL_PATH" ]]; then
scp_options="$scp_options -o ControlPath=$SSH_CONTROL_PATH"
fi
if [[ "$is_recursive" == "true" ]]; then
scp_options="$scp_options -r"
fi
# 对于大文件,显示传输进度
if [[ -f "$source" ]]; then
local file_size=$(stat -f%z "$source" 2>/dev/null || stat -c%s "$source" 2>/dev/null || echo "0")
if [[ $file_size -gt 10485760 ]]; then # 大于10MB的文件显示进度
scp_options="$scp_options -v"
fi
fi
scp $scp_options "$source" "$destination"
}
# 通用SSH执行函数支持密钥和密码认证
ssh_execute() {
local command="$1"
local show_output=${2:-true}
local timeout=${3:-300} # 默认5分钟超时
local ssh_options="-p $SSH_PORT -o ConnectTimeout=30 -o ServerAliveInterval=60 -o ServerAliveCountMax=3"
# 如果SSH主连接已启动使用连接复用
if [[ "$SSH_MASTER_STARTED" == "true" && -n "$SSH_CONTROL_PATH" ]]; then
ssh_options="$ssh_options -o ControlPath=$SSH_CONTROL_PATH"
if [[ "$show_output" == "true" ]]; then
ssh $ssh_options "$REMOTE_USER@$REMOTE_HOST" "$command"
else
ssh $ssh_options "$REMOTE_USER@$REMOTE_HOST" "$command" 2>/dev/null
fi
return $?
fi
# 首先尝试使用SSH密钥连接非交互式
if ssh $ssh_options -o BatchMode=yes "$REMOTE_USER@$REMOTE_HOST" "$command" 2>/dev/null; then
return 0
fi
# 如果SSH密钥失败使用交互式连接允许密码输入
if [[ "$show_output" == "true" ]]; then
ssh $ssh_options "$REMOTE_USER@$REMOTE_HOST" "$command"
else
ssh $ssh_options "$REMOTE_USER@$REMOTE_HOST" "$command" 2>/dev/null
fi
}
# ===========================================
# 连接测试
# ===========================================
test_connection() {
log_step "测试远程服务器连接..."
# 首先尝试使用SSH密钥连接非交互式
if ssh -p "$SSH_PORT" -o ConnectTimeout=10 -o BatchMode=yes "$REMOTE_USER@$REMOTE_HOST" "echo 'Connection test successful'" &>/dev/null; then
log_success "远程服务器连接测试成功 (使用SSH密钥)"
return 0
fi
# 如果SSH密钥失败尝试交互式连接允许密码输入
log_info "SSH密钥认证失败尝试密码认证..."
if ssh -p "$SSH_PORT" -o ConnectTimeout=10 -o PasswordAuthentication=yes "$REMOTE_USER@$REMOTE_HOST" "echo 'Connection test successful'"; then
log_success "远程服务器连接测试成功 (使用密码认证)"
return 0
else
log_error "无法连接到远程服务器: $REMOTE_USER@$REMOTE_HOST:$SSH_PORT"
log_info "请确保:"
log_info "1. 服务器地址和端口正确"
log_info "2. SSH密钥已配置或密码正确"
log_info "3. 用户有相应权限"
log_info "4. 服务器SSH服务正在运行"
exit 1
fi
}
# ===========================================
# 准备推送
# ===========================================
prepare_push() {
log_step "准备推送环境..."
# 创建临时目录
mkdir -p "$TEMP_DIR"
log_info "创建临时目录: $TEMP_DIR"
# 在远程服务器创建目标目录
log_info "在远程服务器创建目标目录..."
ssh_execute "mkdir -p $REMOTE_PATH"
log_success "推送环境准备完成"
}
# ===========================================
# 推送配置文件
# ===========================================
push_config_files() {
if [[ "$SKIP_FILES" == "true" ]]; then
log_info "跳过配置文件推送"
return 0
fi
log_step "推送配置文件..."
cd "$SCRIPT_DIR"
# 推送对应环境的docker-compose文件
local compose_file="docker-compose.${ENVIRONMENT}.yml"
log_info "推送 $compose_file..."
if [[ -f "$compose_file" ]]; then
scp_transfer "$compose_file" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/"
else
log_error "Docker Compose文件不存在: $compose_file"
return 1
fi
# 推送deploy.sh
log_info "推送 deploy.sh..."
scp_transfer "deploy.sh" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/"
# 推送environments目录
log_info "推送 environments/ 目录..."
scp_transfer "environments" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/" true
# 推送configs目录如果存在
if [[ -d "configs" ]]; then
log_info "推送 configs/ 目录..."
scp_transfer "configs" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/" true
fi
# 推送database目录如果存在
if [[ -d "database" ]]; then
log_info "推送 database/ 目录..."
scp_transfer "database" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/" true
fi
# 设置deploy.sh执行权限
log_info "设置deploy.sh执行权限..."
ssh_execute "chmod +x $REMOTE_PATH/deploy.sh"
log_success "配置文件推送完成"
}
# ===========================================
# 推送Docker镜像
# ===========================================
push_docker_images() {
if [[ "$SKIP_IMAGES" == "true" ]]; then
log_info "跳过Docker镜像推送"
return 0
fi
log_step "推送Docker镜像..."
# 根据环境确定镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
# 如果指定了自定义标签,使用自定义标签,否则使用环境标签
local image_tag
if [[ "$TAG" != "latest" ]]; then
image_tag="$TAG"
else
image_tag="$env_tag"
fi
local images=("${FRONTEND_IMAGE}:${image_tag}" "${BACKEND_IMAGE}:${image_tag}")
for image in "${images[@]}"; do
log_info "处理镜像: $image"
# 检查本地镜像是否存在
if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "^$image$"; then
log_error "本地镜像不存在: $image"
log_info "请先运行构建脚本: ./build.sh -e $ENVIRONMENT"
if [[ "$TAG" != "latest" ]]; then
log_info "或者: ./build.sh -e $ENVIRONMENT -t $TAG"
fi
exit 1
fi
# 导出镜像到tar文件
local image_file="$TEMP_DIR/$(echo $image | tr ':/' '_').tar"
log_info "导出镜像到: $image_file"
docker save -o "$image_file" "$image"
# 推送镜像文件到远程服务器
log_info "推送镜像文件到远程服务器..."
scp_transfer "$image_file" "$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH/"
# 在远程服务器加载镜像
local remote_image_file="$REMOTE_PATH/$(basename $image_file)"
log_info "在远程服务器加载镜像..."
ssh_execute "docker load -i $remote_image_file && rm $remote_image_file"
log_success "镜像推送完成: $image"
done
log_success "所有Docker镜像推送完成"
}
# ===========================================
# 验证推送结果
# ===========================================
verify_push() {
log_step "验证推送结果..."
# 验证配置文件
if [[ "$SKIP_FILES" != "true" ]]; then
log_info "验证配置文件..."
local files=("docker-compose.${ENVIRONMENT}.yml" "deploy.sh" "environments")
for file in "${files[@]}"; do
if ssh_execute "test -e $REMOTE_PATH/$file" false; then
log_success "$file"
else
log_error "$file 不存在"
fi
done
fi
# 验证Docker镜像
if [[ "$SKIP_IMAGES" != "true" ]]; then
log_info "验证Docker镜像..."
# 根据环境确定镜像标签
local env_tag
case $ENVIRONMENT in
production) env_tag="prod" ;;
staging) env_tag="staging" ;;
development) env_tag="dev" ;;
esac
# 如果指定了自定义标签,使用自定义标签,否则使用环境标签
local image_tag
if [[ "$TAG" != "latest" ]]; then
image_tag="$TAG"
else
image_tag="$env_tag"
fi
local images=("${FRONTEND_IMAGE}:${image_tag}" "${BACKEND_IMAGE}:${image_tag}")
for image in "${images[@]}"; do
if ssh_execute "docker images --format '{{.Repository}}:{{.Tag}}' | grep -q '^$image$'" false; then
log_success "$image"
else
log_error "$image 不存在"
fi
done
fi
log_success "推送结果验证完成"
}
# ===========================================
# 显示部署信息
# ===========================================
show_deploy_info() {
log_info "推送完成! 部署信息:"
echo "----------------------------------------"
echo "远程服务器: $REMOTE_USER@$REMOTE_HOST:$SSH_PORT"
echo "部署路径: $REMOTE_PATH"
echo "环境: $ENVIRONMENT"
echo "镜像标签: $TAG"
echo "----------------------------------------"
echo "下一步操作:"
echo "1. 登录远程服务器:"
echo " ssh -p $SSH_PORT $REMOTE_USER@$REMOTE_HOST"
echo ""
echo "2. 进入部署目录:"
echo " cd $REMOTE_PATH"
echo ""
echo "3. 启动服务:"
echo " ./deploy.sh start -e $ENVIRONMENT"
echo ""
echo "4. 查看服务状态:"
echo " ./deploy.sh status -e $ENVIRONMENT"
echo ""
echo "5. 查看服务日志:"
echo " ./deploy.sh logs -e $ENVIRONMENT"
echo "----------------------------------------"
}
# ===========================================
# 主函数
# ===========================================
main() {
# 显示脚本信息
log_info "若依框架Docker部署 - 推送脚本"
log_info "执行时间: $(date '+%Y-%m-%d %H:%M:%S')"
# 解析参数
parse_args "$@"
# 检查系统依赖
check_prerequisites
# 测试连接
test_connection
# 如果连接测试成功但不是使用SSH密钥启动SSH主连接进行连接复用
if ! ssh -p "$SSH_PORT" -o ConnectTimeout=10 -o BatchMode=yes "$REMOTE_USER@$REMOTE_HOST" "echo 'test'" &>/dev/null; then
log_info "检测到密码认证启用SSH连接复用以减少密码输入次数"
start_ssh_master
fi
# 准备推送
prepare_push
# 推送配置文件
push_config_files
# 推送Docker镜像
push_docker_images
# 验证推送结果
verify_push
# 显示部署信息
show_deploy_info
log_success "推送脚本执行完成!"
}
# ===========================================
# 错误处理和清理
# ===========================================
cleanup() {
# 停止SSH主连接
stop_ssh_master
# 清理临时文件
if [[ -n "$TEMP_DIR" && -d "$TEMP_DIR" ]]; then
log_info "清理临时文件..."
rm -rf "$TEMP_DIR"
log_success "临时文件清理完成"
fi
}
trap 'log_error "脚本执行被中断"; cleanup; exit 1' INT TERM
trap 'cleanup' EXIT
# 执行主函数
main "$@"


@ -0,0 +1,231 @@
#!/bin/bash
# 环境变量检查脚本
# 用于验证容器中的环境变量是否正确加载
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查环境文件
check_env_file() {
log_info "检查环境配置文件..."
if [[ -f "environments/.env.production" ]]; then
log_success "✓ 找到生产环境配置文件"
echo "环境文件内容:"
echo "----------------------------------------"
cat environments/.env.production | grep -E "^(DB_|REDIS_|BACKEND_|SPRING_)" | while read line; do
if [[ $line == *"PASSWORD"* ]]; then
echo " ${line%%=*}=[HIDDEN]"
else
echo " $line"
fi
done
echo "----------------------------------------"
else
log_error "✗ 未找到生产环境配置文件"
return 1
fi
}
# 检查docker-compose配置
check_compose_config() {
log_info "检查docker-compose环境变量配置..."
if [[ -f "docker-compose.production.yml" ]]; then
log_success "✓ 找到docker-compose配置文件"
# 显示后端服务的环境变量配置
echo "后端服务环境变量配置:"
echo "----------------------------------------"
sed -n '/anxin-backend:/,/ports:/p' docker-compose.production.yml | grep -E "^\s+[A-Z_]+:" | head -20
echo "----------------------------------------"
else
log_error "✗ 未找到docker-compose配置文件"
return 1
fi
}
# 检查容器环境变量
check_container_env() {
log_info "检查容器内环境变量..."
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
log_success "✓ 后端容器正在运行"
echo "容器内环境变量:"
echo "----------------------------------------"
echo "数据库配置:"
docker exec anxin-backend-prod env | grep -E "^DB_" | while read line; do
if [[ $line == *"PASSWORD"* ]]; then
echo " ${line%%=*}=[HIDDEN]"
else
echo " $line"
fi
done
echo ""
echo "Redis配置:"
docker exec anxin-backend-prod env | grep -E "^REDIS_" | while read line; do
if [[ $line == *"PASSWORD"* ]]; then
echo " ${line%%=*}=[HIDDEN]"
else
echo " $line"
fi
done
echo ""
echo "Spring配置:"
docker exec anxin-backend-prod env | grep -E "^SPRING_" | head -5
echo ""
echo "其他配置:"
docker exec anxin-backend-prod env | grep -E "^(LOG_LEVEL|SERVER_PORT|JAVA_OPTS)" | head -5
echo "----------------------------------------"
else
log_error "✗ 后端容器未运行"
return 1
fi
}
# 对比配置差异
compare_configs() {
log_info "对比配置差异..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
echo "配置对比:"
echo "----------------------------------------"
# 检查数据库配置
echo "数据库配置对比:"
echo " 环境文件 DB_HOST: $DB_HOST"
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
container_db_host=$(docker exec anxin-backend-prod env | grep "^DB_HOST=" | cut -d'=' -f2)
echo " 容器内 DB_HOST: $container_db_host"
if [[ "$DB_HOST" == "$container_db_host" ]]; then
log_success " ✓ DB_HOST 配置一致"
else
log_error " ✗ DB_HOST 配置不一致"
fi
fi
echo ""
echo "Redis配置对比:"
echo " 环境文件 REDIS_HOST: $REDIS_HOST"
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
container_redis_host=$(docker exec anxin-backend-prod env | grep "^REDIS_HOST=" | cut -d'=' -f2 || echo "未设置")
echo " 容器内 REDIS_HOST: $container_redis_host"
if [[ "$REDIS_HOST" == "$container_redis_host" ]]; then
log_success " ✓ REDIS_HOST 配置一致"
else
log_error " ✗ REDIS_HOST 配置不一致"
fi
fi
echo "----------------------------------------"
fi
}
# 测试配置生效
test_config_effectiveness() {
log_info "测试配置是否生效..."
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
# 检查应用启动日志中的配置信息
echo "应用启动日志中的配置信息:"
echo "----------------------------------------"
# 查找数据库连接信息管道末尾的tail即使无匹配也会返回0因此需按输出内容判断
local jdbc_info redis_info
jdbc_info=$(./deploy.sh logs backend -e production --tail 100 | grep -i "jdbc:mysql" | tail -1 || true)
if [[ -n "$jdbc_info" ]]; then
echo "$jdbc_info"
log_success "✓ 找到数据库连接配置"
else
log_warn "未找到数据库连接配置"
fi
# 查找Redis连接信息
redis_info=$(./deploy.sh logs backend -e production --tail 100 | grep -i "redis" | tail -3 || true)
if [[ -n "$redis_info" ]]; then
echo "$redis_info"
log_success "✓ 找到Redis连接配置"
else
log_warn "未找到Redis连接配置"
fi
echo "----------------------------------------"
fi
}
# 显示修复建议
show_fix_suggestions() {
log_info "修复建议:"
echo "========================================"
echo "如果环境变量未正确加载,请按以下步骤操作:"
echo ""
echo "1. 完全重新创建容器:"
echo " ./deploy.sh down -e production"
echo " ./deploy.sh up -e production"
echo ""
echo "2. 检查环境文件是否被正确读取:"
echo " docker-compose -f docker-compose.production.yml --env-file environments/.env.production config | grep -A 5 -B 5 REDIS"
echo ""
echo "3. 手动验证环境变量:"
echo " docker exec anxin-backend-prod env | grep -E '^(DB_|REDIS_)'"
echo ""
echo "4. 如果仍有问题检查docker-compose.yml中是否缺少环境变量定义"
echo "========================================"
}
# 主函数
main() {
log_info "环境变量检查工具"
echo "========================================"
check_env_file
echo ""
check_compose_config
echo ""
check_container_env
echo ""
compare_configs
echo ""
test_config_effectiveness
echo ""
show_fix_suggestions
echo "========================================"
log_success "检查完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi
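# 使用示例(示意;脚本文件名与部署目录路径均为假设值,请以实际推送后的目录结构为准):
#   cd /opt/anxin/docker              # 进入包含 deploy.sh 与 environments/ 的部署目录(假设路径)
#   bash scripts/check-env.sh         # 依次检查环境文件、compose 配置与容器内环境变量,并输出修复建议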


@ -0,0 +1,232 @@
#!/bin/bash
# 部署并验证日志修复脚本
# 用于在服务器上部署更新的后端镜像并验证日志功能
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查是否在正确的目录
check_directory() {
if [[ ! -f "deploy.sh" ]]; then
log_error "请在部署目录中运行此脚本 (应包含 deploy.sh)"
exit 1
fi
}
# 停止后端服务
stop_backend() {
log_info "停止后端服务..."
./deploy.sh stop backend -e production || true
sleep 5
}
# 启动后端服务
start_backend() {
log_info "启动后端服务..."
./deploy.sh start backend -e production
# 等待服务启动
log_info "等待服务启动..."
sleep 30
}
# 检查服务状态
check_service_status() {
log_info "检查服务状态..."
./deploy.sh status backend -e production
}
# 检查日志目录
check_log_directories() {
log_info "检查容器内日志目录..."
# 检查 /app/logs 目录
if docker exec anxin-backend-prod ls -la /app/logs 2>/dev/null; then
log_success "✓ /app/logs 目录存在"
else
log_error "✗ /app/logs 目录不存在"
fi
# 检查符号链接
if docker exec anxin-backend-prod ls -la /home/ruoyi/logs 2>/dev/null; then
log_success "✓ /home/ruoyi/logs 符号链接存在"
else
log_error "✗ /home/ruoyi/logs 符号链接不存在"
fi
# 检查权限
docker exec anxin-backend-prod ls -la /app/ | grep logs || true
docker exec anxin-backend-prod ls -la /home/ruoyi/ | grep logs || true
}
# 检查启动日志
check_startup_logs() {
log_info "检查启动日志..."
# 显示最近的启动日志
./deploy.sh logs backend -e production --tail 50
# 检查是否有日志错误
if ./deploy.sh logs backend -e production --tail 100 | grep -i "Read-only file system"; then
log_error "仍然存在只读文件系统错误"
return 1
else
log_success "✓ 未发现只读文件系统错误"
fi
# 检查是否有其他日志相关错误
if ./deploy.sh logs backend -e production --tail 100 | grep -i "FileNotFoundException.*logs"; then
log_error "仍然存在日志文件未找到错误"
return 1
else
log_success "✓ 未发现日志文件未找到错误"
fi
}
# 测试日志文件创建
test_log_file_creation() {
log_info "测试日志文件创建..."
# 等待应用完全启动
sleep 10
# 检查日志文件是否被创建
if docker exec anxin-backend-prod ls -la /app/logs/ 2>/dev/null | grep -E "\.(log|out)$"; then
log_success "✓ 日志文件已创建"
docker exec anxin-backend-prod ls -la /app/logs/
else
log_warn "日志文件可能还未创建,继续等待..."
sleep 20
if docker exec anxin-backend-prod ls -la /app/logs/ 2>/dev/null | grep -E "\.(log|out)$"; then
log_success "✓ 日志文件已创建"
docker exec anxin-backend-prod ls -la /app/logs/
else
log_error "✗ 日志文件未创建"
fi
fi
}
# 运行诊断脚本
run_diagnostics() {
log_info "运行日志诊断脚本..."
# 复制诊断脚本到容器
if [[ -f "scripts/fix-logging-issues.sh" ]]; then
docker cp scripts/fix-logging-issues.sh anxin-backend-prod:/tmp/
docker exec anxin-backend-prod chmod +x /tmp/fix-logging-issues.sh
docker exec anxin-backend-prod /tmp/fix-logging-issues.sh check
else
log_warn "诊断脚本不存在,跳过诊断"
fi
}
# 检查应用健康状态
check_application_health() {
log_info "检查应用健康状态..."
# 等待健康检查
sleep 30
# 检查容器健康状态
if docker ps --filter "name=anxin-backend-prod" --format "table {{.Names}}\t{{.Status}}" | grep -i healthy; then
log_success "✓ 应用健康检查通过"
else
log_warn "应用健康检查状态未知,检查详细状态..."
docker ps --filter "name=anxin-backend-prod"
fi
}
# 显示部署总结
show_summary() {
echo ""
log_info "=== 部署验证总结 ==="
echo "----------------------------------------"
# 服务状态
echo "服务状态:"
./deploy.sh status backend -e production | grep -E "(anxin-backend-prod|State|Health)" || true
echo ""
echo "日志目录:"
docker exec anxin-backend-prod ls -la /app/logs/ 2>/dev/null | head -10 || echo "无法访问日志目录"
echo ""
echo "最近日志 (最后10行):"
./deploy.sh logs backend -e production --tail 10 || true
echo "----------------------------------------"
log_info "如果仍有问题,请检查:"
echo "1. 容器启动日志: ./deploy.sh logs backend -e production"
echo "2. 容器内部状态: docker exec -it anxin-backend-prod /bin/bash"
echo "3. 日志目录权限: docker exec anxin-backend-prod ls -la /app/logs /home/ruoyi/logs"
}
# 主函数
main() {
log_info "开始部署并验证日志修复..."
echo "========================================"
# 检查环境
check_directory
# 停止服务
stop_backend
# 启动服务
start_backend
# 检查服务状态
check_service_status
# 检查日志目录
check_log_directories
# 检查启动日志
if check_startup_logs; then
log_success "启动日志检查通过"
else
log_error "启动日志检查失败"
fi
# 测试日志文件创建
test_log_file_creation
# 运行诊断
run_diagnostics
# 检查应用健康
check_application_health
# 显示总结
show_summary
echo "========================================"
log_success "部署验证完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi


@ -0,0 +1,245 @@
#!/bin/bash
# 数据库连接诊断脚本
# 用于诊断和修复数据库连接池问题
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查数据库服务状态
check_database_service() {
log_info "检查数据库服务状态..."
if docker ps --filter "name=anxin-mysql-prod" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep anxin-mysql-prod; then
log_success "✓ MySQL容器正在运行"
# 检查健康状态
local health=$(docker inspect anxin-mysql-prod --format='{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
log_info "MySQL健康状态: $health"
if [[ "$health" == "healthy" ]]; then
log_success "✓ MySQL容器健康检查通过"
else
log_warn "MySQL容器健康检查未通过"
fi
else
log_error "✗ MySQL容器未运行"
return 1
fi
}
# 测试数据库连接
test_database_connection() {
log_info "测试数据库连接..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 测试root连接
log_info "测试root用户连接..."
if docker exec anxin-mysql-prod mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "SELECT 1;" 2>/dev/null; then
log_success "✓ root用户连接成功"
else
log_error "✗ root用户连接失败"
fi
# 测试应用用户连接
log_info "测试应用用户连接..."
if docker exec anxin-mysql-prod mysql -u"$DB_USER" -p"$DB_PASSWORD" -e "SELECT 1;" "$DB_NAME" 2>/dev/null; then
log_success "✓ 应用用户连接成功"
else
log_error "✗ 应用用户连接失败"
fi
# 检查数据库是否存在
log_info "检查数据库是否存在..."
if docker exec anxin-mysql-prod mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "SHOW DATABASES;" | grep -q "$DB_NAME"; then
log_success "✓ 数据库 $DB_NAME 存在"
else
log_error "✗ 数据库 $DB_NAME 不存在"
fi
fi
}
# 检查后端服务连接配置
check_backend_connection_config() {
log_info "检查后端服务连接配置..."
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
log_success "✓ 后端容器正在运行"
# 检查环境变量
echo "数据库连接环境变量:"
docker exec anxin-backend-prod env | grep -E "^DB_" | while read line; do
if [[ $line == *"PASSWORD"* ]]; then
echo " ${line%%=*}=[HIDDEN]"
else
echo " $line"
fi
done
# 检查网络连通性
log_info "测试容器间网络连通性..."
if docker exec anxin-backend-prod nc -z anxin-mysql 3306 2>/dev/null; then
log_success "✓ 后端容器可以连接到MySQL容器"
else
log_error "✗ 后端容器无法连接到MySQL容器"
fi
else
log_error "✗ 后端容器未运行"
fi
}
# 检查应用日志中的连接错误
check_connection_errors() {
log_info "检查应用日志中的连接错误..."
echo "最近的数据库连接错误:"
echo "----------------------------------------"
./deploy.sh logs backend -e production --tail 100 | grep -i -E "(connection|datasource|mysql|jdbc)" | tail -10 || echo "未找到相关日志"
echo "----------------------------------------"
# 检查是否有连接池相关错误
if ./deploy.sh logs backend -e production --tail 200 | grep -i "DataSourceClosedException"; then
log_error "发现数据源关闭异常"
fi
if ./deploy.sh logs backend -e production --tail 200 | grep -i "CannotGetJdbcConnectionException"; then
log_error "发现JDBC连接获取异常"
fi
}
# 修复数据库连接问题
fix_database_connection() {
log_info "尝试修复数据库连接问题..."
# 1. 重启MySQL服务
log_info "重启MySQL服务..."
./deploy.sh restart mysql -e production
sleep 10
# 2. 等待MySQL完全启动
log_info "等待MySQL服务完全启动..."
local max_attempts=30
local attempt=1
while [ $attempt -le $max_attempts ]; do
if docker exec anxin-mysql-prod mysqladmin ping -h localhost -u root -p"$MYSQL_ROOT_PASSWORD" 2>/dev/null; then
log_success "MySQL服务已就绪"
break
fi
echo "等待MySQL服务... (尝试 $attempt/$max_attempts)"
sleep 2
attempt=$((attempt + 1))
done
if [ $attempt -gt $max_attempts ]; then
log_error "MySQL服务启动超时"
return 1
fi
# 3. 重启后端服务
log_info "重启后端服务..."
./deploy.sh restart backend -e production
sleep 15
# 4. 检查修复结果
log_info "检查修复结果..."
if ./deploy.sh logs backend -e production --tail 20 | grep -i "started.*in"; then
log_success "✓ 后端服务启动成功"
else
log_warn "后端服务启动状态未知,请检查日志"
fi
}
# 显示修复建议
show_fix_suggestions() {
log_info "修复建议:"
echo "========================================"
echo "如果问题仍然存在,请尝试以下步骤:"
echo ""
echo "1. 完全重启所有服务:"
echo " ./deploy.sh down -e production"
echo " ./deploy.sh up -e production"
echo ""
echo "2. 检查数据库用户权限:"
echo " docker exec anxin-mysql-prod mysql -uroot -p"
echo " SHOW GRANTS FOR 'anxin_prod'@'%';"
echo ""
echo "3. 检查数据库配置:"
echo " docker exec anxin-mysql-prod mysql -uroot -p -e \"SHOW VARIABLES LIKE 'max_connections';\""
echo ""
echo "4. 如果是连接池配置问题检查application-docker.yml中的Druid配置"
echo ""
echo "5. 监控资源使用情况:"
echo " docker stats anxin-mysql-prod anxin-backend-prod"
echo "========================================"
}
# 主函数
main() {
local action=${1:-"diagnose"}
log_info "数据库连接诊断工具"
echo "========================================"
case $action in
"diagnose")
check_database_service
echo ""
test_database_connection
echo ""
check_backend_connection_config
echo ""
check_connection_errors
echo ""
show_fix_suggestions
;;
"fix")
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
fix_database_connection
echo ""
main "diagnose"
else
log_error "未找到环境配置文件"
fi
;;
*)
echo "用法: $0 [diagnose|fix]"
echo " diagnose - 诊断数据库连接问题(默认)"
echo " fix - 尝试修复数据库连接问题"
exit 1
;;
esac
echo "========================================"
log_success "操作完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi


@ -0,0 +1,283 @@
#!/bin/bash
# 前端服务诊断脚本
# 用于诊断前端服务启动问题
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查前端容器状态
check_frontend_container() {
log_info "检查前端容器状态..."
# 检查容器是否存在
if docker ps -a --filter "name=anxin-frontend-prod" --format "{{.Names}}" | grep -q "anxin-frontend-prod"; then
log_success "✓ 前端容器存在"
# 显示容器状态
echo "容器状态:"
docker ps -a --filter "name=anxin-frontend-prod" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
# 检查容器是否正在运行
if docker ps --filter "name=anxin-frontend-prod" --format "{{.Names}}" | grep -q "anxin-frontend-prod"; then
log_success "✓ 前端容器正在运行"
else
log_error "✗ 前端容器已停止"
# 显示退出代码
local exit_code=$(docker inspect anxin-frontend-prod --format='{{.State.ExitCode}}' 2>/dev/null || echo "unknown")
log_info "退出代码: $exit_code"
fi
else
log_error "✗ 前端容器不存在"
return 1
fi
}
# 检查前端镜像
check_frontend_image() {
log_info "检查前端镜像..."
if docker images --filter "reference=anxin-frontend:prod" --format "{{.Repository}}:{{.Tag}}" | grep -q "anxin-frontend:prod"; then
log_success "✓ 前端镜像存在"
# 显示镜像信息
echo "镜像信息:"
docker images --filter "reference=anxin-frontend:prod" --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}"
else
log_error "✗ 前端镜像不存在"
log_info "需要构建前端镜像"
fi
}
# 检查前端依赖服务
check_frontend_dependencies() {
log_info "检查前端依赖服务..."
# 检查后端服务
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
log_success "✓ 后端服务正在运行"
# 检查后端健康状态
local backend_health=$(docker inspect anxin-backend-prod --format='{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
log_info "后端健康状态: $backend_health"
else
log_error "✗ 后端服务未运行"
fi
}
# 检查前端日志
check_frontend_logs() {
log_info "检查前端启动日志..."
if docker ps -a --filter "name=anxin-frontend-prod" --format "{{.Names}}" | grep -q "anxin-frontend-prod"; then
echo "最近的前端日志:"
echo "----------------------------------------"
docker logs anxin-frontend-prod --tail 50 2>&1 || echo "无法获取日志"
echo "----------------------------------------"
else
log_warn "前端容器不存在,无法查看日志"
fi
}
# 检查前端配置
check_frontend_config() {
log_info "检查前端配置..."
# 检查docker-compose配置
if [[ -f "docker-compose.production.yml" ]]; then
log_success "✓ 找到docker-compose配置文件"
# 显示前端服务配置
echo "前端服务配置:"
echo "----------------------------------------"
sed -n '/anxin-frontend:/,/^ [a-zA-Z]/p' docker-compose.production.yml | head -20
echo "----------------------------------------"
else
log_error "✗ 未找到docker-compose配置文件"
fi
# 检查nginx配置
if [[ -f "configs/nginx.conf.prod" ]]; then
log_success "✓ 找到nginx生产环境配置"
else
log_warn "未找到nginx生产环境配置文件"
fi
}
# 检查端口占用
check_port_usage() {
log_info "检查端口占用情况..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
local frontend_port=${FRONTEND_PORT:-80}
local ssl_port=${FRONTEND_SSL_PORT:-443}
# 检查HTTP端口
log_info "检查端口 $frontend_port..."
if netstat -tlnp 2>/dev/null | grep ":$frontend_port " || ss -tlnp 2>/dev/null | grep ":$frontend_port "; then
log_success "✓ 端口 $frontend_port 正在使用"
else
log_warn "端口 $frontend_port 未被使用"
fi
# 检查HTTPS端口
if [[ "$ssl_port" != "443" ]] || [[ "$frontend_port" != "80" ]]; then
log_info "检查端口 $ssl_port..."
if netstat -tlnp 2>/dev/null | grep ":$ssl_port " || ss -tlnp 2>/dev/null | grep ":$ssl_port "; then
log_success "✓ 端口 $ssl_port 正在使用"
else
log_warn "端口 $ssl_port 未被使用"
fi
fi
fi
}
# 检查前端文件
check_frontend_files() {
log_info "检查前端相关文件..."
# 检查Dockerfile
if [[ -f "frontend/Dockerfile" ]]; then
log_success "✓ 找到前端Dockerfile"
else
log_error "✗ 未找到前端Dockerfile"
fi
# 检查前端源码目录
if [[ -d "../RuoYi-Vue3" ]]; then
log_success "✓ 找到前端源码目录"
else
log_error "✗ 未找到前端源码目录"
fi
}
# 尝试手动启动前端服务
manual_start_frontend() {
log_info "尝试手动启动前端服务..."
# 停止现有容器
docker stop anxin-frontend-prod 2>/dev/null || true
docker rm anxin-frontend-prod 2>/dev/null || true
# 尝试启动前端服务
log_info "启动前端服务..."
if ./deploy.sh start frontend -e production; then
log_success "✓ 前端服务启动命令执行成功"
# 等待启动
sleep 10
# 检查启动结果
if docker ps --filter "name=anxin-frontend-prod" --format "{{.Names}}" | grep -q "anxin-frontend-prod"; then
log_success "✓ 前端服务启动成功"
else
log_error "✗ 前端服务启动失败"
fi
else
log_error "✗ 前端服务启动命令失败"
fi
}
# 显示修复建议
show_fix_suggestions() {
log_info "修复建议:"
echo "========================================"
echo "根据诊断结果,可能的解决方案:"
echo ""
echo "1. 如果前端镜像不存在:"
echo " ./build.sh build -c frontend -e production"
echo ""
echo "2. 如果容器启动失败:"
echo " ./deploy.sh logs frontend -e production"
echo " 检查具体错误信息"
echo ""
echo "3. 如果依赖服务未就绪:"
echo " ./deploy.sh restart backend -e production"
echo " 等待后端服务完全启动后再启动前端"
echo ""
echo "4. 如果端口冲突:"
echo " 检查environments/.env.production中的端口配置"
echo " 确保端口未被其他服务占用"
echo ""
echo "5. 如果配置文件缺失:"
echo " 检查configs/nginx.conf.prod是否存在"
echo " 检查frontend/Dockerfile是否存在"
echo ""
echo "6. 完全重新部署:"
echo " ./deploy.sh down -e production"
echo " ./deploy.sh up -e production"
echo "========================================"
}
# 主函数
main() {
local action=${1:-"diagnose"}
log_info "前端服务诊断工具"
echo "========================================"
case $action in
"diagnose")
check_frontend_container
echo ""
check_frontend_image
echo ""
check_frontend_dependencies
echo ""
check_frontend_config
echo ""
check_port_usage
echo ""
check_frontend_files
echo ""
check_frontend_logs
echo ""
show_fix_suggestions
;;
"start")
manual_start_frontend
;;
"logs")
check_frontend_logs
;;
*)
echo "用法: $0 [diagnose|start|logs]"
echo " diagnose - 诊断前端服务问题(默认)"
echo " start - 尝试手动启动前端服务"
echo " logs - 查看前端服务日志"
exit 1
;;
esac
echo "========================================"
log_success "操作完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi


@ -0,0 +1,322 @@
#!/bin/bash
# MySQL连接诊断脚本
# 用于诊断和修复MySQL外部连接问题
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查是否在正确的目录
check_directory() {
if [[ ! -f "deploy.sh" ]]; then
log_error "请在部署目录中运行此脚本 (应包含 deploy.sh)"
exit 1
fi
}
# 检查环境配置
check_environment_config() {
log_info "检查环境配置..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
log_info "数据库配置:"
echo " DB_HOST: $DB_HOST"
echo " DB_PORT: $DB_PORT"
echo " DB_NAME: $DB_NAME"
echo " DB_USER: $DB_USER"
echo " DB_PASSWORD: [HIDDEN]"
else
log_error "未找到生产环境配置文件"
return 1
fi
}
# 检查容器状态
check_container_status() {
log_info "检查MySQL容器状态..."
if docker ps --filter "name=anxin-mysql-prod" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep anxin-mysql-prod; then
log_success "✓ MySQL容器正在运行"
# 显示端口映射
local ports=$(docker port anxin-mysql-prod 2>/dev/null || echo "无端口映射")
log_info "端口映射: $ports"
else
log_error "✗ MySQL容器未运行"
# 检查是否存在但已停止
if docker ps -a --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q anxin-mysql-prod; then
log_warn "MySQL容器存在但已停止"
docker ps -a --filter "name=anxin-mysql-prod" --format "table {{.Names}}\t{{.Status}}"
fi
return 1
fi
}
# 检查网络连接
check_network_connectivity() {
log_info "检查网络连接..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 检查端口是否开放
log_info "检查端口 $DB_PORT 是否开放..."
if netstat -tlnp 2>/dev/null | grep ":$DB_PORT " || ss -tlnp 2>/dev/null | grep ":$DB_PORT "; then
log_success "✓ 端口 $DB_PORT 正在监听"
else
log_error "✗ 端口 $DB_PORT 未开放"
fi
# 测试本地连接
log_info "测试本地连接..."
if nc -z localhost "$DB_PORT" 2>/dev/null; then
log_success "✓ 本地连接测试成功"
else
log_error "✗ 本地连接测试失败"
fi
# 测试容器内连接
if docker ps --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q anxin-mysql-prod; then
log_info "测试容器内连接..."
if docker exec anxin-mysql-prod mysqladmin ping -h localhost -u root -p"$MYSQL_ROOT_PASSWORD" 2>/dev/null; then
log_success "✓ 容器内连接测试成功"
else
log_error "✗ 容器内连接测试失败"
fi
fi
fi
}
# 检查防火墙设置
check_firewall() {
log_info "检查防火墙设置..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 检查iptables规则
if command -v iptables >/dev/null 2>&1; then
log_info "检查iptables规则..."
if iptables -L INPUT -n | grep -q "$DB_PORT"; then
log_info "找到端口 $DB_PORT 的iptables规则"
iptables -L INPUT -n | grep "$DB_PORT"
else
log_warn "未找到端口 $DB_PORT 的iptables规则"
fi
fi
# 检查firewalld
if command -v firewall-cmd >/dev/null 2>&1 && systemctl is-active firewalld >/dev/null 2>&1; then
log_info "检查firewalld规则..."
if firewall-cmd --list-ports | grep -q "$DB_PORT"; then
log_success "✓ 端口 $DB_PORT 已在firewalld中开放"
else
log_warn "端口 $DB_PORT 未在firewalld中开放"
fi
fi
# 检查ufw
if command -v ufw >/dev/null 2>&1; then
log_info "检查ufw规则..."
if ufw status | grep -q "$DB_PORT"; then
log_success "✓ 端口 $DB_PORT 已在ufw中开放"
else
log_warn "端口 $DB_PORT 未在ufw中开放"
fi
fi
fi
}
# 检查MySQL配置
check_mysql_config() {
log_info "检查MySQL配置..."
if docker ps --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q anxin-mysql-prod; then
# 检查bind-address配置
log_info "检查bind-address配置..."
if docker exec anxin-mysql-prod mysql -u root -p"$MYSQL_ROOT_PASSWORD" -e "SHOW VARIABLES LIKE 'bind_address';" 2>/dev/null; then
log_success "✓ 获取bind_address配置成功"
else
log_error "✗ 无法获取bind_address配置"
fi
# 检查用户权限
log_info "检查用户权限..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
if docker exec anxin-mysql-prod mysql -u root -p"$MYSQL_ROOT_PASSWORD" -e "SELECT User, Host FROM mysql.user WHERE User='$DB_USER';" 2>/dev/null; then
log_success "✓ 获取用户权限信息成功"
else
log_error "✗ 无法获取用户权限信息"
fi
fi
fi
}
# 测试外部连接
test_external_connection() {
log_info "测试外部连接..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 获取服务器外部IP
local external_ip=$(curl -s ifconfig.me 2>/dev/null || curl -s ipinfo.io/ip 2>/dev/null || echo "unknown")
log_info "服务器外部IP: $external_ip"
# 测试从外部连接
log_info "尝试从外部连接MySQL..."
if timeout 10 mysql -h "$external_ip" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" -e "SELECT 1;" 2>/dev/null; then
log_success "✓ 外部连接测试成功"
else
log_error "✗ 外部连接测试失败"
log_info "请检查:"
echo " 1. 防火墙是否开放端口 $DB_PORT"
echo " 2. MySQL用户是否允许远程连接"
echo " 3. 网络安全组是否允许该端口"
fi
fi
}
# 修复常见问题
fix_common_issues() {
log_info "修复常见问题..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 1. 重启MySQL容器
log_info "重启MySQL容器..."
./deploy.sh restart mysql -e production
sleep 10
# 2. 检查并修复用户权限
if docker ps --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q anxin-mysql-prod; then
log_info "修复用户权限..."
# 创建用户并授权(如果不存在)
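# 注意:授权给 '%' 表示允许来自任意主机的远程连接;若仅需容器间访问,可考虑收紧为具体网段或主机(此处仅为提示,按实际安全要求调整)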
docker exec anxin-mysql-prod mysql -u root -p"$MYSQL_ROOT_PASSWORD" -e "
CREATE USER IF NOT EXISTS '$DB_USER'@'%' IDENTIFIED BY '$DB_PASSWORD';
GRANT ALL PRIVILEGES ON $DB_NAME.* TO '$DB_USER'@'%';
FLUSH PRIVILEGES;
" 2>/dev/null && log_success "✓ 用户权限修复完成" || log_error "✗ 用户权限修复失败"
fi
# 3. 开放防火墙端口(如果需要)
if command -v firewall-cmd >/dev/null 2>&1 && systemctl is-active firewalld >/dev/null 2>&1; then
log_info "开放firewalld端口..."
firewall-cmd --permanent --add-port="$DB_PORT/tcp" 2>/dev/null && \
firewall-cmd --reload 2>/dev/null && \
log_success "✓ firewalld端口开放完成" || log_warn "firewalld端口开放失败"
fi
if command -v ufw >/dev/null 2>&1; then
log_info "开放ufw端口..."
ufw allow "$DB_PORT/tcp" 2>/dev/null && \
log_success "✓ ufw端口开放完成" || log_warn "ufw端口开放失败"
fi
fi
}
# 显示连接信息
show_connection_info() {
log_info "MySQL连接信息:"
echo "========================================"
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
local external_ip=$(curl -s ifconfig.me 2>/dev/null || curl -s ipinfo.io/ip 2>/dev/null || echo "your-server-ip")
echo "外部连接信息:"
echo " 主机: $external_ip"
echo " 端口: $DB_PORT"
echo " 数据库: $DB_NAME"
echo " 用户名: $DB_USER"
echo " 密码: $DB_PASSWORD"
echo ""
echo "连接命令示例:"
echo " mysql -h $external_ip -P $DB_PORT -u $DB_USER -p$DB_PASSWORD $DB_NAME"
echo ""
echo "JDBC连接字符串:"
echo " jdbc:mysql://$external_ip:$DB_PORT/$DB_NAME?useUnicode=true&characterEncoding=utf8&useSSL=false"
fi
echo "========================================"
}
# 主函数
main() {
local action=${1:-"diagnose"}
log_info "MySQL连接诊断工具"
echo "========================================"
check_directory
case $action in
"diagnose")
check_environment_config
echo ""
check_container_status
echo ""
check_network_connectivity
echo ""
check_firewall
echo ""
check_mysql_config
echo ""
test_external_connection
echo ""
show_connection_info
;;
"fix")
fix_common_issues
echo ""
main "diagnose"
;;
"info")
show_connection_info
;;
*)
echo "用法: $0 [diagnose|fix|info]"
echo " diagnose - 诊断连接问题(默认)"
echo " fix - 修复常见问题"
echo " info - 显示连接信息"
exit 1
;;
esac
echo "========================================"
log_success "操作完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi


@ -0,0 +1,212 @@
#!/bin/bash
# 日志问题修复脚本
# 用于诊断和修复Docker容器中的日志问题
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查日志目录权限
check_log_directories() {
log_info "检查日志目录权限..."
local directories=("/app/logs" "/home/ruoyi/logs")
for dir in "${directories[@]}"; do
if [[ -d "$dir" ]]; then
local owner=$(stat -c '%U:%G' "$dir" 2>/dev/null || stat -f '%Su:%Sg' "$dir" 2>/dev/null)
local perms=$(stat -c '%a' "$dir" 2>/dev/null || stat -f '%A' "$dir" 2>/dev/null)
log_info "目录 $dir - 所有者: $owner, 权限: $perms"
# 检查是否可写
if [[ -w "$dir" ]]; then
log_success "$dir 可写"
else
log_error "$dir 不可写"
fi
else
log_warn "目录不存在: $dir"
fi
done
}
# 检查符号链接
check_symlinks() {
log_info "检查符号链接..."
if [[ -L "/home/ruoyi/logs" ]]; then
local target=$(readlink "/home/ruoyi/logs")
log_info "符号链接: /home/ruoyi/logs -> $target"
if [[ -d "$target" ]]; then
log_success "✓ 符号链接目标存在"
else
log_error "✗ 符号链接目标不存在: $target"
fi
else
log_warn "符号链接不存在: /home/ruoyi/logs"
fi
}
# 检查logback配置
check_logback_config() {
log_info "检查logback配置..."
local config_files=(
"/app/config/logback-spring.xml"
"BOOT-INF/classes/logback.xml"
)
for config in "${config_files[@]}"; do
if [[ -f "$config" ]]; then
log_info "找到配置文件: $config"
# 检查日志路径配置
if grep -q "/home/ruoyi/logs" "$config"; then
log_warn "配置文件使用硬编码路径: /home/ruoyi/logs"
fi
if grep -q "\${LOG_PATH" "$config"; then
log_success "✓ 配置文件使用变量路径: \${LOG_PATH}"
fi
fi
done
}
# 检查环境变量
check_environment_variables() {
log_info "检查环境变量..."
local env_vars=("LOG_PATH" "LOG_LEVEL" "SPRING_PROFILES_ACTIVE")
for var in "${env_vars[@]}"; do
if [[ -n "${!var}" ]]; then
log_success "$var=${!var}"
else
log_warn "环境变量未设置: $var"
fi
done
}
# 创建测试日志文件
test_log_writing() {
log_info "测试日志文件写入..."
local test_dirs=("/app/logs" "/home/ruoyi/logs")
for dir in "${test_dirs[@]}"; do
if [[ -d "$dir" ]]; then
local test_file="$dir/test-write.log"
if echo "Test log entry $(date)" > "$test_file" 2>/dev/null; then
log_success "✓ 可以写入 $dir"
rm -f "$test_file"
else
log_error "✗ 无法写入 $dir"
fi
fi
done
}
# 修复日志目录权限
fix_log_permissions() {
log_info "修复日志目录权限..."
# 确保目录存在
mkdir -p /app/logs
# 创建符号链接仅当路径不存在时若已是普通目录则跳过避免ln -sf在目录内生成嵌套链接
if [[ ! -e "/home/ruoyi/logs" && ! -L "/home/ruoyi/logs" ]]; then
mkdir -p /home/ruoyi
ln -sf /app/logs /home/ruoyi/logs
log_success "创建符号链接: /home/ruoyi/logs -> /app/logs"
fi
# 设置正确的权限
chmod 755 /app/logs
# 如果运行在容器中,设置正确的所有者
if [[ -n "$USER" && "$USER" != "root" ]]; then
chown -R "$USER:$USER" /app/logs /home/ruoyi 2>/dev/null || true
fi
log_success "日志目录权限修复完成"
}
# 显示日志配置建议
show_recommendations() {
log_info "日志配置建议:"
echo "----------------------------------------"
echo "1. 确保使用 logback-spring.xml 而不是 logback.xml"
echo "2. 在启动参数中设置: -Dlogging.config=/app/config/logback-spring.xml"
echo "3. 设置环境变量: LOG_PATH=/app/logs"
echo "4. 使用符号链接兼容旧路径: /home/ruoyi/logs -> /app/logs"
echo "5. 确保日志目录权限正确: 755"
echo "----------------------------------------"
}
# 主函数
main() {
local action=${1:-"check"}
log_info "日志问题诊断和修复工具"
echo "========================================"
case $action in
"check")
check_log_directories
echo ""
check_symlinks
echo ""
check_logback_config
echo ""
check_environment_variables
echo ""
test_log_writing
echo ""
show_recommendations
;;
"fix")
fix_log_permissions
echo ""
check_log_directories
echo ""
test_log_writing
;;
*)
echo "用法: $0 [check|fix]"
echo " check - 检查日志配置和权限"
echo " fix - 修复日志目录权限问题"
exit 1
;;
esac
echo "========================================"
log_success "操作完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi
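# 使用示例(示意,与前文部署验证脚本中的调用方式一致):
#   docker cp scripts/fix-logging-issues.sh anxin-backend-prod:/tmp/
#   docker exec anxin-backend-prod /tmp/fix-logging-issues.sh check   # 仅检查日志目录、符号链接与环境变量
#   docker exec anxin-backend-prod /tmp/fix-logging-issues.sh fix     # 修复目录与符号链接(容器内用户需对相关目录有写权限)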


@ -0,0 +1,114 @@
#!/bin/bash
# MySQL配置验证脚本
# 用于验证MySQL配置文件与MySQL 8.0的兼容性
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查MySQL 8.0不兼容的配置项
check_mysql8_compatibility() {
local config_file="$1"
local issues_found=0
log_info "检查配置文件: $config_file"
if [[ ! -f "$config_file" ]]; then
log_error "配置文件不存在: $config_file"
return 1
fi
# 检查已移除的sql_mode选项
if grep -q "NO_AUTO_CREATE_USER" "$config_file"; then
log_error "发现不兼容的sql_mode选项: NO_AUTO_CREATE_USER (MySQL 8.0已移除)"
issues_found=$((issues_found + 1))  # 用赋值形式自增,避免 ((x++)) 在 x=0 时返回非零退出码
fi
# 检查已弃用的参数
if grep -q "expire_logs_days" "$config_file"; then
log_warn "发现已弃用的参数: expire_logs_days (建议使用 binlog_expire_logs_seconds)"
issues_found=$((issues_found + 1))
fi
if grep -q "binlog_format.*STATEMENT" "$config_file"; then
log_warn "发现已弃用的binlog_format: STATEMENT (建议使用 ROW)"
fi
# 检查query_cache相关配置 (MySQL 8.0已移除)
if grep -v "^[[:space:]]*#" "$config_file" | grep -q "query_cache"; then
log_warn "发现query_cache相关配置 (MySQL 8.0已移除查询缓存功能)"
fi
# 检查推荐的安全配置
if ! grep -q "sql_mode.*STRICT_TRANS_TABLES" "$config_file"; then
log_warn "建议在sql_mode中包含 STRICT_TRANS_TABLES"
fi
return $issues_found
}
# 主函数
main() {
log_info "MySQL 8.0配置兼容性检查"
echo "========================================"
local total_issues=0
local config_files=(
"../configs/my.cnf.dev"
"../configs/my.cnf.staging"
"../configs/my.cnf.prod"
)
for config_file in "${config_files[@]}"; do
if check_mysql8_compatibility "$config_file"; then
log_success "配置文件检查通过: $(basename "$config_file")"
else
local issues=$?
total_issues=$((total_issues + issues))
log_error "配置文件存在 $issues 个问题: $(basename "$config_file")"
fi
echo ""
done
echo "========================================"
if [[ $total_issues -eq 0 ]]; then
log_success "所有配置文件检查通过!"
else
log_error "总共发现 $total_issues 个配置问题"
echo ""
log_info "修复建议:"
echo "1. 移除 NO_AUTO_CREATE_USER 从 sql_mode"
echo "2. 将 expire_logs_days 替换为 binlog_expire_logs_seconds"
echo "3. 移除或注释 query_cache 相关配置"
echo "4. 确保 sql_mode 包含推荐的安全选项"
fi
return $total_issues
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
cd "$(dirname "${BASH_SOURCE[0]}")"
main "$@"
fi
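# 使用示例示意脚本假设保存于docker/scripts/目录下,文件名为假设值;执行时会自动切换到脚本所在目录,
# 并依次检查 ../configs/ 下 dev/staging/prod 三份 my.cnf 的 MySQL 8.0 兼容性):
#   bash docker/scripts/validate-mysql-config.sh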


@ -0,0 +1,243 @@
#!/bin/bash
# 数据库配置验证脚本
# 用于验证生产环境下后端服务是否正确连接到生产数据库
set -e
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查是否在正确的目录
check_directory() {
if [[ ! -f "deploy.sh" ]]; then
log_error "请在部署目录中运行此脚本 (应包含 deploy.sh)"
exit 1
fi
}
# 检查环境变量配置
check_environment_variables() {
log_info "检查生产环境配置..."
if [[ -f "environments/.env.production" ]]; then
log_success "✓ 找到生产环境配置文件"
# 显示数据库配置
echo "数据库配置:"
grep -E "^(DB_HOST|DB_PORT|DB_NAME|DB_USER|DB_PASSWORD)=" environments/.env.production | while read line; do
if [[ $line == *"DB_PASSWORD"* ]]; then
echo " DB_PASSWORD=[HIDDEN]"
else
echo " $line"
fi
done
else
log_error "✗ 未找到生产环境配置文件"
return 1
fi
}
# 检查docker-compose配置
check_docker_compose_config() {
log_info "检查docker-compose配置..."
if [[ -f "docker-compose.production.yml" ]]; then
log_success "✓ 找到docker-compose生产环境配置"
# 检查后端服务的环境变量配置
if grep -A 20 "anxin-backend:" docker-compose.production.yml | grep -E "(DB_HOST|DB_NAME|DB_USER)" > /dev/null; then
log_success "✓ 后端服务配置了数据库环境变量"
else
log_error "✗ 后端服务未正确配置数据库环境变量"
fi
else
log_error "✗ 未找到docker-compose生产环境配置"
return 1
fi
}
# 检查容器环境变量
check_container_environment() {
log_info "检查容器内环境变量..."
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
log_success "✓ 后端容器正在运行"
# 检查容器内的数据库环境变量
echo "容器内数据库配置:"
docker exec anxin-backend-prod env | grep -E "^(DB_HOST|DB_PORT|DB_NAME|DB_USER)=" | while read line; do
echo " $line"
done
# 检查密码是否设置(不显示实际值)
if docker exec anxin-backend-prod env | grep -q "^DB_PASSWORD="; then
log_success "✓ DB_PASSWORD 已设置"
else
log_error "✗ DB_PASSWORD 未设置"
fi
else
log_warn "后端容器未运行,无法检查容器环境变量"
fi
}
# 检查数据库连接
check_database_connection() {
log_info "检查数据库连接..."
# 从环境文件读取数据库配置
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
# 检查MySQL容器是否运行
if docker ps --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q "anxin-mysql-prod"; then
log_success "✓ MySQL容器正在运行"
# 测试数据库连接
log_info "测试数据库连接..."
if docker exec anxin-mysql-prod mysql -u"$DB_USER" -p"$DB_PASSWORD" -e "SELECT 1;" "$DB_NAME" 2>/dev/null; then
log_success "✓ 数据库连接测试成功"
# 检查数据库是否存在
if docker exec anxin-mysql-prod mysql -u"$DB_USER" -p"$DB_PASSWORD" -e "SHOW DATABASES;" | grep -q "$DB_NAME"; then
log_success "✓ 数据库 $DB_NAME 存在"
else
log_error "✗ 数据库 $DB_NAME 不存在"
fi
else
log_error "✗ 数据库连接测试失败"
fi
else
log_error "✗ MySQL容器未运行"
fi
else
log_error "无法读取环境配置文件"
fi
}
# 检查应用启动日志中的数据库连接信息
check_application_logs() {
log_info "检查应用启动日志..."
if docker ps --filter "name=anxin-backend-prod" --format "{{.Names}}" | grep -q "anxin-backend-prod"; then
# 查看最近的启动日志
log_info "最近的启动日志:"
./deploy.sh logs backend -e production --tail 30 | grep -E "(数据库|database|mysql|jdbc)" || true
# 检查是否有数据库连接错误
if ./deploy.sh logs backend -e production --tail 100 | grep -i "connection.*refused\|access.*denied\|unknown.*database"; then
log_error "发现数据库连接错误"
else
log_success "✓ 未发现数据库连接错误"
fi
else
log_warn "后端容器未运行,无法检查应用日志"
fi
}
# 验证数据库表结构
verify_database_schema() {
log_info "验证数据库表结构..."
if [[ -f "environments/.env.production" ]]; then
source environments/.env.production
if docker ps --filter "name=anxin-mysql-prod" --format "{{.Names}}" | grep -q "anxin-mysql-prod"; then
# 检查关键表是否存在
local tables=("sys_user" "sys_role" "sys_menu" "sys_dept")
for table in "${tables[@]}"; do
if docker exec anxin-mysql-prod mysql -u"$DB_USER" -p"$DB_PASSWORD" -e "DESCRIBE $table;" "$DB_NAME" 2>/dev/null > /dev/null; then
log_success "✓ 表 $table 存在"
else
log_error "✗ 表 $table 不存在"
fi
done
fi
fi
}
# 显示修复建议
show_fix_suggestions() {
log_info "修复建议:"
echo "----------------------------------------"
echo "如果发现数据库配置问题,请检查:"
echo ""
echo "1. 环境变量配置 (environments/.env.production):"
echo " - DB_NAME 应该是 anxin_prod"
echo " - DB_USER 应该是 anxin_prod"
echo " - DB_PASSWORD 应该是正确的生产环境密码"
echo ""
echo "2. 重启后端服务以应用新配置:"
echo " ./deploy.sh restart backend -e production"
echo ""
echo "3. 如果数据库不存在,需要初始化:"
echo " ./deploy.sh exec mysql -e production"
echo " CREATE DATABASE anxin_prod CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
echo ""
echo "4. 导入数据库结构:"
echo " docker exec -i anxin-mysql-prod mysql -uroot -p < database/02-schema.sql"
echo "----------------------------------------"
}
# 主函数
main() {
log_info "数据库配置验证工具"
echo "========================================"
# 检查环境
check_directory
# 检查配置文件
check_environment_variables
echo ""
check_docker_compose_config
echo ""
# 检查运行时配置
check_container_environment
echo ""
# 检查数据库连接
check_database_connection
echo ""
# 检查应用日志
check_application_logs
echo ""
# 验证数据库结构
verify_database_schema
echo ""
# 显示修复建议
show_fix_suggestions
echo "========================================"
log_success "验证完成!"
}
# 如果脚本被直接执行
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi

File diff suppressed because one or more lines are too long