第 14 章 - Docker 与容器化部署
14.1 Dockerfile 编写
14.1.1 基础镜像
# /home/free/src/blogp/docker/Dockerfile
# Option 1: use the official OpenResty image (recommended).
# NOTE: opm is only shipped in the *-fat image variants, and the plain
# alpine image does not include curl (needed by the HEALTHCHECK below),
# so use alpine-fat and install curl explicitly.
FROM openresty/openresty:1.25.3.2-alpine-fat
# Option 2: custom build on Ubuntu
# FROM ubuntu:22.04
# Install build dependencies
# RUN apt-get update && apt-get install -y \
# build-essential libpcre3-dev libssl-dev zlib1g-dev \
# && rm -rf /var/lib/apt/lists/*
# curl is required by the HEALTHCHECK.
RUN apk add --no-cache curl
# Set the working directory
WORKDIR /usr/local/openresty
# Create the log/cache directories referenced by nginx.conf
RUN mkdir -p /var/log/openresty /var/cache/nginx
# Copy configuration files
COPY nginx/conf/nginx.conf /usr/local/openresty/nginx/conf/
COPY nginx/conf/conf.d/ /usr/local/openresty/nginx/conf/conf.d/
# Copy Lua code
COPY lua/ /usr/local/openresty/lua/
# Install extra Lua dependencies (opm is available in the -fat variant)
RUN opm get openresty/lua-resty-http \
&& opm get SkyLothar/lua-resty-jwt \
&& opm get bungle/lua-resty-template
# Expose ports
EXPOSE 8080 8443
# Health check: the gateway must serve GET /health on port 8080
HEALTHCHECK --interval=30s --timeout=5s --retries=3 \
CMD curl -f http://localhost:8080/health || exit 1
# Run in the foreground so the container stays alive
CMD ["openresty", "-g", "daemon off;"]
14.1.2 多阶段构建(优化镜像大小)
# Build stage
FROM openresty/openresty:1.25.3.2-centos8 AS builder
# Install an extra module from source.
# NOTE(review): lua-resty-lrucache already ships with OpenResty; this is
# only a demonstration of copying artifacts between stages.
RUN yum install -y git && \
cd /tmp && \
git clone https://github.com/openresty/lua-resty-lrucache.git && \
cd lua-resty-lrucache && \
make install
# Runtime stage
FROM openresty/openresty:1.25.3.2-alpine
# Copy only the built artifact from the builder stage
COPY --from=builder /usr/local/openresty/lualib/resty/lrucache.lua \
/usr/local/openresty/lualib/resty/
# WARNING: this copies the entire build context (including this
# Dockerfile); add a .dockerignore or copy specific paths instead.
COPY . /usr/local/openresty/
EXPOSE 8080
CMD ["openresty", "-g", "daemon off;"]
14.1.3 自定义编译镜像
FROM ubuntu:22.04 AS builder
ENV OPENRESTY_VERSION=1.25.3.2
# Install build dependencies
RUN apt-get update && apt-get install -y \
build-essential wget libpcre3-dev libssl-dev zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
# Download and compile OpenResty.
# NOTE: OpenResty's ./configure accepts -j and forwards it to the
# bundled sub-builds; the tarball and build tree stay only in this
# builder stage, so no cleanup is needed for the final image.
RUN wget https://openresty.org/download/openresty-${OPENRESTY_VERSION}.tar.gz && \
tar -xzf openresty-${OPENRESTY_VERSION}.tar.gz && \
cd openresty-${OPENRESTY_VERSION} && \
./configure \
--prefix=/usr/local/openresty \
--with-pcre-jit \
--with-http_ssl_module \
--with-http_realip_module \
--with-http_stub_status_module \
--with-http_v2_module \
--with-stream \
--with-stream_ssl_module \
-j$(nproc) && \
make -j$(nproc) && \
make install
# Runtime stage (slim image): only the shared-library runtime deps
# (libpcre3, libssl3) plus curl for health checks.
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
libpcre3 libssl3 curl \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /usr/local/openresty /usr/local/openresty
ENV PATH="/usr/local/openresty/bin:/usr/local/openresty/nginx/sbin:${PATH}"
EXPOSE 8080
CMD ["openresty", "-g", "daemon off;"]
14.2 Docker Compose
14.2.1 完整开发环境
# docker-compose.yml
# NOTE: the top-level `version` key is deprecated in Compose v2 but is
# kept for compatibility with older docker-compose releases.
version: "3.8"

services:
  # OpenResty gateway
  gateway:
    build:
      context: .
      dockerfile: docker/Dockerfile
    ports:
      - "8080:8080"
      - "8443:8443"
    volumes:
      # Mount code during development (supports hot reload).
      - ./nginx/conf:/usr/local/openresty/nginx/conf:ro
      - ./lua:/usr/local/openresty/lua:ro
      # Persist logs.
      - gateway-logs:/var/log/openresty
    environment:
      - CONSUL_ADDR=http://consul:8500
      - REDIS_ADDR=redis:6379
      - JWT_SECRET=${JWT_SECRET:-dev-secret}
    # depends_on only orders container startup; it does NOT wait for the
    # dependencies to be ready to accept connections.
    depends_on:
      - consul
      - redis
      - backend
    networks:
      - gateway-net
    restart: unless-stopped

  # Example backend service
  backend:
    image: hashicorp/http-echo
    command: ["-text=Hello from backend", "-listen=:8080"]
    networks:
      - gateway-net

  # Redis
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis-data:/data
    command: redis-server --appendonly yes
    networks:
      - gateway-net

  # Consul (service discovery)
  consul:
    # The legacy `consul` Docker Hub image is deprecated; use the
    # hashicorp/ namespace.
    image: hashicorp/consul:1.15
    ports:
      - "8500:8500"
      - "8600:8600/udp"
    command: agent -server -bootstrap-expect=1 -ui -client=0.0.0.0
    networks:
      - gateway-net

  # Prometheus (monitoring)
  prometheus:
    image: prom/prometheus:v2.47.0
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
    networks:
      - gateway-net

  # Grafana (dashboards)
  grafana:
    image: grafana/grafana:10.0.0
    ports:
      - "3000:3000"
    environment:
      # Development default only; override for real deployments.
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana-data:/var/lib/grafana
    networks:
      - gateway-net

volumes:
  gateway-logs:
  redis-data:
  grafana-data:

networks:
  gateway-net:
    driver: bridge
14.2.2 生产环境 Compose
# docker-compose.prod.yml
version: "3.8"

services:
  gateway:
    # Prefer an immutable version tag over :latest in production.
    image: your-registry/openresty-gateway:latest
    ports:
      - "80:8080"
      - "443:8443"
    # NOTE: deploy/configs/secrets require Docker Swarm mode
    # (`docker stack deploy`); plain docker-compose ignores most of
    # the deploy section.
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: "2.0"
          memory: 1G
        reservations:
          cpus: "0.5"
          memory: 256M
    configs:
      - source: gateway_config
        target: /usr/local/openresty/nginx/conf/nginx.conf
    secrets:
      - jwt_secret
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 5s
      retries: 3

configs:
  gateway_config:
    file: ./config/nginx.prod.conf

secrets:
  jwt_secret:
    file: ./secrets/jwt_secret.txt
14.3 配置管理
14.3.1 环境变量注入
# Using environment variables in nginx.conf.
# NOTE: `env` directives must appear in the main (top-level) context;
# without them os.getenv() returns nil inside OpenResty workers.
env REDIS_ADDR;
env JWT_SECRET;
http {
# Read environment variables from Lua (runs once at configuration load).
init_by_lua_block {
config = {
redis_addr = os.getenv("REDIS_ADDR") or "127.0.0.1:6379",
jwt_secret = os.getenv("JWT_SECRET") or "default-secret",
}
}
}
14.3.2 配置模板化
#!/bin/sh
# docker-entrypoint.sh
# FIX: the shebang must be the very first line of the script.
# Fail fast if template rendering fails, instead of starting OpenResty
# with a stale or empty configuration.
set -e

# Substitute variables in the config template with envsubst (part of
# the gettext package -- ensure it is installed in the image). Only the
# listed variables are replaced, so nginx's own $vars stay untouched.
envsubst '${REDIS_ADDR} ${BACKEND_ADDR} ${DOMAIN}' \
< /etc/nginx/nginx.conf.template \
> /usr/local/openresty/nginx/conf/nginx.conf

# Replace the shell with OpenResty so it runs as PID 1 and receives
# container signals (SIGTERM) directly.
exec openresty -g "daemon off;"
# Usage in the Dockerfile
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh
COPY nginx.conf.template /etc/nginx/nginx.conf.template
# Exec-form ENTRYPOINT so signals reach the script without a shell wrapper.
ENTRYPOINT ["/docker-entrypoint.sh"]
14.4 热加载策略
14.4.1 配置热加载
-- /usr/local/openresty/lua/hot_reload.lua
-- Watches configuration/Lua files for changes and triggers a reload.
local _M = {}

local cjson = require "cjson" -- NOTE(review): unused in this module; kept for compatibility

-- Compare the md5 of the watched files against the hash cached in the
-- gateway_config shared dict. Returns (changed, current_hash).
-- NOTE(review): io.open is blocking I/O; acceptable inside a timer on
-- a few small files, but keep the watch list short.
local function check_config_changed()
    local shared = ngx.shared.gateway_config
    local config_hash = shared:get("config_hash")

    -- Concatenate the contents of every watched file and hash the lot.
    local config_content = ""
    local files = {
        "/usr/local/openresty/nginx/conf/nginx.conf",
        "/usr/local/openresty/lua/router.lua",
    }
    for _, file in ipairs(files) do
        local f = io.open(file, "r")
        if f then
            config_content = config_content .. f:read("*all")
            f:close()
        end
    end

    local current_hash = ngx.md5(config_content)
    return current_hash ~= config_hash, current_hash
end

-- Trigger a reload.
function _M.reload()
    -- Option 1: nginx reload (for config changes).
    -- NOTE(review): os.execute blocks the worker and assumes the
    -- `openresty` binary (with the right prefix) is on PATH; prefer
    -- signalling the master process in production.
    os.execute("openresty -s reload")

    -- Option 2: hot-reload Lua code (no nginx reload needed) by
    -- clearing the require cache for this project's modules; the next
    -- require() re-reads them from disk.
    for k, _ in pairs(package.loaded) do
        if k:match("^gateway") then
            package.loaded[k] = nil
        end
    end
end

-- Start the file-watcher timer. Call from init_worker_by_lua*.
function _M.start_watcher()
    -- FIX: only worker 0 watches; previously every worker started its
    -- own timer, so one change triggered N concurrent reloads.
    if ngx.worker.id() ~= 0 then
        return true
    end

    local function watch(premature)
        if premature then return end
        local changed, hash = check_config_changed()
        if changed then
            ngx.log(ngx.INFO, "Config changed, reloading...")
            ngx.shared.gateway_config:set("config_hash", hash)
            _M.reload()
        end
    end

    -- FIX: ngx.timer.every can fail (e.g. too many pending timers);
    -- log instead of silently dropping the watcher.
    local ok, err = ngx.timer.every(5, watch)
    if not ok then
        ngx.log(ngx.ERR, "failed to start config watcher: ", err)
    end
    return ok, err
end

return _M
14.4.2 Lua 代码热加载(开发模式)
# Development: disable the Lua code cache.
http {
lua_code_cache off; # reload Lua files on every request
# WARNING: development only! Production must keep the cache ON --
# with it off, every request pays the compile cost and per-worker
# module state is discarded.
}
14.5 集群部署
14.5.1 Nginx 负载均衡(入口层)
# Load-balancer (entry tier) configuration.
upstream gateway_cluster {
least_conn;
server gateway-1:8080 weight=5;
server gateway-2:8080 weight=5;
server gateway-3:8080 weight=5;
# Session affinity (if required).
# NOTE: ip_hash and least_conn are mutually exclusive -- enable only
# one balancing method per upstream block.
# ip_hash;
}
server {
listen 80;
location / {
proxy_pass http://gateway_cluster;
# Forward the original host and client address to the gateways.
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
14.5.2 共享状态同步
-- In a cluster, lua_shared_dict memory is per-node; use Redis to keep
-- rate-limit state consistent across gateway instances.

-- Cluster-wide fixed-window rate limiter.
-- key:          logical bucket (e.g. client id)
-- max_requests: allowed calls per window
-- window:       window length in seconds
-- Returns true when the request is allowed. Fails open (allows the
-- request) when Redis is unreachable, logging the error.
local function cluster_rate_limit(key, max_requests, window)
    local redis = require "resty.redis"
    local red = redis:new()

    -- FIX: resty.redis connect() takes host and port as separate
    -- arguments; "redis:6379" as one string is not a valid host.
    local ok, err = red:connect("redis", 6379)
    if not ok then
        ngx.log(ngx.ERR, "rate limit: redis connect failed: ", err)
        return true -- fail open
    end

    -- INCR + EXPIRE done atomically via a server-side Lua script
    -- (classic fixed-window counter).
    local script = [[
        local current = redis.call('INCR', KEYS[1])
        if current == 1 then
            redis.call('EXPIRE', KEYS[1], ARGV[1])
        end
        return current
    ]]
    local count, eval_err = red:eval(script, 1, "rate:" .. key, window)
    red:set_keepalive(10000, 100)
    if not count then
        ngx.log(ngx.ERR, "rate limit: redis eval failed: ", eval_err)
        return true -- fail open
    end

    return count <= max_requests
end
14.6 Kubernetes 部署
14.6.1 Deployment
# k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openresty-gateway
  labels:
    app: openresty-gateway
spec:
  replicas: 3
  selector:
    matchLabels:
      app: openresty-gateway
  template:
    metadata:
      labels:
        app: openresty-gateway
    spec:
      containers:
        - name: gateway
          # Prefer a pinned, immutable tag over :latest in production.
          image: your-registry/openresty-gateway:latest
          ports:
            - containerPort: 8080
              name: http
            - containerPort: 8443
              name: https
          env:
            - name: REDIS_ADDR
              valueFrom:
                configMapKeyRef:
                  name: gateway-config
                  key: redis-addr
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: gateway-secrets
                  key: jwt-secret
          resources:
            requests:
              cpu: 500m
              memory: 256Mi
            limits:
              cpu: "2"
              memory: 1Gi
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 10
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10
          volumeMounts:
            # Mounting a ConfigMap over the whole conf directory hides
            # any files shipped in the image; the ConfigMap must hold
            # the complete configuration.
            - name: config
              mountPath: /usr/local/openresty/nginx/conf
              readOnly: true
            - name: lua-code
              mountPath: /usr/local/openresty/lua
              readOnly: true
      volumes:
        - name: config
          configMap:
            name: gateway-nginx-config
        - name: lua-code
          configMap:
            name: gateway-lua-code
14.6.2 Service
# k8s/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: openresty-gateway
spec:
  # ClusterIP: internal only; external traffic enters via the Ingress.
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: 8080
      name: http
  selector:
    app: openresty-gateway
14.6.3 Ingress
# k8s/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gateway-ingress
  annotations:
    # Raise the ingress-nginx request-body limit for uploads.
    nginx.ingress.kubernetes.io/proxy-body-size: "10m"
spec:
  rules:
    - host: api.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: openresty-gateway
                port:
                  number: 8080
14.6.4 HPA 自动扩缩容
# k8s/hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: gateway-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: openresty-gateway
  minReplicas: 3
  maxReplicas: 20
  metrics:
    # Scale out when average CPU exceeds 70% of the pod request.
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    # Scale out when average memory exceeds 80% of the pod request.
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
14.7 日志收集
# Collect logs with a DaemonSet (one Filebeat pod per node).
# k8s/filebeat-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
spec:
  selector:
    matchLabels:
      app: filebeat
  template:
    metadata:
      labels:
        app: filebeat
    spec:
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:8.10.0
          # NOTE(review): a working deployment also needs a filebeat.yml
          # ConfigMap and output settings; trimmed here for brevity.
          volumeMounts:
            - name: varlog
              mountPath: /var/log
              readOnly: true
            - name: containerlog
              mountPath: /var/lib/docker/containers
              readOnly: true
      volumes:
        # Host paths give Filebeat access to node and container logs.
        - name: varlog
          hostPath:
            path: /var/log
        - name: containerlog
          hostPath:
            path: /var/lib/docker/containers
14.8 注意事项
镜像大小:使用 Alpine 基础镜像可将镜像大小从 ~1GB 减少到 ~100MB。使用多阶段构建进一步优化。
安全扫描:定期扫描镜像中的安全漏洞,可使用 Trivy 或 docker scout 等工具(旧的 docker scan 命令已被弃用)。
资源限制:K8s 中必须设置 CPU 和内存限制,避免单个 Pod 耗尽节点资源。
优雅关闭:确保容器接收到 SIGTERM 信号后能优雅关闭,完成正在处理的请求。
存储卷:日志和临时数据使用 emptyDir 或 PVC,不要写入容器文件系统。
上一章:← 第 13 章 - 微服务网关架构 下一章:第 15 章 - 测试与质量保障 →