feat(ai): add Open WebUI with Tailscale and Caddy reverse proxy

Set up a complete AI services stack with Open WebUI as the main interface,
secured behind Caddy reverse proxy with automatic HTTPS via Namecheap DNS
challenge. Tailscale integration provides secure remote access.

- Configure Open WebUI to connect to Ollama backend
- Set up Caddy with Namecheap DNS plugin for wildcard SSL certificates
- Add Tailscale for secure networking with health checks
- Configure reverse proxy to forward requests to Open WebUI
- Enable proper header forwarding for client IP preservation
- Set up persistent volumes for data, config, and SSL certificates
- Configure JSON logging with rotation for all services
This commit is contained in:
Davide Polonio 2026-03-18 22:16:58 +01:00
parent 7fdd996f29
commit d856a8704b
2 changed files with 111 additions and 0 deletions

28
ai/Caddyfile Normal file
View File

@@ -0,0 +1,28 @@
{
	# Global options — ACME account email, injected from the environment.
	email {env.LETSENCRYPT_EMAIL}
}

*.lan.poldebra.me {
	# Wildcard certificate via DNS-01 challenge against the Namecheap API.
	tls {
		dns namecheap {
			api_key {env.NAMECHEAP_API_KEY}
			user {env.NAMECHEAP_API_USER}
			api_endpoint https://api.namecheap.com/xml.response
		}
		# Public resolvers so the ACME TXT record is looked up outside any
		# split-horizon DNS that *.lan.poldebra.me may live behind.
		resolvers 1.1.1.1 8.8.8.8
	}

	@ai host ai.lan.poldebra.me
	handle @ai {
		# Client-identifying headers must be set with `header_up` so they
		# are forwarded TO the upstream; a plain `header` block would set
		# response headers sent back to the client instead.
		reverse_proxy 172.24.0.5:8080 {
			header_up X-Real-IP {remote_host}
			header_up X-Forwarded-Port {server_port}
			# X-Forwarded-For, X-Forwarded-Proto and X-Forwarded-Host are
			# added by reverse_proxy automatically — no explicit header_up.
		}
	}
}

83
ai/docker-compose.yml Normal file
View File

@@ -0,0 +1,83 @@
services:
  # Open WebUI — the main chat interface. Talks to an external Ollama
  # instance over HTTPS (OLLAMA_BASE_URL below), not over this network.
  webui:
    image: ghcr.io/open-webui/open-webui:main
    restart: unless-stopped
    hostname: openwebui
    container_name: openwebui
    volumes:
      - "/srv/docker/ai/data/data:/app/backend/data" # Double data is intentional
      - "/srv/docker/ai/data/.webui_secret_key:/app/backend/.webui_secret_key"
    environment:
      - OLLAMA_BASE_URL=https://ollama.lan.poldebra.me
    networks:
      internal:
        # Static address — the Caddyfile reverse-proxies to 172.24.0.5:8080.
        ipv4_address: 172.24.0.5
    logging:
      driver: "json-file"
      options:
        mode: "non-blocking"
        max-size: "10m"
        max-file: "3"

  # Tailscale sidecar — owns the network namespace that the reverse proxy
  # joins (network_mode: service:tailscale), exposing Caddy on the tailnet.
  tailscale:
    hostname: ai
    image: tailscale/tailscale:latest
    restart: unless-stopped
    healthcheck:
      # Healthy once `tailscale status` exits 0.
      # NOTE(review): a 1s interval keeps polling forever after startup;
      # consider start_period/start_interval if the overhead matters.
      test: ["CMD-SHELL", "tailscale status"]
      interval: 1s
      timeout: 5s
      retries: 60
    volumes:
      # Whole /var/lib is bind-mounted so tailscaled state survives restarts.
      - /srv/docker/ai/tailscale:/var/lib
      - /lib/modules:/lib/modules:ro
    devices:
      - /dev/net/tun:/dev/net/tun
    cap_add:
      # Uppercase is the documented capability spelling in the Compose spec
      # (Docker accepts either case).
      - NET_ADMIN
      - SYS_MODULE
      - NET_RAW
    # Run the daemon directly instead of the image's default entrypoint.
    command: tailscaled
    networks:
      - internal
    logging:
      driver: "json-file"
      options:
        mode: "non-blocking"
        max-size: "10m"
        max-file: "3"

  # Caddy (built with the Namecheap DNS plugin) terminates TLS and forwards
  # requests to Open WebUI. Shares tailscale's network stack, so it listens
  # on the tailnet address of host `ai`.
  reverse_proxy:
    image: caddybuilds/caddy-namecheap:2-alpine
    restart: unless-stopped
    network_mode: service:tailscale
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - /srv/docker/ai/caddy/config/:/config/caddy:rw
      # /data/caddy holds issued certificates and ACME state — must persist.
      - /srv/docker/ai/caddy/data/:/data/caddy:rw
      - /srv/docker/ai/caddy/share/:/usr/share/caddy:rw
    env_file:
      # Presumably defines LETSENCRYPT_EMAIL and the NAMECHEAP_* variables
      # referenced by the Caddyfile — verify before deploying.
      - caddy.env
    healthcheck:
      # End-to-end probe through the proxy; the long start_period leaves
      # room for the first DNS-challenge certificate issuance.
      test: ["CMD", "wget", "--spider", "-q", "https://ai.lan.poldebra.me"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 90s
    depends_on:
      webui:
        condition: service_started
      tailscale:
        condition: service_healthy
    logging:
      driver: "json-file"
      options:
        mode: "non-blocking"
        max-size: "10m"
        max-file: "3"

# Dedicated bridge network with a fixed subnet so webui can claim a
# static address inside it.
networks:
  internal:
    ipam:
      config:
        - subnet: 172.24.0.0/24