Compare commits

...

26 Commits

SHA1 Message Date

90a9d419e6 fix: set proper healthcheck endpoint 2025-05-14 18:46:09 +02:00
a9d15d5ca1 feat: additional iteration over acl, now simpler 2025-05-01 18:07:09 +02:00
67e8b43807 feat: link new acl config 2025-05-01 17:42:03 +02:00
23a08ba8f2 feat: first vpn acl setup 2025-05-01 17:06:55 +02:00
a07f21af3b chore: bump Headscale to latest version 2025-05-01 16:17:19 +02:00
9d366364ef chore: remove obsolete version entry 2025-05-01 13:05:51 +02:00
959dc5af5a chore: update fireflyiii (#42) 2025-04-04 19:09:08 +02:00
    Reviewed-on: #42
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
607d2b2489 chore: update gitea (#41) 2025-04-04 18:49:30 +02:00
    Reviewed-on: #41
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
bf734085a9 chore: update mail server (#40) 2025-04-04 18:39:08 +02:00
    Reviewed-on: #40
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
a883df03b4 chore: update gitignore 2025-04-04 18:30:30 +02:00
66fa1d8b38 chore: update searxng (#39) 2025-04-04 18:28:36 +02:00
    Reviewed-on: #39
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
9f650639bb feat: add kodokanjudoeste.org WP hosting (#38) 2025-03-07 16:44:32 +01:00
    Reviewed-on: #38
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
51053abc25 feat: add FireflyIII (#37) 2025-01-26 17:45:51 +01:00
    Reviewed-on: #37
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
9f128430ba feat: migrate Diary to VPN only access (#36) 2025-01-12 22:09:22 +01:00
    Reviewed-on: #36
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
6058545dd4 chore: switch to new reverse proxy images (#35) 2025-01-06 13:32:20 +01:00
    Reviewed-on: #35
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
4312a74504 chore: upgrade headscale to 0.23.x (#34) 2025-01-06 13:29:48 +01:00
    Reviewed-on: #34
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
54732efe29 chore: upgrade mailserver (#33) 2024-08-04 13:26:48 +02:00
    Reviewed-on: #33
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
85545c9a17 chore: remove commented section (#32) 2024-08-04 13:05:31 +02:00
    Reviewed-on: #32
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
b448f92f68 feat: add headscale (#31) 2024-08-04 13:02:47 +02:00
    Reviewed-on: #31
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
6d90cd8518 chore: upgrade mailserver (#30) 2024-06-01 14:39:22 +02:00
    Reviewed-on: #30
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
8870c4533e chore: upgrade searxng (#29) 2024-06-01 14:35:47 +02:00
    Reviewed-on: #29
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
9cbe11208f chore: upgrade mailserver and songlify (#28) 2024-02-16 23:27:45 +01:00
    Reviewed-on: #28
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
f26262caad chore: upgrade SearxNG (#27) 2023-11-24 17:37:01 +01:00
    Reviewed-on: #27
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
8ca21e36a0 chore: upgrade different services (#26) 2023-10-14 16:01:54 +02:00
    * Gitea
    * SearxNG
    Reviewed-on: #26
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Co-committed-by: Davide Polonio <poloniodavide@gmail.com>
7d6a86d546 feat: add Nextcloud service (#25) 2023-10-14 15:48:44 +02:00
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Reviewed-on: #25
e9991e2c1b feat: add rss feeder (#24) 2023-08-17 09:48:16 +02:00
    Co-authored-by: Davide Polonio <poloniodavide@gmail.com>
    Reviewed-on: #24
19 changed files with 842 additions and 38 deletions

.gitignore

@@ -4,3 +4,7 @@ config/
 .idea/
 env-searxng
 .*.env
+nextcloud/db.env
+nextcloud/nextcloud.env
+kodokanjudoeste/import/
+mezzotre/

.yamllint.yaml (new file)

@@ -0,0 +1,5 @@
extends: default
rules:
  line-length:
    max: 120

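The file above only relaxes yamllint's default line-length limit for the repo's YAML files. A way to run the linter against the repo (assuming yamllint is installed locally; not part of this diff):

    yamllint -c .yamllint.yaml .
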
diary/Caddyfile (new file)

@@ -0,0 +1,5 @@
https://diary.lan.poldebra.me {
    tls /cert.crt /key.key
    reverse_proxy app:80 {
    }
}

diary/docker-compose.yml

@@ -1,19 +1,43 @@
-version: "3.9"
 services:
+  mock:
+    image: nginx:alpine
+    restart: unless-stopped
+    env_file:
+      - .env
+    networks:
+      - proxy
+
+  reverse_proxy:
+    image: caddy:alpine
+    restart: unless-stopped
+    network_mode: service:tailscale
+    volumes:
+      - ./Caddyfile:/etc/caddy/Caddyfile:ro
+      - /srv/docker/reverse_proxy/certs/diary.lan.poldebra.me/fullchain.pem:/cert.crt:ro
+      - /srv/docker/reverse_proxy/certs/diary.lan.poldebra.me/key.pem:/key.key:ro
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "https://diary.lan.poldebra.me"]
+      interval: 5s
+      timeout: 10s
+      retries: 3
+    depends_on:
+      app:
+        condition: service_started
+      tailscale:
+        condition: service_healthy
+
   app:
     image: monica:4-apache
     depends_on:
-      - db
+      db:
+        condition: service_started
+      tailscale:
+        condition: service_healthy
     env_file:
       - .monica.env
-      - .env
     volumes:
       - data:/var/www/html/storage
     restart: unless-stopped
-    networks:
-      - internal
-      - proxy

   cron:
     image: monica:4-apache
@@ -23,7 +47,7 @@
       - data:/var/www/html/storage
     command: cron.sh
     depends_on:
-      - db
+      - app

   db:
     image: mysql:8.0
@@ -32,8 +56,26 @@
     volumes:
       - mysql:/var/lib/mysql
     restart: unless-stopped
-    networks:
-      - internal
+
+  tailscale:
+    hostname: diary
+    image: tailscale/tailscale:latest
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "tailscale status"]
+      interval: 1s
+      timeout: 5s
+      retries: 60
+    volumes:
+      - tailscale:/var/lib
+      - /lib/modules:/lib/modules:ro
+    devices:
+      - /dev/net/tun:/dev/net/tun
+    cap_add:
+      - net_admin
+      - sys_module
+      - net_raw
+    command: tailscaled

 volumes:
   data:
@@ -48,9 +90,13 @@
       type: none
       o: bind
       device: "/srv/docker/diary/mysql"
+  tailscale:
+    driver: local
+    driver_opts:
+      type: none
+      o: bind
+      device: "/srv/docker/diary/tailscale"

 networks:
   proxy:
     external: true
-  internal:

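In this layout Caddy shares the Tailscale sidecar's network namespace (network_mode: service:tailscale), so the diary only becomes reachable once the sidecar's `tailscale status` healthcheck passes. A quick manual check after bringing the stack up (standard Compose CLI, not part of the diff):

    docker compose up -d
    docker compose exec tailscale tailscale status
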
firefly/Caddyfile (new file)

@@ -0,0 +1,5 @@
https://money.lan.poldebra.me {
    tls /cert.crt /key.key
    reverse_proxy app:8080 {
    }
}

firefly/docker-compose.yml (new file)

@@ -0,0 +1,98 @@
services:
  mock:
    image: nginx:alpine
    restart: unless-stopped
    env_file:
      - .env
    networks:
      - proxy

  reverse_proxy:
    image: caddy:alpine
    restart: unless-stopped
    network_mode: service:tailscale
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - /srv/docker/reverse_proxy/certs/money.lan.poldebra.me/fullchain.pem:/cert.crt:ro
      - /srv/docker/reverse_proxy/certs/money.lan.poldebra.me/key.pem:/key.key:ro
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "https://money.lan.poldebra.me"]
      interval: 5s
      timeout: 10s
      retries: 3
    depends_on:
      app:
        condition: service_started
      tailscale:
        condition: service_healthy

  tailscale:
    hostname: money
    image: tailscale/tailscale:latest
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "tailscale status"]
      interval: 1s
      timeout: 5s
      retries: 60
    volumes:
      - tailscale:/var/lib
      - /lib/modules:/lib/modules:ro
    devices:
      - /dev/net/tun:/dev/net/tun
    cap_add:
      - net_admin
      - sys_module
      - net_raw
    command: tailscaled

  app:
    image: fireflyiii/core:version-6.2
    hostname: app
    container_name: firefly_iii_core
    restart: unless-stopped
    volumes:
      - upload:/var/www/html/storage/upload
      - db:/var/www/html/storage/database
    env_file: .firefly.env

  cron:
    #
    # To make this work, set STATIC_CRON_TOKEN in your .env file or as an environment variable and replace REPLACEME
    # below
    # The STATIC_CRON_TOKEN must be *exactly* 32 characters long
    #
    image: alpine
    restart: unless-stopped
    container_name: firefly_iii_cron
    env_file: .firefly.env
    command: sh -c "
      apk add tzdata
      && ln -s /usr/share/zoneinfo/${TZ} /etc/localtime
      | echo \"0 3 * * * wget -qO- http://app:8080/api/v1/cron/${STATIC_CRON_TOKEN};echo\"
      | crontab -
      && crond -f -L /dev/stdout"

volumes:
  upload:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/firefly/upload"
  db:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/firefly/db"
  tailscale:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/firefly/tailscale"

networks:
  proxy:
    external: true

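The inline comment in the cron service requires STATIC_CRON_TOKEN to be exactly 32 characters. One way to generate such a token before putting it in .firefly.env (a suggestion, not part of the diff):

    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32; echo
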
gitea/docker-compose.yml

@@ -3,7 +3,7 @@ version: '3.7'
 services:
   git-server:
-    image: gitea/gitea:1.19
+    image: gitea/gitea:1.23
     restart: always
     volumes:
       - gs_data:/data

headscale/acl.json (new file)

@@ -0,0 +1,41 @@
{
  "groups": {
    "group:admin": ["davide"],
    "group:family": ["davide", "dario"],
    "group:services": ["services"],
    "group:external": []
  },
  "tagOwners": {
    "tag:web": ["group:admin"],
    "tag:dns": ["group:admin"],
    "tag:exitnode": ["group:admin"],
    "tag:game": ["group:admin", "group:family"]
  },
  "acls": [
    // Family and admin should be able to access everything
    {
      "action": "accept",
      "src": ["group:admin", "group:family"],
      "dst": [
        "*:*"
      ]
    },
    // External can access only hosted games
    {
      "action": "accept",
      "src": ["group:external"],
      "dst": [
        "tag:game:*"
      ]
    },
    // Everyone should access DNS server (or we break their internet connection)
    {
      "action": "accept",
      "src": ["*"],
      "proto": "udp",
      "dst": [
        "tag:dns:53"
      ]
    }
  ]
}

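For the tag-based rules (`tag:game`, `tag:dns`) to match, a node has to join the tailnet with the tag advertised. With the stock Tailscale client against this Headscale instance that would look roughly like this (illustrative, not part of the diff):

    tailscale up --login-server https://vpn.poldebra.me --advertise-tags=tag:game
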
headscale/config.yaml (new file)

@@ -0,0 +1,360 @@
---
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
#
# - `/etc/headscale`
# - `~/.headscale`
# - current working directory

# The url clients will connect to.
# Typically this will be a domain like:
#
# https://myheadscale.example.com:443
#
server_url: https://vpn.poldebra.me:443

# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: 0.0.0.0:8080

# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: 127.0.0.1:9090

# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443

# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false

# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
  # The Noise private key is used to encrypt the
  # traffic between headscale and Tailscale clients when
  # using the new Noise-based protocol.
  private_key_path: /var/lib/headscale/noise_private.key

# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
prefixes:
  v6: fd7a:115c:a1e0::/48
  v4: 100.64.0.0/10

  # Strategy used for allocation of IPs to nodes, available options:
  # - sequential (default): assigns the next free IP from the previous given IP.
  # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
  allocation: sequential

# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
  server:
    # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
    # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
    enabled: false

    # Region ID to use for the embedded DERP server.
    # The local DERP prevails if the region ID collides with other region ID coming from
    # the regular DERP config.
    region_id: 999

    # Region code and name are displayed in the Tailscale UI to identify a DERP region
    region_code: "headscale"
    region_name: "Headscale Embedded DERP"

    # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
    # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
    #
    # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
    stun_listen_addr: "0.0.0.0:3478"

    # Private key used to encrypt the traffic between headscale DERP
    # and Tailscale clients.
    # The private key file will be autogenerated if it's missing.
    #
    private_key_path: /var/lib/headscale/derp_server_private.key

    # This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
    # it enables the creation of your very own DERP map entry using a locally available file with the parameter
    # DERP.paths
    # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using
    # DERP.paths
    automatically_add_embedded_derp_region: true

    # For better connection stability (especially when using an Exit-Node and DNS is not working),
    # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
    ipv4: 1.2.3.4
    ipv6: 2001:db8::1

  # List of externally available DERP maps encoded in JSON
  urls:
    - https://controlplane.tailscale.com/derpmap/default

  # Locally available DERP map files encoded in YAML
  #
  # This option is mostly interesting for people hosting
  # their own DERP servers:
  # https://tailscale.com/kb/1118/custom-derp-servers/
  #
  # paths:
  #   - /etc/headscale/derp-example.yaml
  paths: []

  # If enabled, a worker will be set up to periodically
  # refresh the given sources and update the derpmap
  # will be set up.
  auto_update_enabled: true

  # How often should we check for DERP updates?
  update_frequency: 24h

# Disables the automatic check for headscale updates on startup
disable_check_updates: false

# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m

database:
  # Database type. Available options: sqlite, postgres
  # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
  # All new development, testing and optimisations are done with SQLite in mind.
  type: sqlite

  # Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
  debug: false

  # GORM configuration settings.
  gorm:
    # Enable prepared statements.
    prepare_stmt: true

    # Enable parameterized queries.
    parameterized_queries: true

    # Skip logging "record not found" errors.
    skip_err_record_not_found: true

    # Threshold for slow queries in milliseconds.
    slow_threshold: 1000

  # SQLite config
  sqlite:
    path: /var/lib/headscale/db.sqlite

    # Enable WAL mode for SQLite. This is recommended for production environments.
    # https://www.sqlite.org/wal.html
    write_ahead_log: true

## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory

# Email to register with ACME provider
acme_email: ""

# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""

# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache

# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"

## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""

log:
  # Output formatting for logs: text or json
  format: text
  level: info

## Policy
# headscale supports Tailscale's ACL policies.
# Please have a look to their KB to better
# understand the concepts: https://tailscale.com/kb/1018/acls/
policy:
  # The mode can be "file" or "database" that defines
  # where the ACL policies are stored and read from.
  mode: file

  # If the mode is set to "file", the path to a
  # HuJSON file containing ACL policies.
  path: "/etc/headscale/acl.json"

## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
# Please note that for the DNS configuration to have any effect,
# clients must have the `--accept-dns=true` option enabled. This is the
# default for the Tailscale client. This option is enabled by default
# in the Tailscale client.
#
# Setting _any_ of the configuration and `--accept-dns=true` on the
# clients will integrate with the DNS manager on the client or
# overwrite /etc/resolv.conf.
# https://tailscale.com/kb/1235/resolv-conf
#
# If you want stop Headscale from managing the DNS configuration
# all the fields under `dns` should be set to empty values.
dns:
  # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
  # Only works if there is at least a nameserver defined.
  magic_dns: true

  # Defines the base domain to create the hostnames for MagicDNS.
  # This domain _must_ be different from the server_url domain.
  # `base_domain` must be a FQDN, without the trailing dot.
  # The FQDN of the hosts will be
  # `hostname.base_domain` (e.g., _myhost.example.com_).
  base_domain: lan.poldebra.me

  # List of DNS servers to expose to clients.
  nameservers:
    global:
      - 100.64.0.4
      # - 1.0.0.1

      # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
      # "abc123" is example NextDNS ID, replace with yours.
      # - https://dns.nextdns.io/abc123

    # Split DNS (see https://tailscale.com/kb/1054/dns/),
    # a map of domains and which DNS server to use for each.
    split:
      {}
      # foo.bar.com:
      #   - 1.1.1.1
      # darp.headscale.net:
      #   - 1.1.1.1
      #   - 8.8.8.8

  # Set custom DNS search domains. With MagicDNS enabled,
  # your tailnet base_domain is always the first search domain.
  search_domains: []

  # Extra DNS records
  # so far only A-records are supported (on the tailscale side)
  # See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
  extra_records:
    - name: "pi.hole"
      type: "A"
      value: "100.64.0.4"
    - name: "pihole.lan.poldebra.me"
      type: "A"
      value: "100.64.0.4"
    # - name: "grafana.myvpn.example.com"
    #   type: "A"
    #   value: "100.64.0.3"
    #
    # # you can also put it in one line
    # - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }

# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
#   only_start_if_oidc_is_available: true
#   issuer: "https://your-oidc.issuer.com/path"
#   client_id: "your-oidc-client-id"
#   client_secret: "your-oidc-client-secret"
#   # Alternatively, set `client_secret_path` to read the secret from the file.
#   # It resolves environment variables, making integration to systemd's
#   # `LoadCredential` straightforward:
#   client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
#   # client_secret and client_secret_path are mutually exclusive.
#
#   # The amount of time from a node is authenticated with OpenID until it
#   # expires and needs to reauthenticate.
#   # Setting the value to "0" will mean no expiry.
#   expiry: 180d
#
#   # Use the expiry from the token received from OpenID when the user logged
#   # in, this will typically lead to frequent need to reauthenticate and should
#   # only been enabled if you know what you are doing.
#   # Note: enabling this will cause `oidc.expiry` to be ignored.
#   use_expiry_from_token: false
#
#   # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
#   # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
#   scope: ["openid", "profile", "email", "custom"]
#   extra_params:
#     domain_hint: example.com
#
#   # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
#   # authentication request will be rejected.
#
#   allowed_domains:
#     - example.com
#   # Note: Groups from keycloak have a leading '/'
#   allowed_groups:
#     - /headscale
#   allowed_users:
#     - alice@example.com
#
#   # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
#   # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
#   # If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
#   # user: `first-name.last-name.example.com`
#
#   strip_email_domain: true

# Logtail configuration
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
  # Enable logtail for this headscales clients.
  # As there is currently no support for overriding the log server in headscale, this is
  # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
  enabled: false

# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false

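With magic_dns enabled and the extra A-records above, a connected client should resolve the Pi-hole names through Tailscale's MagicDNS resolver at 100.100.100.100. A quick check from a client (illustrative, not part of the diff):

    dig +short pihole.lan.poldebra.me @100.100.100.100
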
headscale/docker-compose.yml (new file)

@@ -0,0 +1,22 @@
services:
  headscale:
    image: headscale/headscale:0.25
    restart: unless-stopped
    container_name: headscale
    volumes:
      - /srv/docker/headscale/data:/var/lib/headscale
      - /srv/docker/headscale/config:/etc/headscale
    command: serve
    networks:
      - proxy
    env_file:
      - env-vpn
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

networks:
  proxy:
    external: true

kodokanjudoeste/docker-compose.yml (new file)

@@ -0,0 +1,59 @@
services:
  app:
    image: wordpress:6.7-php8.1-apache
    restart: unless-stopped
    networks:
      - proxy
      - internal
    env_file:
      - .db.env
      - .proxy.env
      - .wordpress.env
    volumes:
      - data:/var/www/html
    healthcheck:
      test: "test \"$(curl -Lso /dev/null -w''%{http_code}'' http://localhost:80)\" = \"200\""
      start_period: 5s
      interval: 60s
      timeout: 10s
      retries: 3
    depends_on:
      db:
        condition: service_healthy

  db:
    image: mysql:8.4-oracle
    restart: unless-stopped
    healthcheck:
      test: "mysql $$MYSQL_DATABASE -u$$MYSQL_USER -p$$MYSQL_PASSWORD -e 'SELECT 1;'"
      start_period: 10s
      interval: 10s
      timeout: 5s
      retries: 3
    networks:
      - internal
    env_file:
      - .db.env
    volumes:
      # Enable ONLY for importing the db again
      # - ./import:/docker-entrypoint-initdb.d:ro
      - db:/var/lib/mysql

networks:
  proxy:
    external: true
  internal:

volumes:
  db:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/kodokanjudoeste/db"
  data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/kodokanjudoeste/data"

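One detail worth noting in the db healthcheck above: Compose would normally interpolate `$MYSQL_USER` itself at parse time, so the doubled `$$` is what defers expansion to the shell inside the container. The same escaping applies to any healthcheck that reads container environment variables, e.g. (a sketch, not from this repo):

    healthcheck:
      test: "mysqladmin ping -u$$MYSQL_USER -p$$MYSQL_PASSWORD"
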
mailserver/docker-compose.yml

@@ -1,8 +1,6 @@
----
-version: '3.9'
 services:
   mail:
-    image: mailserver/docker-mailserver:12.1.0
+    image: mailserver/docker-mailserver:15.0.2
     hostname: ${HOSTNAME}
     domainname: ${DOMAINNAME}
     container_name: ${CONTAINER_NAME}

mailserver/setup.sh

@@ -27,8 +27,7 @@ RESET=$(echo -ne '\e[0m')
 set -euEo pipefail
 shopt -s inherit_errexit 2>/dev/null || true

-function _show_local_usage
-{
+function _show_local_usage() {
   # shellcheck disable=SC2059
   printf '%s' "${ORANGE}OPTIONS${RESET}
 ${LBLUE}Config path, container or image adjustments${RESET}
@@ -69,8 +68,7 @@ function _show_local_usage
 "
 }

-function _get_absolute_script_directory
-{
+function _get_absolute_script_directory() {
   if dirname "$(readlink -f "${0}")" &>/dev/null; then
     DIR=$(dirname "$(readlink -f "${0}")")
   elif realpath -e -L "${0}" &>/dev/null; then
@@ -79,8 +77,7 @@ function _get_absolute_script_directory
   fi
 }

-function _set_default_config_path
-{
+function _set_default_config_path() {
   if [[ -d "${DIR}/config" ]]; then
     # legacy path (pre v10.2.0)
     DEFAULT_CONFIG_PATH="${DIR}/config"
@@ -89,8 +86,7 @@ function _set_default_config_path
   fi
 }

-function _handle_config_path
-{
+function _handle_config_path() {
   if [[ -z ${DESIRED_CONFIG_PATH} ]]; then
     # no desired config path
     if [[ -n ${CONTAINER_NAME} ]]; then
@@ -111,8 +107,7 @@ function _handle_config_path
   fi
 }

-function _run_in_new_container
-{
+function _run_in_new_container() {
   # start temporary container with specified image
   if ! ${CRI} history -q "${IMAGE_NAME}" &>/dev/null; then
     echo "Image '${IMAGE_NAME}' not found. Pulling ..."
@@ -124,14 +119,12 @@ function _run_in_new_container
     "${IMAGE_NAME}" "${@}"
 }

-function _main
-{
+function _main() {
   _get_absolute_script_directory
   _set_default_config_path

   local OPTIND
-  while getopts ":c:i:p:zZR" OPT
-  do
+  while getopts ":c:i:p:zZR" OPT; do
     case ${OPT} in
       ( i ) IMAGE_NAME="${OPTARG}" ;;
       ( z | Z ) USE_SELINUX=":${OPT}" ;;

nextcloud/docker-compose.yml (new file)

@@ -0,0 +1,83 @@
services:
  db:
    image: postgres:15-alpine
    restart: unless-stopped
    volumes:
      - db:/var/lib/postgresql/data:Z
    env_file:
      - db.env
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  redis:
    image: redis:7-alpine
    restart: unless-stopped
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "1"

  app:
    image: nextcloud:production-apache
    restart: unless-stopped
    volumes:
      - app:/var/www/html:z
      - data:/data:z
    env_file:
      - db.env
      - nextcloud.env
    depends_on:
      - db
      - redis
    networks:
      - proxy
      - default
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  cron:
    image: nextcloud:production-apache
    restart: unless-stopped
    volumes:
      - app:/var/www/html:z
      - data:/data:z
    entrypoint: /cron.sh
    depends_on:
      - db
      - redis
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

volumes:
  db:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/nextcloud/db"
  app:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/nextcloud/app"
  data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/mnt/nextcloud/data"

networks:
  proxy:
    external: true

reverse_proxy/client_max_body_size.conf

@@ -1 +1,3 @@
 client_max_body_size 15g;
+proxy_request_buffering off;

reverse_proxy/docker-compose.yml

@@ -1,9 +1,9 @@
 ---
-version: '3.7'
 services:
   nginx:
-    image: jwilder/nginx-proxy:alpine
-    restart: always
+    image: nginxproxy/nginx-proxy:latest
+    container_name: nginx-proxy
+    restart: unless-stopped
     ports:
       - 80:80
       - 443:443
@@ -14,7 +14,7 @@ services:
       - nginx_html:/usr/share/nginx/html
       - ./client_max_body_size.conf:/etc/nginx/conf.d/client_max_body_size.conf:ro
     labels:
-      - "com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy"
+      - "com.github.nginx-proxy.nginx"
     networks:
       - proxy
     logging:
@@ -24,13 +24,15 @@
         max-file: "3"

   nginx-letsencript:
-    image: jrcs/letsencrypt-nginx-proxy-companion:latest
-    restart: always
+    image: nginxproxy/acme-companion:latest
+    container_name: nginx-proxy-acme
+    restart: unless-stopped
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock:ro
       - nginx_cert:/etc/nginx/certs
       - nginx_vhost:/etc/nginx/vhost.d
       - nginx_html:/usr/share/nginx/html
+      - nginx_acme:/etc/acme.sh

     networks:
       - proxy
@@ -43,6 +45,7 @@ volumes:
       device: "/srv/docker/reverse_proxy/certs"
   nginx_vhost:
   nginx_html:
+  nginx_acme:

 networks:
   proxy:

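The new nginxproxy images keep the same discovery model as the old jwilder/jrcs pair: a container opts into proxying and certificate issuance through environment variables and the shared proxy network. A minimal sketch of such a proxied service (hypothetical names, assuming the documented nginx-proxy/acme-companion variables):

    whoami:
      image: traefik/whoami
      environment:
        - VIRTUAL_HOST=whoami.example.com
        - LETSENCRYPT_HOST=whoami.example.com
      networks:
        - proxy
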
rss/docker-compose.yml (new file)

@@ -0,0 +1,80 @@
version: '3.9'
services:
  rss:
    image: miniflux/miniflux:latest
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/usr/bin/miniflux", "-healthcheck", "auto"]
    depends_on:
      db:
        condition: service_healthy
      morss:
        condition: service_healthy
    environment:
      - RUN_MIGRATIONS=1
    env_file:
      - .env
    networks:
      - internal
      - proxy
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"

  db:
    image: postgres:15-alpine
    restart: unless-stopped
    volumes:
      - db:/var/lib/postgresql/data
    env_file:
      - .env
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "rss"]
      interval: 10s
      start_period: 30s
    networks:
      - internal
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"

  morss:
    image: pictuga/morss:latest
    restart: unless-stopped
    environment:
      - MAX_TIME=10
      - MAX_ITEM=-1
      - LIM_TIME=120
      - LIM_ITEM=-1
      - CACHE=diskcache
      - CACHE_SIZE=2048000000
    healthcheck:
      test: ["CMD", "/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8000"]
      interval: 1m30s
      timeout: 10s
      retries: 3
      start_period: 30s
    networks:
      - internal
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"

volumes:
  db:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/rss/db"

networks:
  internal:
    ipam:
      driver: default
  proxy:
    external: true

searxng/docker-compose.yml

@@ -19,7 +19,7 @@ services:
   searxng:
     container_name: searxng
-    image: searxng/searxng:2023.3.24-64fea2f9
+    image: searxng/searxng:2025.2.6-147bda894
     restart: unless-stopped
     networks:
       - searxng

songlify/docker-compose.yml

@@ -2,7 +2,7 @@ version: '3.7'
 services:
   bot:
-    image: polpetta/songlify:0.3.4
+    image: polpetta/songlify:0.3.5
     restart: always
     entrypoint: /usr/bin/songlify
     env_file: