Compare commits

..

11 Commits

Author SHA1 Message Date
Davide Polonio bfae98946a Merge branch 'master' into vrising
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-27 13:19:00 +02:00
Davide Polonio f34b13b347 chore: minor clean up
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-25 11:41:14 +02:00
Davide Polonio 6c1f36bef9 feat: add possibility to edit docker-compose
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-25 11:35:41 +02:00
Davide Polonio 9ebb0a056d fix: set right path for log, update output
continuous-integration/drone/pr Build is passing Details
continuous-integration/drone/push Build is passing Details
2023-05-25 11:17:57 +02:00
Davide Polonio c3cfd99f17 fix: add log, set handler for SIGINT
continuous-integration/drone/pr Build is passing Details
continuous-integration/drone/push Build is passing Details
2023-05-25 11:13:30 +02:00
Davide Polonio c47158ee88 fix: explicitly ignore SIGINT signal
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-25 10:58:51 +02:00
Davide Polonio 15ce6246fe fix: avoid closing connection when SIGINT
continuous-integration/drone/pr Build is passing Details
continuous-integration/drone/push Build is passing Details
2023-05-25 10:54:45 +02:00
Davide Polonio b466024001 feat: add shell for vrising server editing
continuous-integration/drone/pr Build is passing Details
continuous-integration/drone/push Build is passing Details
2023-05-25 10:19:39 +02:00
Davide Polonio 3805b6e861 feat: switch to PvP mode
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-23 09:34:31 +02:00
Davide Polonio 582b694c4d feat: specify stop grade period
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-05-22 17:36:29 +02:00
Davide Polonio a814ec077b feat: add V Rising server 2023-05-22 17:36:29 +02:00
17 changed files with 225 additions and 636 deletions

2
.gitignore vendored
View File

@ -4,5 +4,3 @@ config/
.idea/
env-searxng
.*.env
nextcloud/db.env
nextcloud/nextcloud.env

View File

@ -1,5 +0,0 @@
extends: default
rules:
line-length:
max: 120

View File

@ -1,5 +0,0 @@
https://diary.lan.poldebra.me {
tls /cert.crt /key.key
reverse_proxy app:80 {
}
}

View File

@ -1,43 +1,19 @@
version: "3.9"
services:
mock:
image: nginx:alpine
restart: unless-stopped
env_file:
- .env
networks:
- proxy
reverse_proxy:
image: caddy:alpine
restart: unless-stopped
network_mode: service:tailscale
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- /srv/docker/reverse_proxy/certs/diary.lan.poldebra.me/fullchain.pem:/cert.crt:ro
- /srv/docker/reverse_proxy/certs/diary.lan.poldebra.me/key.pem:/key.key:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "https://diary.lan.poldebra.me"]
interval: 5s
timeout: 10s
retries: 3
depends_on:
app:
condition: service_started
tailscale:
condition: service_healthy
app:
image: monica:4-apache
depends_on:
db:
condition: service_started
tailscale:
condition: service_healthy
- db
env_file:
- .monica.env
- .env
volumes:
- data:/var/www/html/storage
restart: unless-stopped
networks:
- internal
- proxy
cron:
image: monica:4-apache
@ -47,7 +23,7 @@ services:
- data:/var/www/html/storage
command: cron.sh
depends_on:
- app
- db
db:
image: mysql:8.0
@ -56,26 +32,8 @@ services:
volumes:
- mysql:/var/lib/mysql
restart: unless-stopped
tailscale:
hostname: diary
image: tailscale/tailscale:latest
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "tailscale status"]
interval: 1s
timeout: 5s
retries: 60
volumes:
- tailscale:/var/lib
- /lib/modules:/lib/modules:ro
devices:
- /dev/net/tun:/dev/net/tun
cap_add:
- net_admin
- sys_module
- net_raw
command: tailscaled
networks:
- internal
volumes:
data:
@ -90,13 +48,9 @@ volumes:
type: none
o: bind
device: "/srv/docker/diary/mysql"
tailscale:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/diary/tailscale"
networks:
proxy:
external: true
internal:

View File

@ -3,7 +3,7 @@ version: '3.7'
services:
git-server:
image: gitea/gitea:1.20
image: gitea/gitea:1.19
restart: always
volumes:
- gs_data:/data

View File

@ -1,357 +0,0 @@
---
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
#
# - `/etc/headscale`
# - `~/.headscale`
# - current working directory
# The url clients will connect to.
# Typically this will be a domain like:
#
# https://myheadscale.example.com:443
#
server_url: https://vpn.poldebra.me:443
# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: 0.0.0.0:8080
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: 127.0.0.1:9090
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
prefixes:
v6: fd7a:115c:a1e0::/48
v4: 100.64.0.0/10
# Strategy used for allocation of IPs to nodes, available options:
# - sequential (default): assigns the next free IP from the previous given IP.
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
allocation: sequential
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
# it enables the creation of your very own DERP map entry using a locally available file with the parameter
# DERP.paths
# If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using
# DERP.paths
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an Exit-Node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap
# will be set up.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
database:
# Database type. Available options: sqlite, postgres
# Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
# All new development, testing and optimisations are done with SQLite in mind.
type: sqlite
# Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
debug: false
# GORM configuration settings.
gorm:
# Enable prepared statements.
prepare_stmt: true
# Enable parameterized queries.
parameterized_queries: true
# Skip logging "record not found" errors.
skip_err_record_not_found: true
# Threshold for slow queries in milliseconds.
slow_threshold: 1000
# SQLite config
sqlite:
path: /var/lib/headscale/db.sqlite
# Enable WAL mode for SQLite. This is recommended for production environments.
# https://www.sqlite.org/wal.html
write_ahead_log: true
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
## Policy
# headscale supports Tailscale's ACL policies.
# Please have a look to their KB to better
# understand the concepts: https://tailscale.com/kb/1018/acls/
policy:
# The mode can be "file" or "database" that defines
# where the ACL policies are stored and read from.
mode: file
# If the mode is set to "file", the path to a
# HuJSON file containing ACL policies.
path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
# Please note that for the DNS configuration to have any effect,
# clients must have the `--accept-dns=true` option enabled. This is the
# default for the Tailscale client. This option is enabled by default
# in the Tailscale client.
#
# Setting _any_ of the configuration and `--accept-dns=true` on the
# clients will integrate with the DNS manager on the client or
# overwrite /etc/resolv.conf.
# https://tailscale.com/kb/1235/resolv-conf
#
# If you want to stop Headscale from managing the DNS configuration
# all the fields under `dns` should be set to empty values.
dns:
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# This domain _must_ be different from the server_url domain.
# `base_domain` must be a FQDN, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.base_domain` (e.g., _myhost.example.com_).
base_domain: lan.poldebra.me
# List of DNS servers to expose to clients.
nameservers:
global:
- 100.64.0.4
# - 1.0.0.1
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
# - https://dns.nextdns.io/abc123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# a map of domains and which DNS server to use for each.
split:
{}
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Set custom DNS search domains. With MagicDNS enabled,
# your tailnet base_domain is always the first search domain.
search_domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
extra_records:
- name: "pi.hole"
type: "A"
value: "100.64.0.4"
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only been enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
# # If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
# user: `first-name.last-name.example.com`
#
# strip_email_domain: true
# Logtail configuration
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscales clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false

View File

@ -1,22 +0,0 @@
services:
headscale:
image: headscale/headscale:0.23
restart: unless-stopped
container_name: headscale
volumes:
- /srv/docker/headscale/data:/var/lib/headscale
- /srv/docker/headscale/config:/etc/headscale
command: serve
networks:
- proxy
env_file:
- env-vpn
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
networks:
proxy:
external: true

View File

@ -1,6 +1,8 @@
---
version: '3.9'
services:
mail:
image: mailserver/docker-mailserver:14.0.0
image: mailserver/docker-mailserver:12.1.0
hostname: ${HOSTNAME}
domainname: ${DOMAINNAME}
container_name: ${CONTAINER_NAME}

View File

@ -27,7 +27,8 @@ RESET=$(echo -ne '\e[0m')
set -euEo pipefail
shopt -s inherit_errexit 2>/dev/null || true
function _show_local_usage() {
function _show_local_usage
{
# shellcheck disable=SC2059
printf '%s' "${ORANGE}OPTIONS${RESET}
${LBLUE}Config path, container or image adjustments${RESET}
@ -68,7 +69,8 @@ function _show_local_usage() {
"
}
function _get_absolute_script_directory() {
function _get_absolute_script_directory
{
if dirname "$(readlink -f "${0}")" &>/dev/null; then
DIR=$(dirname "$(readlink -f "${0}")")
elif realpath -e -L "${0}" &>/dev/null; then
@ -77,7 +79,8 @@ function _get_absolute_script_directory() {
fi
}
function _set_default_config_path() {
function _set_default_config_path
{
if [[ -d "${DIR}/config" ]]; then
# legacy path (pre v10.2.0)
DEFAULT_CONFIG_PATH="${DIR}/config"
@ -86,7 +89,8 @@ function _set_default_config_path() {
fi
}
function _handle_config_path() {
function _handle_config_path
{
if [[ -z ${DESIRED_CONFIG_PATH} ]]; then
# no desired config path
if [[ -n ${CONTAINER_NAME} ]]; then
@ -107,7 +111,8 @@ function _handle_config_path() {
fi
}
function _run_in_new_container() {
function _run_in_new_container
{
# start temporary container with specified image
if ! ${CRI} history -q "${IMAGE_NAME}" &>/dev/null; then
echo "Image '${IMAGE_NAME}' not found. Pulling ..."
@ -119,12 +124,14 @@ function _run_in_new_container() {
"${IMAGE_NAME}" "${@}"
}
function _main() {
function _main
{
_get_absolute_script_directory
_set_default_config_path
local OPTIND
while getopts ":c:i:p:zZR" OPT; do
while getopts ":c:i:p:zZR" OPT
do
case ${OPT} in
( i ) IMAGE_NAME="${OPTARG}" ;;
( z | Z ) USE_SELINUX=":${OPT}" ;;

View File

@ -1,85 +0,0 @@
version: '3.9'
services:
db:
image: postgres:15-alpine
restart: unless-stopped
volumes:
- db:/var/lib/postgresql/data:Z
env_file:
- db.env
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
redis:
image: redis:7-alpine
restart: unless-stopped
logging:
driver: "json-file"
options:
max-size: "5m"
max-file: "1"
app:
image: nextcloud:production-apache
restart: unless-stopped
volumes:
- app:/var/www/html:z
- data:/data:z
env_file:
- db.env
- nextcloud.env
depends_on:
- db
- redis
networks:
- proxy
- default
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
cron:
image: nextcloud:production-apache
restart: unless-stopped
volumes:
- app:/var/www/html:z
- data:/data:z
entrypoint: /cron.sh
depends_on:
- db
- redis
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
volumes:
db:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/nextcloud/db"
app:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/nextcloud/app"
data:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/mnt/nextcloud/data"
networks:
proxy:
external: true

View File

@ -1,3 +1 @@
client_max_body_size 15g;
proxy_request_buffering off;

View File

@ -1,9 +1,9 @@
---
version: '3.7'
services:
nginx:
image: nginxproxy/nginx-proxy:latest
container_name: nginx-proxy
restart: unless-stopped
image: jwilder/nginx-proxy:alpine
restart: always
ports:
- 80:80
- 443:443
@ -14,7 +14,7 @@ services:
- nginx_html:/usr/share/nginx/html
- ./client_max_body_size.conf:/etc/nginx/conf.d/client_max_body_size.conf:ro
labels:
- "com.github.nginx-proxy.nginx"
- "com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy"
networks:
- proxy
logging:
@ -24,15 +24,13 @@ services:
max-file: "3"
nginx-letsencript:
image: nginxproxy/acme-companion:latest
container_name: nginx-proxy-acme
restart: unless-stopped
image: jrcs/letsencrypt-nginx-proxy-companion:latest
restart: always
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- nginx_cert:/etc/nginx/certs
- nginx_vhost:/etc/nginx/vhost.d
- nginx_html:/usr/share/nginx/html
- nginx_acme:/etc/acme.sh
networks:
- proxy
@ -45,7 +43,6 @@ volumes:
device: "/srv/docker/reverse_proxy/certs"
nginx_vhost:
nginx_html:
nginx_acme:
networks:
proxy:

View File

@ -1,80 +0,0 @@
version: '3.9'
services:
rss:
image: miniflux/miniflux:latest
restart: unless-stopped
healthcheck:
test: ["CMD", "/usr/bin/miniflux", "-healthcheck", "auto"]
depends_on:
db:
condition: service_healthy
morss:
condition: service_healthy
environment:
- RUN_MIGRATIONS=1
env_file:
- .env
networks:
- internal
- proxy
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "1"
db:
image: postgres:15-alpine
restart: unless-stopped
volumes:
- db:/var/lib/postgresql/data
env_file:
- .env
healthcheck:
test: ["CMD", "pg_isready", "-U", "rss"]
interval: 10s
start_period: 30s
networks:
- internal
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "1"
morss:
image: pictuga/morss:latest
restart: unless-stopped
environment:
- MAX_TIME=10
- MAX_ITEM=-1
- LIM_TIME=120
- LIM_ITEM=-1
- CACHE=diskcache
- CACHE_SIZE=2048000000
healthcheck:
test: ["CMD", "/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8000"]
interval: 1m30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- internal
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "1"
volumes:
db:
driver: local
driver_opts:
type: none
o: bind
device: "/srv/docker/rss/db"
networks:
internal:
ipam:
driver: default
proxy:
external: true

View File

@ -19,7 +19,7 @@ services:
searxng:
container_name: searxng
image: searxng/searxng:2024.5.31-18fb701be
image: searxng/searxng:2023.3.24-64fea2f9
restart: unless-stopped
networks:
- searxng

View File

@ -2,7 +2,7 @@ version: '3.7'
services:
bot:
image: polpetta/songlify:0.3.5
image: polpetta/songlify:0.3.4
restart: always
entrypoint: /usr/bin/songlify
env_file:

View File

@ -0,0 +1,60 @@
version: "3.9"
services:
  vrising:
    image: didstopia/vrising-server:latest
    container_name: vrising
    restart: unless-stopped
    stop_grace_period: 2m30s # Avoid data loss
    environment:
      # Configure the server
      V_RISING_SERVER_PERSISTENT_DATA_PATH: "/app/vrising"
      V_RISING_SERVER_BRANCH: "public"
      V_RISING_SERVER_START_MODE: "0" # Install/update and start server
      # V_RISING_SERVER_START_MODE: "1" # Install/update and exit
      # V_RISING_SERVER_START_MODE: "2" # Install, skip update check and start server
      V_RISING_SERVER_UPDATE_MODE: "1" # Enable update checking
      # Customize the server
      V_RISING_SERVER_NAME: "V Rising Clownfiesta Server"
      V_RISING_SERVER_DESCRIPTION: "V Rising server for Clownfiesta clan"
      # Environment values are strings; quote the numeric ones so YAML does
      # not coerce them to ints, consistent with the quoted values above.
      V_RISING_SERVER_GAME_PORT: "9876"
      V_RISING_SERVER_QUERY_PORT: "9877"
      V_RISING_SERVER_RCON_PORT: "9878"
      V_RISING_SERVER_RCON_ENABLED: "true"
      V_RISING_SERVER_MAX_CONNECTED_USERS: "100"
      V_RISING_SERVER_MAX_CONNECTED_ADMINS: "100"
      V_RISING_SERVER_SAVE_NAME: "clownfiesta_server_1"
      V_RISING_SERVER_LIST_ON_MASTER_SERVER: "false"
      V_RISING_SERVER_LIST_ON_STEAM: "false"
      V_RISING_SERVER_LIST_ON_EOS: "false"
      V_RISING_SERVER_AUTO_SAVE_COUNT: "5"
      V_RISING_SERVER_AUTO_SAVE_INTERVAL: "10"
      V_RISING_SERVER_GAME_SETTINGS_PRESET: "StandardPvP"
    env_file: .vrising.env # For server passwords
    ports:
      - "9876:9876/udp"
      - "9877:9877/udp"
      - "9878:9878/tcp"
    volumes:
      - vrising_saves:/app/vrising
      - vrising_data:/steamcmd/vrising
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "1"
volumes:
  vrising_saves:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/vrising/saves"
  vrising_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/srv/docker/vrising/data"

127
vrising/src/vrising.sh Executable file
View File

@ -0,0 +1,127 @@
#!/bin/bash
set -e
### ###
# #
# V Rising on-the-fly shell #
# v0.2 #
### ###
stdout() {
  # Print an informational message on standard output.
  echo "$1"
}
stderr() {
  # Print an error message on standard error.
  echo "$1" >&2
}
log() {
  # Append a "HH:MM:SS - <action>" line to the per-session log file.
  # Globals read: THIS_LOG (path set by the main script body).
  local entry="$1"
  local timestamp
  timestamp="$(date +%R:%S)"
  echo "${timestamp} - ${entry}" >> "${THIS_LOG}"
}
# Open the given file in nano, then restart the compose stack and attach
# to its logs so the operator sees the server come back up.
# Arguments: $1 - absolute path of the settings file to edit.
# Globals read: VRISING_COMPOSE_HOME, THIS_LOG (via log).
editWithRestart() {
  local full_path_file_to_edit="${1}"
  # Interactive edit; blocks until the user leaves nano.
  nano "${full_path_file_to_edit}"
  log "File edit - ${full_path_file_to_edit}"
  stdout "The server will now be restarted and you will be attached to the logs."
  stdout "Use ctrl-c to disconnect from the server"
  log "Server restart"
  # Subshell keeps the caller's working directory untouched.
  (cd "${VRISING_COMPOSE_HOME}" && docker-compose restart && docker-compose logs -f)
}
# Stop the compose stack, open the given file (the compose definition) in
# nano, then recreate the stack and attach to its logs.
# Arguments: $1 - absolute path of the docker-compose file to edit.
# Globals read: VRISING_COMPOSE_HOME, THIS_LOG (via log).
# NOTE(review): `docker-compose down -v` also removes the stack's named
# volumes — presumably safe here because the data lives on bind mounts,
# but confirm before reusing this pattern elsewhere.
editCompose() {
  local full_path_file_to_edit="${1}"
  log "Server stop"
  stdout "Stopping the Server before editing"
  (cd "${VRISING_COMPOSE_HOME}" && docker-compose down -v)
  nano "${full_path_file_to_edit}"
  log "File edit - ${full_path_file_to_edit}"
  stdout "The server will be now recreated and you will be attached to the logs."
  stdout "Use ctrl-c to disconnect from the server"
  log "Server recreate"
  (cd "${VRISING_COMPOSE_HOME}" && sleep 1 && docker-compose up -d && docker-compose logs -f)
}
# SIGINT handler: record the disconnect in the session log and exit with
# status 0, so ctrl-c (used to detach from `docker-compose logs -f`) ends
# the session cleanly instead of killing the script mid-action.
ctrl_c_handler() {
  # shellcheck disable=SC2317
  log "Logout with ctrl-c"
  # shellcheck disable=SC2317
  exit 0
}
trap 'ctrl_c_handler' INT
# --- Entry point -----------------------------------------------------------
# Usage: vrising.sh <username>
# Interactive menu for managing the V Rising compose stack. Every action is
# appended to a per-session log file under ${HOME}/logs.
USERNAME="${1}"
shift

# Settings files live on the server's bind-mounted save volume.
BASE_VRISING_FOLDER="/srv/docker/vrising/saves/Settings/"
GAME_SETTINGS="ServerGameSettings.json"
HOST_SETTINGS="ServerHostSettings.json"
VRISING_COMPOSE_HOME="/home/davide/services/vrising/"
LOGS_FOLDER="${HOME}/logs"
# One log file per session: "<user>-<nanosecond ISO timestamp>.log".
THIS_LOG="${LOGS_FOLDER}/${USERNAME}-$(date --iso-8601=ns).log"

# Brace group, not a subshell: `exit` inside `( ... )` only leaves the
# subshell, so the original form relied on `set -e` to actually stop the
# script when mkdir failed.
mkdir -p "${LOGS_FOLDER}" || { stderr "Unable to create logs folder, exiting"; exit 1; }

stdout "Welcome ${USERNAME}, please remember that your current activity in the server is logged!"
stdout "With ctrl-c you will be instantly disconnected from the server"

# Menu loop; runs until the user picks 0 (exit) or sends SIGINT.
while true
do
  stdout """
Select one of the following actions:
0 - exit
1 - edit ${GAME_SETTINGS}
2 - edit ${HOST_SETTINGS}
3 - edit docker-compose definition (stop, edit and recreate the container)
4 - inspect server logs
5 - restart server
6 - recreate server (destroy and recreate container)
"""
  ACTION="-1"
  read -rp "> " ACTION
  case "${ACTION}" in
    0)
      log "Exit"
      exit 0
      ;;
    1)
      editWithRestart "${BASE_VRISING_FOLDER}${GAME_SETTINGS}"
      ;;
    2)
      editWithRestart "${BASE_VRISING_FOLDER}${HOST_SETTINGS}"
      ;;
    3)
      editCompose "${VRISING_COMPOSE_HOME}docker-compose.yml"
      ;;
    4)
      stdout "Use ctrl-c to disconnect from the server at any time"
      log "Show logs"
      (cd "${VRISING_COMPOSE_HOME}" && docker-compose logs -f)
      ;;
    5)
      stdout "Use ctrl-c to disconnect from the server at any time"
      log "Server restart"
      (cd "${VRISING_COMPOSE_HOME}" && docker-compose restart && docker-compose logs -f)
      ;;
    6)
      stdout "Use ctrl-c to disconnect from the server at any time"
      log "Recreate server"
      (cd "${VRISING_COMPOSE_HOME}" && docker-compose down -v && sleep 1 && docker-compose up -d && docker-compose logs -f)
      ;;
    *)
      log "Invalid input: ${ACTION}"
      stderr "The provided input is not valid. Please choose a valid action"
      ;;
  esac
done
exit 0