---
# llama-swap configuration: two Qwen3.5-9B variants served via llama.cpp's
# llama-server. Each `cmd` is a multi-line literal block scalar; llama-swap
# joins the lines into a single command when launching the backend.

# Seconds to wait for a model's /health endpoint before giving up.
healthCheckTimeout: 180
logLevel: info

models:
  # NOTE: qwen3.5:9b and qwen3.5:9bctxSmall are disabled — the GGUF extracted
  # from Ollama uses the old mrope_sections format (3 elements) which this
  # llama.cpp build rejects. Download a fresh quantization from HuggingFace
  # (e.g. bartowski/Qwen3.5-9B-GGUF) and add them back.

  # Keys are quoted because the model IDs contain ':' and '/'.
  "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0
      --port ${PORT}
      --model /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.q4_k_m.gguf
      --mmproj /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.mmproj.gguf
      --alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768

  "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0
      --port ${PORT}
      --model /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.q4_k_m.gguf
      --mmproj /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.mmproj.gguf
      --alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768
      --temp 0.6
      --top-k 20
      --top-p 0.95
      --repeat-penalty 1