From 8ab4213b628c7ab6689e49b4c1f37eab5f1ac1a5 Mon Sep 17 00:00:00 2001 From: Davide Polonio Date: Fri, 10 Apr 2026 10:57:34 +0200 Subject: [PATCH] feat(ollama): re-enable qwen3.5 models with fresh GGUF quantization Re-enable qwen3.5:9b and qwen3.5:9bctxSmall using fresh unsloth/Qwen3.5-9B-GGUF quantization, which uses the correct rope.dimension_sections format (4 elements) compatible with this llama.cpp build. Both models include the mmproj for multimodal support. The old Ollama-extracted GGUF (mrope_sections, 3 elements) has been removed. --- ollama/llama-swap.yaml | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/ollama/llama-swap.yaml b/ollama/llama-swap.yaml index 66a2b25..8c9f0a4 100644 --- a/ollama/llama-swap.yaml +++ b/ollama/llama-swap.yaml @@ -2,10 +2,27 @@ healthCheckTimeout: 180 logLevel: info models: - # NOTE: qwen3.5:9b and qwen3.5:9bctxSmall are disabled — the GGUF extracted - # from Ollama uses the old mrope_sections format (3 elements) which this - # llama.cpp build rejects. Download a fresh quantization from HuggingFace - # (e.g. bartowski/Qwen3.5-9B-GGUF) and add them back. + "qwen3.5:9b": + cmd: | + /app/llama-server + --host 0.0.0.0 --port ${PORT} + --model /models/Qwen3.5-9B-Q4_K_M.gguf + --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf + --alias qwen3.5:9b + --n-gpu-layers 999 + --ctx-size 8192 + --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5 + + "qwen3.5:9bctxSmall": + cmd: | + /app/llama-server + --host 0.0.0.0 --port ${PORT} + --model /models/Qwen3.5-9B-Q4_K_M.gguf + --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf + --alias qwen3.5:9bctxSmall + --n-gpu-layers 999 + --ctx-size 131072 + --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5 "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m": cmd: |