desktop-dotfiles/ollama/llama-swap.yaml
Davide Polonio 8ab4213b62 feat(ollama): add persistence in Ollama container
Re-enable qwen3.5:9b and qwen3.5:9bctxSmall using fresh unsloth/Qwen3.5-9B-GGUF
quantization, which uses the correct rope.dimension_sections format (4 elements)
compatible with this llama.cpp build. Both models include the mmproj for
multimodal support. The old Ollama-extracted GGUF (mrope_sections, 3 elements)
has been removed.
2026-04-10 10:57:34 +02:00

47 lines
1.6 KiB
YAML

---
# llama-swap configuration. Indentation reconstructed — the pasted source had
# all leading whitespace stripped, which is not valid YAML; structure follows
# llama-swap's schema: top-level settings, then a `models` map where each
# entry's `cmd` is a literal block scalar holding the llama-server invocation.
healthCheckTimeout: 180
logLevel: info

models:
  # Key is quoted because it contains a colon (YAML would otherwise parse it
  # as a nested mapping). llama-swap substitutes ${PORT} at launch time.
  "qwen3.5:9b":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Qwen3.5-9B-Q4_K_M.gguf
      --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
      --alias qwen3.5:9b
      --n-gpu-layers 999
      --ctx-size 8192
      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5

  # NOTE(review): despite the "ctxSmall" name this entry uses the LARGE
  # context (131072) while "qwen3.5:9b" above uses 8192 — the two --ctx-size
  # values look swapped relative to the names. Left as-is because clients
  # select models by name; confirm intent before changing either value.
  "qwen3.5:9bctxSmall":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Qwen3.5-9B-Q4_K_M.gguf
      --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
      --alias qwen3.5:9bctxSmall
      --n-gpu-layers 999
      --ctx-size 131072
      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5

  # Hugging Face-sourced model; the alias is shell-quoted inside the command
  # because it contains characters (/, :) the shell would otherwise not need
  # but which keep the alias byte-identical to the model key above.
  # No sampling flags: relies on llama-server defaults.
  "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.q4_k_m.gguf
      --mmproj /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.mmproj.gguf
      --alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768

  # Reasoning-distilled variant; lower temperature and repeat-penalty 1
  # (i.e. repetition penalty disabled) per the upstream model's recommended
  # sampling settings.
  "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.q4_k_m.gguf
      --mmproj /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.mmproj.gguf
      --alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768
      --temp 0.6 --top-k 20 --top-p 0.95 --repeat-penalty 1