desktop-dotfiles/ollama/llama-swap.yaml
Davide Polonio ebc71492c3 fix(ollama): restrict to RX 9070 XT, restore mmproj
- Set HIP_VISIBLE_DEVICES=0 to use only the discrete GPU (gfx1201).
  llama.cpp was trying to split layers across the iGPU (gfx1036) which
  caused segfaults when loading the multimodal projector.
- Restore --mmproj for both HF models (multimodal works correctly with
  single GPU).
- Keep qwen3.5:9b disabled (Ollama-extracted GGUF uses old mrope_sections
  key format incompatible with this llama.cpp build).
2026-04-10 00:09:12 +02:00

30 lines
1.3 KiB
YAML

# llama-swap model-swapping proxy configuration.
# Each entry under `models:` maps an OpenAI-compatible model name to the
# llama-server command that llama-swap launches on demand.
# NOTE(review): the commit message says HIP_VISIBLE_DEVICES=0 restricts
# llama.cpp to the discrete GPU (gfx1201); that env var is not visible in
# this file — presumably set on the container/service. TODO confirm.

# Seconds to wait for a freshly started llama-server to pass its health
# check before giving up (large models can take a while to load).
healthCheckTimeout: 180
logLevel: info
models:
  # NOTE: qwen3.5:9b and qwen3.5:9bctxSmall are disabled — the GGUF extracted
  # from Ollama uses the old mrope_sections format (3 elements) which this
  # llama.cpp build rejects. Download a fresh quantization from HuggingFace
  # (e.g. bartowski/Qwen3.5-9B-GGUF) and add them back.

  # Multimodal (mmproj) uncensored Qwen3.5-9B quant; full GPU offload.
  "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.q4_k_m.gguf
      --mmproj /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.mmproj.gguf
      --alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768

  # Reasoning-distilled Qwen3.5-9B quant with explicit sampling parameters.
  "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.q4_k_m.gguf
      --mmproj /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.mmproj.gguf
      --alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768
      --temp 0.6 --top-k 20 --top-p 0.95 --repeat-penalty 1