fix(ollama): restrict to RX 9070 XT, restore mmproj

- Set HIP_VISIBLE_DEVICES=0 to use only the discrete GPU (gfx1201).
  llama.cpp was trying to split layers across the iGPU (gfx1036) which
  caused segfaults when loading the multimodal projector.
- Restore --mmproj for both HF models (multimodal works correctly with
  single GPU).
- Keep qwen3.5:9b disabled (Ollama-extracted GGUF uses old mrope_sections
  key format incompatible with this llama.cpp build).
This commit is contained in:
Davide Polonio 2026-04-10 00:09:12 +02:00
parent 3034f987d7
commit ebc71492c3
2 changed files with 6 additions and 19 deletions

View File

@@ -8,6 +8,8 @@ services:
    volumes:
      - "/srv/docker/ollama/data/models:/models:ro"
      - "./llama-swap.yaml:/etc/llama-swap/config.yaml:ro"
environment:
- HIP_VISIBLE_DEVICES=0
    devices:
      - "/dev/kfd:/dev/kfd"
      - "/dev/dri:/dev/dri"

View File

@@ -2,25 +2,10 @@ healthCheckTimeout: 180
logLevel: info
models:
+ # NOTE: qwen3.5:9b and qwen3.5:9bctxSmall are disabled — the GGUF extracted
+ # from Ollama uses the old mrope_sections format (3 elements) which this
+ # llama.cpp build rejects. Download a fresh quantization from HuggingFace
+ # (e.g. bartowski/Qwen3.5-9B-GGUF) and add them back.
- "qwen3.5:9b":
-   cmd: |
-     /app/llama-server
-     --host 0.0.0.0 --port ${PORT}
--model /models/qwen3.5-9b.gguf
--alias qwen3.5:9b
--n-gpu-layers 999
--ctx-size 8192
--temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5
"qwen3.5:9bctxSmall":
cmd: |
/app/llama-server
--host 0.0.0.0 --port ${PORT}
--model /models/qwen3.5-9b.gguf
--alias qwen3.5:9bctxSmall
--n-gpu-layers 999
--ctx-size 131072
--temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5
  "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m":
    cmd: |