feat(llama-swap): add 128k max-ctx profiles for qwen3.5 variants

This commit is contained in:
Davide Polonio 2026-04-10 16:10:04 +02:00
parent df3b927985
commit c233d06dcb

View File

@@ -60,6 +60,51 @@ models:
      --cache-type-v q8_0
      --temp 0.6 --top-p 0.95
"qwen3.5:9b-128k":
cmd: |
/app/llama-server
--host 0.0.0.0 --port ${PORT}
--model /models/Qwen3.5-9B-Q4_K_M.gguf
--mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
--alias qwen3.5:9b-128k
--n-gpu-layers 999
--ctx-size 131072
--flash-attn on
--jinja
--cache-type-k q8_0
--cache-type-v q8_0
--temp 0.7 --top-p 0.9
"qwen3.5:9b-uncensored-128k":
cmd: |
/app/llama-server
--host 0.0.0.0 --port ${PORT}
--model /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.q4_k_m.gguf
--mmproj /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.mmproj.gguf
--alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
--n-gpu-layers 999
--ctx-size 131072
--flash-attn on
--jinja
--cache-type-k q8_0
--cache-type-v q8_0
--temp 0.7 --top-p 0.9
"qwen3.5:9b-claude-4.6-opus-reasoning-128k":
cmd: |
/app/llama-server
--host 0.0.0.0 --port ${PORT}
--model /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.q4_k_m.gguf
--mmproj /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.mmproj.gguf
--alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
--n-gpu-layers 999
--ctx-size 131072
--flash-attn on
--jinja
--cache-type-k q8_0
--cache-type-v q8_0
--temp 0.6 --top-p 0.95
"gemma4:e4b-uncensored": "gemma4:e4b-uncensored":
cmd: | cmd: |
/app/llama-server /app/llama-server