feat(llama-swap): fix contexts, add flash-attn/jinja, tune sampling params

This commit is contained in:
Davide Polonio 2026-04-10 15:42:19 +02:00
parent 2d852879b6
commit 55ac2e5568

View File

@@ -11,18 +11,23 @@ models:
       --alias qwen3.5:9b
       --n-gpu-layers 999
       --ctx-size 8192
-      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5
+      --flash-attn
+      --jinja
+      --temp 0.7 --top-p 0.9
-  "qwen3.5:9bctxSmall":
+  "qwen3.5:9b-32k":
     cmd: |
       /app/llama-server
       --host 0.0.0.0 --port ${PORT}
       --model /models/Qwen3.5-9B-Q4_K_M.gguf
       --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
-      --alias qwen3.5:9bctxSmall
+      --alias qwen3.5:9b-32k
       --n-gpu-layers 999
-      --ctx-size 131072
+      --ctx-size 32768
-      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5
+      --flash-attn
+      --jinja
+      --cache-type-k q8_0 --cache-type-v q8_0
+      --temp 0.7 --top-p 0.9
   "qwen3.5:9b-uncensored":
     cmd: |
@@ -33,6 +38,10 @@ models:
       --alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
       --n-gpu-layers 999
       --ctx-size 32768
+      --flash-attn
+      --jinja
+      --cache-type-k q8_0 --cache-type-v q8_0
+      --temp 0.7 --top-p 0.9
   "qwen3.5:9b-claude-4.6-opus-reasoning":
     cmd: |
@@ -43,13 +52,17 @@ models:
       --alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
       --n-gpu-layers 999
       --ctx-size 32768
-      --temp 0.6 --top-k 20 --top-p 0.95 --repeat-penalty 1
+      --flash-attn
+      --jinja
+      --cache-type-k q8_0 --cache-type-v q8_0
+      --temp 0.6 --top-p 0.95
   "gemma4:e4b-uncensored":
     cmd: |
       /app/llama-server
       --host 0.0.0.0 --port ${PORT}
       --jinja
+      --flash-attn
       --model /models/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-Q8_K_P.gguf
       --mmproj /models/mmproj-Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-f16.gguf
       --alias "hf.co/HauhauCS/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive:q8_k_p"
@@ -61,6 +74,7 @@ models:
       /app/llama-server
       --host 0.0.0.0 --port ${PORT}
       --jinja
+      --flash-attn
       --model /models/gemma-4-26B-A4B-it-UD-IQ4_NL.gguf
       --mmproj /models/gemma-4-26B-A4B-it-UD-IQ4_NL-mmproj-BF16.gguf
       --alias "hf.co/unsloth/gemma-4-26B-A4B-it:ud-iq4_nl"