# llama-swap configuration — each entry under `models:` defines the
# llama-server command used to launch that model on demand.
# ${PORT} is substituted by llama-swap at launch time.

# Seconds to wait for a freshly started model to become healthy.
healthCheckTimeout: 180
logLevel: info

models:
  # Qwen3.5 9B instruct, Q4_K_M quant, with multimodal projector; 8K context.
  "qwen3.5:9b":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Qwen3.5-9B-Q4_K_M.gguf
      --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
      --alias qwen3.5:9b
      --n-gpu-layers 999
      --ctx-size 8192
      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5

  # NOTE(review): the key says "ctxSmall" but --ctx-size here is 131072,
  # while the base "qwen3.5:9b" entry above uses 8192 — the two context
  # sizes (or the key names) look swapped; confirm which was intended.
  "qwen3.5:9bctxSmall":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Qwen3.5-9B-Q4_K_M.gguf
      --mmproj /models/Qwen3.5-9B-mmproj-F16.gguf
      --alias qwen3.5:9bctxSmall
      --n-gpu-layers 999
      --ctx-size 131072
      --temp 1 --top-k 20 --top-p 0.95 --presence-penalty 1.5

  # Uncensored fine-tune; no sampling flags, so server defaults apply.
  "qwen3.5:9b-uncensored":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.q4_k_m.gguf
      --mmproj /models/HauhauCS-Qwen3.5-9B-Uncensored-Aggressive.mmproj.gguf
      --alias "hf.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768

  # Reasoning-distilled variant; lower temperature than the base entries.
  "qwen3.5:9b-claude-4.6-opus-reasoning":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --model /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.q4_k_m.gguf
      --mmproj /models/Jackrong-Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2.mmproj.gguf
      --alias "hf.co/Jackrong/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-GGUF:q4_k_m"
      --n-gpu-layers 999
      --ctx-size 32768
      --temp 0.6 --top-k 20 --top-p 0.95 --repeat-penalty 1

  # Gemma 4 E4B uncensored; --jinja applies the model's embedded chat template.
  "gemma4:e4b-uncensored":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --jinja
      --model /models/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-Q8_K_P.gguf
      --mmproj /models/mmproj-Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-f16.gguf
      --alias "hf.co/HauhauCS/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive:q8_k_p"
      --n-gpu-layers 999
      --ctx-size 8192

  # Gemma 4 26B-A4B instruct, Unsloth dynamic IQ4_NL quant.
  "gemma4:26b-a4b":
    cmd: |
      /app/llama-server
      --host 0.0.0.0 --port ${PORT}
      --jinja
      --model /models/gemma-4-26B-A4B-it-UD-IQ4_NL.gguf
      --mmproj /models/gemma-4-26B-A4B-it-UD-IQ4_NL-mmproj-BF16.gguf
      --alias "hf.co/unsloth/gemma-4-26B-A4B-it:ud-iq4_nl"
      --n-gpu-layers 999
      --ctx-size 8192