Enable Ollama engine by default (#13443)

This changes the default behavior to use the Ollama engine for supported
models, while retaining the ability to disable the Ollama engine and
fall back to the llama engine. Models in the OllamaEngineRequired list
will always run on the Ollama engine.
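For context on the call-site change in the diff below: envconfig.NewEngine(true) reads an on/off toggle from the environment and falls back to a caller-supplied default when the variable is unset. The following is a minimal, hypothetical sketch of that pattern, not Ollama's actual envconfig code; the variable name OLLAMA_NEW_ENGINE and the helper boolWithDefault are assumptions made for illustration.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolWithDefault (hypothetical helper) returns defaultValue when the
// environment variable is unset or not parsable as a bool, so the
// default can be flipped without removing the user's ability to opt out.
func boolWithDefault(key string, defaultValue bool) bool {
	raw, ok := os.LookupEnv(key)
	if !ok {
		return defaultValue
	}
	v, err := strconv.ParseBool(raw)
	if err != nil {
		return defaultValue
	}
	return v
}

func main() {
	// With the default set to true, the Ollama engine is used unless the
	// user explicitly disables it (e.g. OLLAMA_NEW_ENGINE=false). The
	// variable name here is an assumption for illustration.
	if boolWithDefault("OLLAMA_NEW_ENGINE", true) {
		fmt.Println("using the Ollama engine")
	} else {
		fmt.Println("falling back to the llama engine")
	}
}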
Author: Daniel Hiltgen
Date: 2025-12-12 11:48:43 -08:00
Committed by: GitHub
Parent: de9ecfd01c
Commit: 7730895158
2 changed files with 3 additions and 3 deletions


@@ -143,7 +143,7 @@ func NewLlamaServer(systemInfo ml.SystemInfo, gpus []ml.DeviceInfo, modelPath st
 	var llamaModel *llama.Model
 	var textProcessor model.TextProcessor
 	var err error
-	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
+	if envconfig.NewEngine(true) || f.KV().OllamaEngineRequired() {
 		if len(projectors) == 0 {
 			textProcessor, err = model.NewTextProcessor(modelPath)
 		} else {
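
With true passed as the default at this call site, the text-processor (Ollama engine) branch is taken unless the user has explicitly disabled the new engine, and models whose metadata reports OllamaEngineRequired take that branch regardless of the setting.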