Switch back to subprocessing for llama.cpp

This should resolve a number of memory leak and stability defects by isolating
llama.cpp in a separate process that we can shut down when idle and gracefully
restart if it runs into problems. It also serves as a first step toward running
multiple copies to support multiple models concurrently.
Daniel Hiltgen
2024-03-14 10:24:13 -07:00
parent 3b6a9154dd
commit 58d95cc9bd
35 changed files with 1416 additions and 1910 deletions
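The commit message describes isolating llama.cpp in its own process so it can be shut down when idle and restarted on failure. Below is a minimal sketch of that lifecycle under stated assumptions; the runner binary name, flags, and restart policy are placeholders for illustration, not the code added in this commit.

// Hypothetical sketch, not part of this diff: supervise the llama.cpp
// runner as a child process and restart it if it dies.
package main

import (
	"log"
	"os/exec"
	"time"
)

func main() {
	for {
		// "./llama-runner" and its flags are placeholders for illustration.
		cmd := exec.Command("./llama-runner", "--port", "8090")
		// A crash inside llama.cpp only takes down this child process,
		// not the parent server.
		if err := cmd.Run(); err != nil {
			log.Printf("runner exited: %v; restarting", err)
			time.Sleep(time.Second) // brief backoff before restarting
			continue
		}
		return // clean exit, e.g. shut down when idle
	}
}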

llm/status.go
@@ -0,0 +1,42 @@
package llm

import (
	"bytes"
	"os"
)

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	LastErrMsg string
	out        *os.File
}

func NewStatusWriter(out *os.File) *StatusWriter {
	return &StatusWriter{
		out: out,
	}
}

// TODO - regex matching to detect errors like
// libcublasLt.so.11: cannot open shared object file: No such file or directory
var errorPrefixes = []string{
	"error:",
	"CUDA error",
	"cudaMalloc failed",
	"\"ERR\"",
}

// Write scans b for known error prefixes, remembers the most recent match in
// LastErrMsg, and passes the bytes through to the underlying file.
func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	for _, prefix := range errorPrefixes {
		if _, after, ok := bytes.Cut(b, []byte(prefix)); ok {
			errMsg = prefix + string(bytes.TrimSpace(after))
		}
	}
	if errMsg != "" {
		w.LastErrMsg = errMsg
	}

	return w.out.Write(b)
}
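For context, one plausible way this writer could be wired into the runner subprocess. This fragment is an assumption, not code from this commit; it would sit in package llm, assumes imports of fmt, os, and os/exec, and uses an illustrative runner path.

// Hypothetical usage sketch: attach a StatusWriter to the runner's stderr so
// the last error-looking line can annotate a failure.
func runWithStatus(runnerPath string) error {
	cmd := exec.Command(runnerPath)
	// Forward the runner's stderr to our own stderr while remembering the
	// last captured error message.
	sw := NewStatusWriter(os.Stderr)
	cmd.Stderr = sw
	if err := cmd.Run(); err != nil {
		if sw.LastErrMsg != "" {
			return fmt.Errorf("runner failed: %w: %s", err, sw.LastErrMsg)
		}
		return fmt.Errorf("runner failed: %w", err)
	}
	return nil
}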