From 8643c4d5bfa8ba131661713029d09b9f1792761a Mon Sep 17 00:00:00 2001
From: qwerty108109 <97707491+qwerty108109@users.noreply.github.com>
Date: Mon, 7 Apr 2025 19:42:26 -0700
Subject: [PATCH 01/10] readme: fix url for big-AGI in community integrations
 (#10173)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 2e794f83..f14c8155 100644
--- a/README.md
+++ b/README.md
@@ -291,7 +291,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
 - [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
 - [Ollamac](https://github.com/kevinhermawan/Ollamac)
-- [big-AGI](https://github.com/enricoros/big-AGI/blob/main/docs/config-local-ollama.md)
+- [big-AGI](https://github.com/enricoros/big-AGI)
 - [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
 - [Amica](https://github.com/semperai/amica)
 - [chatd](https://github.com/BruceMacD/chatd)

From a807985e598c7a905f563023e1f8ee04756e2e36 Mon Sep 17 00:00:00 2001
From: Jesse Gross
Date: Fri, 4 Apr 2025 15:04:25 -0700
Subject: [PATCH 02/10] ggml: Check for OOM and return as Go errors

If there is a CUDA OOM, we currently don't check the return value and
will eventually segfault. This checks for the problem and generates a
Go error. At the moment, this will still result in a panic but having
the error is the first step to being able to handle it more gracefully.
---
 ml/backend/ggml/ggml.go | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)

diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go
index 7e81e995..0aafd60b 100644
--- a/ml/backend/ggml/ggml.go
+++ b/ml/backend/ggml/ggml.go
@@ -281,6 +281,10 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend,
 		}

 		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
+		if b == nil {
+			return nil, fmt.Errorf("unable to allocate memory from device %v for model weights", C.GoString(C.ggml_backend_buft_name(bt)))
+		}
+
 		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
 		bbs[c] = b
 	}
@@ -547,9 +551,9 @@ func pad(length, pad C.size_t) C.size_t {
 	return ((length + pad - 1) / pad) * pad
 }

-func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
+func (c Context) newTensor(dtype ml.DType, shape []int) (ml.Tensor, error) {
 	if c.buft == nil {
-		panic("set Input, Output, or Layer before creating tensors")
+		panic("set Input or Layer before creating tensors")
 	}

 	var cdtype uint32
@@ -570,7 +574,7 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {

 	if len(shape) < 1 || shape[0] == 0 {
 		var shape C.int64_t = 0
-		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
+		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}, nil
 	} else if len(shape) > 4 {
 		panic("unsupported number of dimensions")
 	}
@@ -584,16 +588,29 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
 	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
 	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))
 	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
+	if b == nil {
+		return nil, fmt.Errorf("unable to allocate %v from device %v for new tensor", format.HumanBytes2(uint64(size)), C.GoString(C.ggml_backend_buft_name(c.buft)))
+	}
+
 	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
-	return &Tensor{b: c.b, t: t}
+	return &Tensor{b: c.b, t: t}, nil
 }

 func (c Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
-	return c.newTensor(dtype, shape)
+	t, err := c.newTensor(dtype, shape)
+	if err != nil {
+		panic(err)
+	}
+
+	return t
 }

 func (c Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
-	t := c.newTensor(dtype, shape)
+	t, err := c.newTensor(dtype, shape)
+	if err != nil {
+		panic(err)
+	}
+
 	C.ggml_set_zero(t.(*Tensor).t)
 	return t
 }
@@ -621,7 +638,11 @@ func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) {
 		return nil, err
 	}

-	t := c.newTensor(ml.DTypeF32, shape)
+	t, err := c.newTensor(ml.DTypeF32, shape)
+	if err != nil {
+		return nil, err
+	}
+
 	if len(s) > 0 {
 		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
 	}
@@ -634,7 +655,11 @@ func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) {
 		return nil, err
 	}

-	t := c.newTensor(ml.DTypeI32, shape)
+	t, err := c.newTensor(ml.DTypeI32, shape)
+	if err != nil {
+		return nil, err
+	}
+
 	if len(s) > 0 {
 		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
 	}

From dbb149e6f78673cc1c84e6527321c740d8d36a9a Mon Sep 17 00:00:00 2001
From: Jesse Gross
Date: Thu, 3 Apr 2025 12:50:20 -0700
Subject: [PATCH 03/10] ollamarunner: Preallocate worst case graph at startup

Currently, the KV cache and graph are lazily allocated as needed. The
cache is fully allocated on first use of the corresponding layer
whereas the graph grows with the size of the context. This can be an
issue if another application allocates more VRAM after we do our
calculations - Ollama will crash in the middle of inference.

If we instead allocate the maximum needed memory at startup of the
runner, we will either succeed or fail at that point rather than at
some surprising time in the future.

Currently, this only generates a worst case batch for text, which means
that vision models may get a partial allocation and continue to lazily
allocate the rest.
---
 kvcache/cache.go                  |  5 +-
 kvcache/causal.go                 | 85 +++++++++++++++++--------------
 kvcache/causal_test.go            |  8 +--
 kvcache/encoder.go                | 15 ++++--
 kvcache/wrapper.go                |  4 +-
 ml/backend.go                     |  7 +++
 ml/backend/ggml/ggml.go           | 33 ++++++++++--
 model/model.go                    |  2 +-
 runner/ollamarunner/cache_test.go |  2 +-
 runner/ollamarunner/runner.go     | 50 ++++++++++++++++++
 10 files changed, 156 insertions(+), 55 deletions(-)

diff --git a/kvcache/cache.go b/kvcache/cache.go
index 07015b9e..405c7973 100644
--- a/kvcache/cache.go
+++ b/kvcache/cache.go
@@ -56,8 +56,9 @@ type Cache interface {
 	// StartForward is called before the start of the model's forward pass.
 	// For each token in the coming batch, there must be a corresponding
-	// entry in positions and seqs.
-	StartForward(ctx ml.Context, batch input.Batch) error
+	// entry in positions and seqs. reserve is to preallocate memory
+	// without actually storing data in the cache.
+ StartForward(ctx ml.Context, batch input.Batch, reserve bool) error // CopyPrefix copies tokens in the range [0, len) from srcSeq to dstSeq CopyPrefix(srcSeq, dstSeq int, len int32) diff --git a/kvcache/causal.go b/kvcache/causal.go index 4fc18d88..46672284 100644 --- a/kvcache/causal.go +++ b/kvcache/causal.go @@ -146,51 +146,60 @@ func (c *Causal) Close() { } } -func (c *Causal) StartForward(ctx ml.Context, batch input.Batch) error { +func (c *Causal) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { c.curBatchSize = len(batch.Positions) c.curSequences = batch.Sequences c.curPositions = batch.Positions c.opts.Except = nil - c.updateSlidingWindow() + if !reserve { + c.updateSlidingWindow() + + var err error + c.curLoc, err = c.findStartLoc() + if errors.Is(err, ErrKvCacheFull) { + c.defrag() + c.curLoc, err = c.findStartLoc() + } + if err != nil { + return err + } + + c.curCellRange = newRange() + for i, pos := range batch.Positions { + seq := batch.Sequences[i] + + c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}} + + seqRange, ok := c.cellRanges[seq] + if !ok { + seqRange = newRange() + } + + if c.curLoc+i > seqRange.max { + seqRange.max = c.curLoc + i + } + if seqRange.max > c.curCellRange.max { + c.curCellRange.max = seqRange.max + } + + if c.curLoc+i < seqRange.min { + seqRange.min = c.curLoc + i + } + if seqRange.min < c.curCellRange.min { + c.curCellRange.min = seqRange.min + } + c.cellRanges[seq] = seqRange + } + } else { + // If we are reserving memory, don't update any of the cache metadata but set the size + // to the worst case. + c.curLoc = 0 + c.curCellRange.min = 0 + c.curCellRange.max = len(c.cells) - 1 + } var err error - c.curLoc, err = c.findStartLoc() - if errors.Is(err, ErrKvCacheFull) { - c.defrag() - c.curLoc, err = c.findStartLoc() - } - if err != nil { - return err - } - - c.curCellRange = newRange() - for i, pos := range batch.Positions { - seq := batch.Sequences[i] - - c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}} - - seqRange, ok := c.cellRanges[seq] - if !ok { - seqRange = newRange() - } - - if c.curLoc+i > seqRange.max { - seqRange.max = c.curLoc + i - } - if seqRange.max > c.curCellRange.max { - c.curCellRange.max = seqRange.max - } - - if c.curLoc+i < seqRange.min { - seqRange.min = c.curLoc + i - } - if seqRange.min < c.curCellRange.min { - c.curCellRange.min = seqRange.min - } - c.cellRanges[seq] = seqRange - } - c.curMask, err = c.buildMask(ctx) return err diff --git a/kvcache/causal_test.go b/kvcache/causal_test.go index bd63214c..78f60090 100644 --- a/kvcache/causal_test.go +++ b/kvcache/causal_test.go @@ -281,7 +281,7 @@ func testCache(t *testing.T, backend ml.Backend, cache Cache, tests []testCase) context := backend.NewContext() defer context.Close() - err := cache.StartForward(context, input.Batch{Positions: test.pos, Sequences: test.seqs}) + err := cache.StartForward(context, input.Batch{Positions: test.pos, Sequences: test.seqs}, false) if err != nil { panic(err) } @@ -315,7 +315,7 @@ func TestCanResume(t *testing.T) { err := cache.StartForward(context, input.Batch{ Positions: []int32{0, 1, 2, 3}, Sequences: []int{0, 0, 0, 0}, - }) + }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } @@ -342,7 +342,7 @@ func TestCanResume(t *testing.T) { err = cache.StartForward(context, input.Batch{ Positions: []int32{4, 5}, Sequences: []int{0, 0}, - }) + }, false) if err != nil { t.Fatalf("StartForward failed: %v", err) } @@ -440,6 +440,8 @@ func (c *testContext) Forward(...ml.Tensor) 
ml.Context { return c } func (c *testContext) Compute(...ml.Tensor) {} +func (c *testContext) Reserve() error { return nil } + func (c *testContext) MaxGraphNodes() int { return 10 } diff --git a/kvcache/encoder.go b/kvcache/encoder.go index 03d650a3..0f269c3e 100644 --- a/kvcache/encoder.go +++ b/kvcache/encoder.go @@ -27,6 +27,11 @@ type EncoderCache struct { // anything will be stored) curPos int32 + // curReserve indicates that this forward pass is only for + // memory reservation and we should not update our metadata + // based on it. + curReserve bool + // ** cache metadata ** // was something stored in the cache? @@ -83,12 +88,14 @@ func (c *EncoderCache) Close() { } } -func (c *EncoderCache) StartForward(ctx ml.Context, batch input.Batch) error { +func (c *EncoderCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { // We work with the most recent image if len(batch.Multimodal) > 0 { c.curPos = batch.Positions[batch.Multimodal[len(batch.Multimodal)-1].Index] } + c.curReserve = reserve + return nil } @@ -105,8 +112,10 @@ func (c *EncoderCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) { } func (c *EncoderCache) Put(ctx ml.Context, key, value ml.Tensor) { - c.encoderPos = c.curPos - c.encoderCached = true + if !c.curReserve { + c.encoderPos = c.curPos + c.encoderCached = true + } if c.config.PermutedV { value = value.Permute(ctx, 1, 2, 0, 3) diff --git a/kvcache/wrapper.go b/kvcache/wrapper.go index 926bc2d4..7533d959 100644 --- a/kvcache/wrapper.go +++ b/kvcache/wrapper.go @@ -41,9 +41,9 @@ func (c *WrapperCache) Close() { } } -func (c *WrapperCache) StartForward(ctx ml.Context, batch input.Batch) error { +func (c *WrapperCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { for i, cache := range c.caches { - err := cache.StartForward(ctx, batch) + err := cache.StartForward(ctx, batch, reserve) if err != nil { // unwind on error - Remove with endIndex set to math.MaxInt32 does not fail for j := i - 1; j >= 0; j-- { diff --git a/ml/backend.go b/ml/backend.go index fffc04a4..b2a83cfd 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -97,6 +97,13 @@ type Context interface { Forward(...Tensor) Context Compute(...Tensor) + + // Reserve is analogous to Compute but rather than executing a + // graph, simply preallocates memory. Typically called with a + // worst case graph to ensure all resources are available for + // for future inference. 
+ Reserve() error + MaxGraphNodes() int Close() diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 0aafd60b..24bdd903 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -10,6 +10,7 @@ import "C" import ( "context" + "errors" "fmt" "io" "log/slog" @@ -42,8 +43,12 @@ func devices() []*C.struct_ggml_backend_device { } type Backend struct { - meta *fsggml.GGML - sched *C.struct_ggml_backend_sched + meta *fsggml.GGML + + sched *C.struct_ggml_backend_sched + schedBackends []*C.struct_ggml_backend + schedBufts []*C.struct_ggml_backend_buffer_type + tensors map[string]*C.struct_ggml_tensor // input is the backend used for inputs @@ -389,8 +394,6 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend, schedBackends = append(schedBackends, b) schedBufts = append(schedBufts, bt) - slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) - if C.ggml_backend_is_cpu(b) { // set number of threads for cpu backend C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads))) @@ -409,7 +412,9 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend, C.size_t(maxGraphNodes), C._Bool(len(gpus) > 1 && slices.Contains(gpus, output.d)), ), - input: deviceBufferTypes[input.d], + schedBackends: schedBackends, + schedBufts: schedBufts, + input: deviceBufferTypes[input.d], layers: func() map[int]*C.struct_ggml_backend_buffer_type { m := make(map[int]*C.struct_ggml_backend_buffer_type) for i, layer := range layers { @@ -534,6 +539,24 @@ func (c Context) Compute(tensors ...ml.Tensor) { } } +func (c Context) Reserve() error { + if !C.ggml_backend_sched_reserve(c.b.sched, c.graph) { + C.ggml_backend_sched_reset(c.b.sched) + return errors.New("failed to reserve graph") + } + + slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched)) + for i := range c.b.schedBackends { + size := C.ggml_backend_sched_get_buffer_size(c.b.sched, c.b.schedBackends[i]) + slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])), "buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])), + "size", format.HumanBytes2(uint64(size))) + } + + C.ggml_backend_sched_reset(c.b.sched) + + return nil +} + func (c Context) MaxGraphNodes() int { return c.maxGraphNodes } diff --git a/model/model.go b/model/model.go index bc8944d2..ab96c4c7 100644 --- a/model/model.go +++ b/model/model.go @@ -299,7 +299,7 @@ func Forward(ctx ml.Context, m Model, inputs []int32, batch input.Batch) (ml.Ten cache := m.Config().Cache if cache != nil { - err := cache.StartForward(ctx, batch) + err := cache.StartForward(ctx, batch, false) if err != nil { return nil, err } diff --git a/runner/ollamarunner/cache_test.go b/runner/ollamarunner/cache_test.go index 543b4b2f..062b654c 100644 --- a/runner/ollamarunner/cache_test.go +++ b/runner/ollamarunner/cache_test.go @@ -448,7 +448,7 @@ func (m *mockCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) func (m *mockCache) Put(ctx ml.Context, key, value ml.Tensor) {} func (m *mockCache) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) {} func (m *mockCache) Close() {} -func (m *mockCache) StartForward(ctx ml.Context, batch input.Batch) error { return nil } +func (m *mockCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { return nil } func (m *mockCache) CopyPrefix(srcSeq, dstSeq int, len 
int32) {} func (m *mockCache) SetConfig(ml.CacheConfig) {} func (m *mockCache) CanResume(seq int, pos int32) bool { return true } diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index 7b7e0940..fee05280 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -728,6 +728,51 @@ func (m *multiLPath) String() string { return strings.Join(*m, ", ") } +func (s *Server) reserveWorstCaseGraph() error { + ctx := s.model.Backend().NewContext() + defer ctx.Close() + + var batch input.Batch + + inputs := make([]int32, s.batchSize) + batch.Positions = make([]int32, len(inputs)) + batch.Sequences = make([]int, len(inputs)) + for i := range inputs { + batch.Positions[i] = int32(i) + } + + batch.Outputs = make([]int32, s.parallel) + for i := range batch.Outputs { + batch.Outputs[i] = int32(i) + } + + var err error + batch.Inputs, err = ctx.Input().FromIntSlice(inputs, len(inputs)) + if err != nil { + return err + } + + cache := s.model.Config().Cache + if cache != nil { + err := cache.StartForward(ctx, batch, true) + if err != nil { + return err + } + } + + t, err := s.model.Forward(ctx, batch) + if err != nil { + return err + } + + err = ctx.Forward(t).Reserve() + if err != nil { + return err + } + + return nil +} + func (s *Server) loadModel( ctx context.Context, mpath string, @@ -765,6 +810,11 @@ func (s *Server) loadModel( s.seqs = make([]*Sequence, s.parallel) s.seqsSem = semaphore.NewWeighted(int64(s.parallel)) + err = s.reserveWorstCaseGraph() + if err != nil { + panic(err) + } + s.status = llm.ServerStatusReady s.ready.Done() } From ccc8c6777bc926b4cdb3b0a89ad344418b6710da Mon Sep 17 00:00:00 2001 From: frob Date: Wed, 9 Apr 2025 00:01:39 +0200 Subject: [PATCH 04/10] cleanup: remove OLLAMA_TMPDIR and references to temporary executables (#10182) * cleanup: remove OLLAMA_TMPDIR * cleanup: ollama doesn't use temporary executables anymore --------- Co-authored-by: Richard Lyons --- cmd/cmd.go | 1 - docs/troubleshooting.md | 5 ----- docs/windows.md | 1 - 3 files changed, 7 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 84727862..fef7242b 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1381,7 +1381,6 @@ func NewCLI() *cobra.Command { envVars["OLLAMA_NOPRUNE"], envVars["OLLAMA_ORIGINS"], envVars["OLLAMA_SCHED_SPREAD"], - envVars["OLLAMA_TMPDIR"], envVars["OLLAMA_FLASH_ATTENTION"], envVars["OLLAMA_KV_CACHE_TYPE"], envVars["OLLAMA_LLM_LIBRARY"], diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 32ad48c4..ba5487fe 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -26,7 +26,6 @@ When you run Ollama on **Windows**, there are a few different locations. You can - `explorer %LOCALAPPDATA%\Ollama` to view logs. 
The most recent server logs will be in `server.log` and older logs will be in `server-#.log` - `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH) - `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored -- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal @@ -69,10 +68,6 @@ If you run into problems on Linux and want to install an older version, or you'd curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.5.7 sh ``` -## Linux tmp noexec - -If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/ - ## Linux docker If Ollama initially works on the GPU in a docker container, but then switches to running on CPU after some period of time with errors in the server log reporting GPU discovery failures, this can be resolved by disabling systemd cgroup management in Docker. Edit `/etc/docker/daemon.json` on the host and add `"exec-opts": ["native.cgroupdriver=cgroupfs"]` to the docker configuration. diff --git a/docs/windows.md b/docs/windows.md index 78b99a5d..0bffa4b4 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -62,7 +62,6 @@ the explorer window by hitting `+R` and type in: - *upgrade.log* contains log output for upgrades - `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH) - `explorer %HOMEPATH%\.ollama` contains models and configuration -- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories ## Uninstall From 6747099d715bfc8c9679c3f6365a08ffb950ad32 Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Tue, 8 Apr 2025 15:05:38 -0700 Subject: [PATCH 05/10] types: add any type and validation for ToolFunction enum (#10166) --- api/types.go | 2 +- api/types_test.go | 61 ++++++++++++++++++++++++++++++++++ openai/openai_test.go | 6 ++-- server/routes_generate_test.go | 12 +++---- 4 files changed, 71 insertions(+), 10 deletions(-) diff --git a/api/types.go b/api/types.go index 0e63ef8b..4e8486ac 100644 --- a/api/types.go +++ b/api/types.go @@ -217,7 +217,7 @@ type ToolFunction struct { Properties map[string]struct { Type PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` } `json:"properties"` } `json:"parameters"` } diff --git a/api/types_test.go b/api/types_test.go index e22c047f..1a6fc811 100644 --- a/api/types_test.go +++ b/api/types_test.go @@ -232,6 +232,67 @@ func TestMessage_UnmarshalJSON(t *testing.T) { } } +func TestToolFunction_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + input string + wantErr string + }{ + { + name: "valid enum with same types", + input: `{ + "name": "test", + "description": "test function", + "parameters": { + "type": "object", + "required": ["test"], + "properties": { + "test": { + "type": "string", + "description": "test prop", + "enum": ["a", "b", "c"] + } + } + } + }`, + wantErr: "", + }, + { + name: "empty enum array", + input: `{ + "name": "test", + "description": "test function", + "parameters": { + "type": "object", + "required": ["test"], + "properties": { + "test": { 
+ "type": "string", + "description": "test prop", + "enum": [] + } + } + } + }`, + wantErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var tf ToolFunction + err := json.Unmarshal([]byte(tt.input), &tf) + + if tt.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + func TestPropertyType_UnmarshalJSON(t *testing.T) { tests := []struct { name string diff --git a/openai/openai_test.go b/openai/openai_test.go index 6039cb65..46fce7c8 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -285,7 +285,7 @@ func TestChatMiddleware(t *testing.T) { Properties map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` } `json:"properties"` }{ Type: "object", @@ -293,7 +293,7 @@ func TestChatMiddleware(t *testing.T) { Properties: map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` }{ "location": { Type: api.PropertyType{"string"}, @@ -301,7 +301,7 @@ func TestChatMiddleware(t *testing.T) { }, "unit": { Type: api.PropertyType{"string"}, - Enum: []string{"celsius", "fahrenheit"}, + Enum: []any{"celsius", "fahrenheit"}, }, }, }, diff --git a/server/routes_generate_test.go b/server/routes_generate_test.go index 2613978a..00a50cc3 100644 --- a/server/routes_generate_test.go +++ b/server/routes_generate_test.go @@ -374,7 +374,7 @@ func TestGenerateChat(t *testing.T) { Properties map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` } `json:"properties"` }{ Type: "object", @@ -382,7 +382,7 @@ func TestGenerateChat(t *testing.T) { Properties: map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` }{ "location": { Type: api.PropertyType{"string"}, @@ -390,7 +390,7 @@ func TestGenerateChat(t *testing.T) { }, "unit": { Type: api.PropertyType{"string"}, - Enum: []string{"celsius", "fahrenheit"}, + Enum: []any{"celsius", "fahrenheit"}, }, }, }, @@ -471,7 +471,7 @@ func TestGenerateChat(t *testing.T) { Properties map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` } `json:"properties"` }{ Type: "object", @@ -479,7 +479,7 @@ func TestGenerateChat(t *testing.T) { Properties: map[string]struct { Type api.PropertyType `json:"type"` Description string `json:"description"` - Enum []string `json:"enum,omitempty"` + Enum []any `json:"enum,omitempty"` }{ "location": { Type: api.PropertyType{"string"}, @@ -487,7 +487,7 @@ func TestGenerateChat(t *testing.T) { }, "unit": { Type: api.PropertyType{"string"}, - Enum: []string{"celsius", "fahrenheit"}, + Enum: []any{"celsius", "fahrenheit"}, }, }, }, From d98bfe7e7083f54e4c9065d4138f53bd47348761 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 3 Apr 2025 16:54:46 -0700 Subject: [PATCH 06/10] kvcache: stub out test structs --- kvcache/causal_test.go | 117 +++-------------------------------------- 1 file changed, 6 insertions(+), 111 deletions(-) diff --git a/kvcache/causal_test.go b/kvcache/causal_test.go index 78f60090..07bc788b 100644 --- 
a/kvcache/causal_test.go +++ b/kvcache/causal_test.go @@ -5,7 +5,6 @@ import ( "slices" "testing" - "github.com/ollama/ollama/fs" "github.com/ollama/ollama/ml" "github.com/ollama/ollama/model/input" ) @@ -372,14 +371,8 @@ func TestCanResume(t *testing.T) { } } -type testBackend struct{} - -func (b *testBackend) Config() fs.Config { - panic("not implemented") -} - -func (b *testBackend) Get(name string) ml.Tensor { - panic("not implemented") +type testBackend struct { + ml.Backend } func (b *testBackend) NewContext() ml.Context { @@ -390,12 +383,10 @@ func (b *testBackend) NewContextSize(int) ml.Context { return &testContext{} } -func (b *testBackend) SystemInfo() string { - return "not implemented" +type testContext struct { + ml.Context } -type testContext struct{} - func (c *testContext) Empty(dtype ml.DType, shape ...int) ml.Tensor { total := 0 @@ -449,6 +440,8 @@ func (c *testContext) MaxGraphNodes() int { func (c *testContext) Close() {} type testTensor struct { + ml.Tensor + dtype ml.DType elementSize int data []float32 @@ -476,10 +469,6 @@ func (t *testTensor) DType() ml.DType { return t.dtype } -func (t *testTensor) Bytes() []byte { - panic("not implemented") -} - func (t *testTensor) Floats() []float32 { out := make([]float32, len(t.data)) copy(out, t.data) @@ -504,64 +493,6 @@ func (t *testTensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor { return out } -func (t *testTensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Softmax(ctx ml.Context) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) LayerNorm(ctx ml.Context, weight, bias ml.Tensor, eps float32) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) RMSNorm(ctx ml.Context, weight ml.Tensor, eps float32) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Scale(ctx ml.Context, s float64) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) AvgPool1D(ctx ml.Context, k, s, p int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Conv2D(ctx ml.Context, weight ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, dim, ropeType uint32, base, scale float32) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) IM2Col(ctx ml.Context, weight ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Cos(ctx ml.Context) ml.Tensor { panic("not implemented") } -func (t *testTensor) Sin(ctx ml.Context) ml.Tensor { panic("not implemented") } -func (t *testTensor) Tanh(ctx ml.Context) ml.Tensor { panic("not implemented") } -func (t *testTensor) GELU(ctx ml.Context) ml.Tensor { panic("not implemented") } -func (t *testTensor) SILU(ctx ml.Context) ml.Tensor { panic("not implemented") } - -func (t *testTensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor { - panic("not implemented") -} - func (t *testTensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor { offset /= t.elementSize @@ -584,43 +515,7 @@ func (t *testTensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor { return view } -func (t *testTensor) 
Permute(ctx ml.Context, shape ...int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Contiguous(ctx ml.Context) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Pad(ctx ml.Context, shape ...int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Unpad(ctx ml.Context, shape ...int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor { panic("not implemented") } - -func (t *testTensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor { - panic("not implemented") -} - -func (t *testTensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor { - panic("not implemented") -} - func (t *testTensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor { copy(t2.(*testTensor).data, t.data) return nil } - -func (t *testTensor) Duplicate(ctx ml.Context) ml.Tensor { panic("not implemented") } From e7019c94554e6d93bf216cb484c8c1c69df98fcb Mon Sep 17 00:00:00 2001 From: CYJiang <86391540+googs1025@users.noreply.github.com> Date: Wed, 9 Apr 2025 06:17:40 +0800 Subject: [PATCH 07/10] fix(integration): move waitgroup Add(1) outside goroutine to avoid potential issue (#10070) Signed-off-by: googs1025 --- integration/max_queue_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/max_queue_test.go b/integration/max_queue_test.go index c316aa62..50279531 100644 --- a/integration/max_queue_test.go +++ b/integration/max_queue_test.go @@ -52,8 +52,8 @@ func TestMaxQueue(t *testing.T) { embedCtx := ctx var genwg sync.WaitGroup + genwg.Add(1) go func() { - genwg.Add(1) defer genwg.Done() slog.Info("Starting generate request") DoGenerate(ctx, t, client, req, resp, 45*time.Second, 5*time.Second) @@ -71,8 +71,8 @@ func TestMaxQueue(t *testing.T) { counterMu := sync.Mutex{} var embedwg sync.WaitGroup for i := 0; i < threadCount; i++ { + embedwg.Add(1) go func(i int) { - embedwg.Add(1) defer embedwg.Done() slog.Info("embed started", "id", i) embedReq := api.EmbeddingRequest{ From 5c0331fd83877a5a91ec216c6d40b7ceaa8ff51e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B9=9B=E9=9C=B2=E5=85=88=E7=94=9F?= Date: Thu, 10 Apr 2025 04:24:56 +0800 Subject: [PATCH 08/10] Fix dockerfile. 
 (#9855)

Signed-off-by: zhanluxianshen
---
 Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 4136fca7..4c6619e7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -104,8 +104,8 @@ COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
 FROM --platform=linux/arm64 scratch AS arm64
 COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11
 COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
-COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 lib/ollama/cuda_jetpack5
-COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 lib/ollama/cuda_jetpack6
+COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_jetpack5
+COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_jetpack6

 FROM scratch AS rocm
 COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm

From 42ecb9f13896c5329764e1946ec3ab1aad2de0a1 Mon Sep 17 00:00:00 2001
From: Ire Gaddr <130914610+IreGaddr@users.noreply.github.com>
Date: Wed, 9 Apr 2025 18:01:02 -0500
Subject: [PATCH 09/10] fix(scheduler): make model unload order deterministic
 (#10185)

---
 server/sched.go | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/server/sched.go b/server/sched.go
index 8082680b..f3978796 100644
--- a/server/sched.go
+++ b/server/sched.go
@@ -667,13 +667,19 @@ func (runner *runnerRef) waitForVRAMRecovery() chan any {
 	return finished
 }

-type ByDuration []*runnerRef
+type ByDurationAndName []*runnerRef

-func (a ByDuration) Len() int      { return len(a) }
-func (a ByDuration) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByDuration) Less(i, j int) bool {
-	// uint64 to turn negative time (never unload) to largest
-	return uint64(a[i].sessionDuration) < uint64(a[j].sessionDuration)
+func (a ByDurationAndName) Len() int      { return len(a) }
+func (a ByDurationAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a ByDurationAndName) Less(i, j int) bool {
+	// Primary sort by session duration (uint64 to handle negatives)
+	d1 := uint64(a[i].sessionDuration)
+	d2 := uint64(a[j].sessionDuration)
+	if d1 != d2 {
+		return d1 < d2
+	}
+	// Secondary sort by model path lex order
+	return a[i].modelPath < a[j].modelPath
 }

 // TODO - future consideration to pick runners based on size
@@ -775,7 +781,7 @@ func (s *Scheduler) findRunnerToUnload() *runnerRef {
 	// In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload
 	// e.g., if we have multiple options, will one make room for the request?
- sort.Sort(ByDuration(runnerList)) + sort.Sort(ByDurationAndName(runnerList)) // First try to find a runner that's already idle for _, runner := range runnerList { From ef65174df23fb2efb499a18d7071348cc0ec58da Mon Sep 17 00:00:00 2001 From: Tom Sheffler Date: Wed, 9 Apr 2025 17:45:49 -0700 Subject: [PATCH 10/10] types: include the 'items' and '$defs' fields to properly handle "array" types (#10091) --------- Co-authored-by: Parth Sareen --- api/types.go | 4 ++++ openai/openai_test.go | 4 ++++ server/routes_generate_test.go | 8 ++++++++ 3 files changed, 16 insertions(+) diff --git a/api/types.go b/api/types.go index 4e8486ac..53a9593e 100644 --- a/api/types.go +++ b/api/types.go @@ -163,6 +163,7 @@ func (t *ToolCallFunctionArguments) String() string { type Tool struct { Type string `json:"type"` + Items any `json:"items,omitempty"` Function ToolFunction `json:"function"` } @@ -213,9 +214,12 @@ type ToolFunction struct { Description string `json:"description"` Parameters struct { Type string `json:"type"` + Defs any `json:"$defs,omitempty"` + Items any `json:"items,omitempty"` Required []string `json:"required"` Properties map[string]struct { Type PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` } `json:"properties"` diff --git a/openai/openai_test.go b/openai/openai_test.go index 46fce7c8..a24093ad 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -281,9 +281,12 @@ func TestChatMiddleware(t *testing.T) { Description: "Get the current weather", Parameters: struct { Type string `json:"type"` + Defs any `json:"$defs,omitempty"` + Items any `json:"items,omitempty"` Required []string `json:"required"` Properties map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` } `json:"properties"` @@ -292,6 +295,7 @@ func TestChatMiddleware(t *testing.T) { Required: []string{"location"}, Properties: map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` }{ diff --git a/server/routes_generate_test.go b/server/routes_generate_test.go index 00a50cc3..56121d41 100644 --- a/server/routes_generate_test.go +++ b/server/routes_generate_test.go @@ -370,9 +370,12 @@ func TestGenerateChat(t *testing.T) { Description: "Get the current weather", Parameters: struct { Type string `json:"type"` + Defs any `json:"$defs,omitempty"` + Items any `json:"items,omitempty"` Required []string `json:"required"` Properties map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` } `json:"properties"` @@ -381,6 +384,7 @@ func TestGenerateChat(t *testing.T) { Required: []string{"location"}, Properties: map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` }{ @@ -467,9 +471,12 @@ func TestGenerateChat(t *testing.T) { Description: "Get the current weather", Parameters: struct { Type string `json:"type"` + Defs any `json:"$defs,omitempty"` + Items any `json:"items,omitempty"` Required []string `json:"required"` Properties map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` } 
`json:"properties"` @@ -478,6 +485,7 @@ func TestGenerateChat(t *testing.T) { Required: []string{"location"}, Properties: map[string]struct { Type api.PropertyType `json:"type"` + Items any `json:"items,omitempty"` Description string `json:"description"` Enum []any `json:"enum,omitempty"` }{