From 50b5962042d7c5026d5507af44ae28294f8568d9 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 17 Mar 2025 09:33:57 -0700 Subject: [PATCH 1/6] Add support for ROCm gfx1151 (#9773) --- CMakePresets.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakePresets.json b/CMakePresets.json index 442cb2a6..f3507e3f 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -56,7 +56,7 @@ "name": "ROCm 6", "inherits": [ "ROCm" ], "cacheVariables": { - "AMDGPU_TARGETS": "gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-" + "AMDGPU_TARGETS": "gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102;gfx1151;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-" } } ], From 4561fff36e7338f12f12872a5ba2ced4e670796c Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 14 Mar 2025 14:01:13 -0700 Subject: [PATCH 2/6] conditionally enable parallel pipelines --- ml/backend/ggml/ggml.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 03b9acb3..cc1b5936 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -371,7 +371,7 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) { (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])), C.int(len(schedBackends)), C.size_t(maxGraphNodes), - true, + C._Bool(len(gpus) > 1 && slices.Contains(gpus, output.d)), ), input: deviceBufferTypes[input.d], output: deviceBufferTypes[output.d], From 108fe021657ea1f7a012299f424d673e86512df2 Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Mon, 17 Mar 2025 11:24:18 -0700 Subject: [PATCH 3/6] sample: make mutations in transforms explicit (#9743) * updated minP to use early exit making use of sorted tokens --- sample/samplers.go | 5 +- sample/transforms.go | 39 ++++------- sample/transforms_test.go | 138 ++++++++++++++++++++++++++------------ 3 files changed, 110 insertions(+), 72 deletions(-) diff --git a/sample/samplers.go b/sample/samplers.go index e302f914..7c12da08 100644 --- a/sample/samplers.go +++ b/sample/samplers.go @@ -87,8 +87,9 @@ func (s *Sampler) sample(tokens []token) (token, error) { // topK also sorts the tokens in descending order of logits tokens = topK(tokens, s.topK) - tokens = temperature(tokens, s.temperature) - tokens = softmax(tokens) + // scale and normalize the tokens in place + temperature(tokens, s.temperature) + softmax(tokens) tokens = topP(tokens, s.topP) tokens = minP(tokens, s.minP) diff --git a/sample/transforms.go b/sample/transforms.go index a5efa704..3f677553 100644 --- a/sample/transforms.go +++ b/sample/transforms.go @@ -26,17 +26,16 @@ func (h *tokenHeap) Pop() any { } // temperature applies scaling to the logits -func temperature(ts []token, temp float32) []token { +func temperature(ts []token, temp float32) { // Ensure temperature clipping near 0 to avoid numerical instability temp = max(temp, 1e-7) for i := range ts { ts[i].value = ts[i].value / temp } - return ts } // softmax applies normalization to the logits -func softmax(ts []token) []token { +func softmax(ts []token) { // Find max logit for numerical stability maxLogit := float32(math.Inf(-1)) for _, t := range ts { @@ -56,8 +55,6 @@ func softmax(ts []token) []token { for i := range ts { ts[i].value /= sum } - - return ts } // topK limits the number of tokens considered to the k highest logits @@ -99,6 +96,7 @@ func topK(ts []token, k int) []token { } // topP limits tokens to those with 
cumulative probability p +// requires ts to be sorted in descending order of probabilities func topP(ts []token, p float32) []token { if p == 1.0 { return ts @@ -109,37 +107,24 @@ func topP(ts []token, p float32) []token { for i, t := range ts { sum += t.value if sum > float32(p) { - ts = ts[:i+1] - return ts + return ts[:i+1] } } return ts } -// minP limits tokens to those with cumulative probability p +// minP filters tokens with probabilities >= p * max_prob +// requires ts to be sorted in descending order of probabilities func minP(ts []token, p float32) []token { - if p == 1.0 { - return ts - } + maxProb := ts[0].value - maxProb := float32(math.Inf(-1)) - for _, token := range ts { - if token.value > maxProb { - maxProb = token.value + threshold := maxProb * p + + for i, t := range ts { + if t.value < threshold { + return ts[:i] } } - - threshold := maxProb * float32(p) - - // Filter tokens in-place - validTokens := ts[:0] - for i, token := range ts { - if token.value >= threshold { - validTokens = append(validTokens, ts[i]) - } - } - - ts = validTokens return ts } diff --git a/sample/transforms_test.go b/sample/transforms_test.go index 4880dd8f..7faf30a5 100644 --- a/sample/transforms_test.go +++ b/sample/transforms_test.go @@ -34,17 +34,22 @@ func compareLogits(t *testing.T, name string, want []float32, got []token) { func TestTemperature(t *testing.T) { input := []float32{1.0, 4.0, -2.0, 0.0} - got := temperature(toTokens(input), 0.5) + tokens := toTokens(input) + temperature(tokens, 0.5) want := []float32{2.0, 8.0, -4.0, 0.0} - compareLogits(t, "temperature(0.5)", want, got) + compareLogits(t, "temperature(0.5)", want, tokens) - got = temperature(toTokens(input), 1.0) + input = []float32{1.0, 4.0, -2.0, 0.0} + tokens = toTokens(input) + temperature(tokens, 1.0) want = []float32{1.0, 4.0, -2.0, 0.0} - compareLogits(t, "temperature(1)", want, got) + compareLogits(t, "temperature(1)", want, tokens) - got = temperature(toTokens(input), 0.0) + input = []float32{1.0, 4.0, -2.0, 0.0} + tokens = toTokens(input) + temperature(tokens, 0.0) want = []float32{1e7, 4e7, -2e7, 0.0} - compareLogits(t, "temperature(0)", want, got) + compareLogits(t, "temperature(0)", want, tokens) } func TestSoftmax(t *testing.T) { @@ -90,16 +95,17 @@ func TestSoftmax(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := softmax(toTokens(tt.input)) + tokens := toTokens(tt.input) + softmax(tokens) if tt.expected != nil { - compareLogits(t, tt.name, tt.expected, got) + compareLogits(t, tt.name, tt.expected, tokens) return } // Check probabilities sum to 1 var sum float32 - for _, token := range got { + for _, token := range tokens { sum += token.value if token.value < 0 || token.value > 1 { t.Errorf("probability out of range [0,1]: got %f", token.value) @@ -114,38 +120,44 @@ func TestSoftmax(t *testing.T) { func TestTopK(t *testing.T) { input := []float32{0.026986899, 0.043722924, 0.036774673, 0.27755088, 0.0046718004, 0.08582123, 0.20409796, 0.00412893, 0.15720603, 0.045046154, 0.0030491839, 0.01681367} - - // Test k=5 - got := topK(toTokens(input), 5) - if len(got) != 5 { - t.Errorf("topK(5): wrong length: want 5, got %d", len(got)) + tokens := toTokens(input) + tokens = topK(tokens, 5) + if len(tokens) != 5 { + t.Errorf("topK(5): wrong length: want 5, got %d", len(tokens)) } - // Should keep highest 3 values in descending order want := []float32{0.27755088, 0.20409796, 0.15720603, 0.08582123, 0.045046154} - compareLogits(t, "topK(3)", want, got) + compareLogits(t, "topK(3)", 
want, tokens)
 
-	got = topK(toTokens(input), 20)
-	if len(got) != len(input) {
-		t.Errorf("topK(20): wrong length: want %d, got %d", len(input), len(got))
+	tokens = toTokens(input)
+	tokens = topK(tokens, 20)
+	if len(tokens) != len(input) {
+		t.Errorf("topK(20): wrong length: want %d, got %d", len(input), len(tokens))
 	}
 
-	// Test k=-1
 	input = []float32{0.026986899, 0.043722924, 0.036774673, 0.27755088, 0.0046718004, 0.08582123, 0.20409796, 0.00412893, 0.15720603, 0.045046154, 0.0030491839, 0.01681367}
 	want = []float32{0.27755088, 0.20409796, 0.15720603, 0.08582123, 0.045046154, 0.043722924, 0.036774673, 0.026986899, 0.01681367, 0.0046718004, 0.00412893, 0.0030491839}
-	got = topK(toTokens(input), -1)
-	if len(got) != len(input) {
-		t.Errorf("topK(-1): wrong length: want %d, got %d", len(input), len(got))
+	tokens = toTokens(input)
+	tokens = topK(tokens, -1)
+	if len(tokens) != len(input) {
+		t.Errorf("topK(-1): wrong length: want %d, got %d", len(input), len(tokens))
 	}
-	compareLogits(t, "topK(-1)", want, got)
+	compareLogits(t, "topK(-1)", want, tokens)
 
-	// Test k=0
 	input = []float32{0.026986899, 0.043722924, 0.036774673, 0.27755088, 0.0046718004, 0.08582123, 0.20409796, 0.00412893, 0.15720603, 0.045046154, 0.0030491839, 0.01681367}
 	want = []float32{0.27755088, 0.20409796, 0.15720603, 0.08582123, 0.045046154, 0.043722924, 0.036774673, 0.026986899, 0.01681367, 0.0046718004, 0.00412893, 0.0030491839}
-	got = topK(toTokens(input), 0)
-	if len(got) != len(input) {
-		t.Errorf("topK(-1): wrong length: want %d, got %d", len(input), len(got))
+	tokens = toTokens(input)
+	tokens = topK(tokens, 0)
+	if len(tokens) != len(input) {
+		t.Errorf("topK(0): wrong length: want %d, got %d", len(input), len(tokens))
+	}
+	compareLogits(t, "topK(0)", want, tokens)
+
+	input = []float32{-1e7, -2e7, -3e7, -4e7}
+	tokens = toTokens(input)
+	tokens = topK(tokens, 1)
+	if len(tokens) < 1 {
+		t.Error("topK should keep at least one token")
 	}
-	compareLogits(t, "topK(-1)", want, got)
 }
 
 func TestTopP(t *testing.T) {
 	input := []float32{-3, -2, -1, 0, 1}
 	tokens := toTokens(input)
 
 	// First apply temperature and softmax to get probabilities
-	tokens = softmax(tokens)
+	softmax(tokens)
 	tokens = topK(tokens, 20)
 
 	// Then apply topP
-	got := topP(tokens, 0.95)
+	tokens = topP(tokens, 0.95)
 
 	// Should keep tokens until cumsum > 0.95
-	if len(got) > 3 {
-		t.Errorf("topP(0.95): kept too many tokens: got %d", len(got))
-		t.Logf("got: %v", got)
+	if len(tokens) > 3 {
+		t.Errorf("topP(0.95): kept too many tokens: got %d", len(tokens))
+		t.Logf("got: %v", tokens)
+	}
+
+	// Test edge case - ensure at least one token remains
+	input = []float32{-1e6, -1e6, -1e6} // All equal logits, so equal probabilities after softmax
+	tokens = toTokens(input)
+	softmax(tokens)
+	tokens = topP(tokens, 0.0) // Very small p
+	if len(tokens) < 1 {
+		t.Error("topP should keep at least one token")
 	}
 }
 
 func TestMinP(t *testing.T) {
 	input := []float32{-3, -2, -1, 0, 1}
 	tokens := toTokens(input)
 
 	// First apply temperature and softmax
-	tokens = softmax(tokens)
+	tokens = topK(tokens, 20)
+	softmax(tokens)
 
-	// Then apply minP
-	got := minP(tokens, 0.2)
+	tokens = minP(tokens, 1.0)
+
+	if len(tokens) != 1 {
+		t.Errorf("minP(1.0): should keep only the max-probability token, got %d, want 1", len(tokens))
+	}
+
+	// Test with normal p value
+	tokens = toTokens(input) // Reset tokens
+	tokens = topK(tokens, 20)
+	softmax(tokens)
+	tokens = minP(tokens, 0.2)
 
 	// Should keep tokens with prob >= 0.2 * max_prob
-	if len(got) > 3 {
-		t.Errorf("minP(0.2): kept too many tokens: got %d", len(got))
+	if len(tokens) > 3 {
+		t.Errorf("minP(0.2): kept too many tokens: got %d", len(tokens))
+		t.Logf("got: %v", tokens)
+	}
+
+	// Test with zero p value
+	tokens = toTokens(input) // Reset tokens
+	tokens = topK(tokens, 20)
+	softmax(tokens)
+	tokens = minP(tokens, 0.0)
+
+	// A zero threshold filters nothing, so every token should remain
+	if len(tokens) != len(input) {
+		t.Errorf("minP(0.0): should keep all tokens, got %d, want %d", len(tokens), len(input))
+		t.Logf("got: %v", tokens)
+	}
+
+	input = []float32{1e-10, 1e-10, 1e-10}
+	tokens = toTokens(input)
+	softmax(tokens)
+	tokens = minP(tokens, 1.0)
+	if len(tokens) < 1 {
+		t.Error("minP should keep at least one token even with extreme probabilities")
 	}
 }
 
@@ -231,7 +283,7 @@ func BenchmarkTransforms(b *testing.B) {
 		b.ResetTimer()
 		for b.Loop() {
 			copy(tokensCopy, tokens)
-			topK(tokensCopy, 10)
+			tokens = topK(tokensCopy, 10)
 		}
 	})
 
 		b.ResetTimer()
 		for b.Loop() {
 			copy(tokensCopy, tokens)
-			topP(tokensCopy, 0.9)
+			tokens = topP(tokensCopy, 0.9)
 		}
 	})
 
 		b.ResetTimer()
 		for b.Loop() {
 			copy(tokensCopy, tokens)
-			minP(tokensCopy, 0.2)
+			tokens = minP(tokensCopy, 0.2)
 		}
 	})
 
 		b.ResetTimer()
 		for b.Loop() {
 			copy(tokensCopy, tokens)
-			topK(tokensCopy, 200000)
+			tokens = topK(tokensCopy, 200000)
 		}
 	})
 }

From 364629b8d6e004309ea348cb0375810967c160b8 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Mon, 17 Mar 2025 16:32:40 -0400
Subject: [PATCH 4/6] ml/backend/ggml: allocate memory with malloc when
 loading model (#9822)

---
 ml/backend/ggml/ggml.go | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go
index 03b9acb3..fcd28aec 100644
--- a/ml/backend/ggml/ggml.go
+++ b/ml/backend/ggml/ggml.go
@@ -312,17 +312,19 @@ func New(r *os.File, params ml.BackendParams) (ml.Backend, error) {
 				return fmt.Errorf("unassigned tensor: %s", t.Name)
 			}
 
-			bts := make([]byte, t.Size())
-			n, err := io.ReadFull(io.NewSectionReader(sr, int64(t.Offset), int64(t.Size())), bts)
-			if err != nil {
-				return err
+			bts := C.malloc(C.size_t(t.Size()))
+			if bts == nil {
+				return errors.New("failed to allocate tensor buffer")
+			}
+			defer C.free(bts)
+
+			buf := unsafe.Slice((*byte)(bts), t.Size())
+
+			n, err := io.ReadFull(io.NewSectionReader(sr, int64(t.Offset), int64(t.Size())), buf)
+			if err != nil || n != len(buf) {
+				return errors.New("read failed")
 			}
 
-			if n != len(bts) {
-				return errors.New("short read")
-			}
-
-			C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), 0, C.size_t(t.Size()))
+			C.ggml_backend_tensor_set(tt, bts, 0, C.size_t(t.Size()))
 			return nil
 		})
 	}

From 95e271d98f0b5717bde2b7e5c4c7639c19fd3763 Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Mon, 17 Mar 2025 15:11:15 -0700
Subject: [PATCH 5/6] runner: remove cache prompt flag from ollama runner (#9826)

We do not need to bypass the prompt cache in the ollama runner yet, as
only embedding models needed to bypass it. When embedding models are
implemented they can skip initializing this cache completely.
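As background for the tests added below, a self-contained sketch of the prefix-reuse rule that LoadCacheSlot implements (simplified types and hypothetical helper names, not code from this patch): a slot's cached inputs are matched against the new prompt, and only the unmatched suffix is returned for processing, always leaving at least one input so there is something left to evaluate.

    package main

    import "fmt"

    // countCommonPrefix reports how many leading tokens of prompt are already
    // present in a slot's cached inputs.
    func countCommonPrefix(cached, prompt []int) int {
    	n := 0
    	for n < len(cached) && n < len(prompt) && cached[n] == prompt[n] {
    		n++
    	}
    	return n
    }

    // remainingPrompt drops the cached prefix from prompt, but always keeps at
    // least one token so the runner still has an input to process and sample from.
    func remainingPrompt(cached, prompt []int) []int {
    	n := countCommonPrefix(cached, prompt)
    	if n >= len(prompt) { // exact match: leave one input for sampling
    		n = len(prompt) - 1
    	}
    	return prompt[n:]
    }

    func main() {
    	cached := []int{1, 2}
    	fmt.Println(remainingPrompt(cached, []int{1, 2, 3})) // [3]: only token 3 remains
    	fmt.Println(remainingPrompt(cached, []int{1, 2}))    // [2]: exact match leaves one token
    }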
--- runner/ollamarunner/cache.go | 7 +- runner/ollamarunner/cache_test.go | 128 ++++++++++++++++++++++++++++++ runner/ollamarunner/runner.go | 2 +- 3 files changed, 130 insertions(+), 7 deletions(-) diff --git a/runner/ollamarunner/cache.go b/runner/ollamarunner/cache.go index adcb3f73..cf5e6b91 100644 --- a/runner/ollamarunner/cache.go +++ b/runner/ollamarunner/cache.go @@ -89,7 +89,7 @@ type InputCacheSlot struct { lastUsed time.Time } -func (c *InputCache) LoadCacheSlot(prompt []input.Input, cachePrompt bool) (*InputCacheSlot, []input.Input, error) { +func (c *InputCache) LoadCacheSlot(prompt []input.Input) (*InputCacheSlot, []input.Input, error) { var slot *InputCacheSlot var numPast int32 var err error @@ -107,11 +107,6 @@ func (c *InputCache) LoadCacheSlot(prompt []input.Input, cachePrompt bool) (*Inp return nil, nil, err } - // TODO (brucemacd): cachePrompt is always true for completion, but false for embedding, can this be improved? - if !cachePrompt { - numPast = 0 - } - slot.InUse = true slot.lastUsed = time.Now() diff --git a/runner/ollamarunner/cache_test.go b/runner/ollamarunner/cache_test.go index 0a1b73f5..f8925d11 100644 --- a/runner/ollamarunner/cache_test.go +++ b/runner/ollamarunner/cache_test.go @@ -297,3 +297,131 @@ func TestShiftDiscard(t *testing.T) { }) } } + +func TestLoadCacheSlot(t *testing.T) { + tests := []struct { + name string + cache InputCache + prompt []input.Input + wantErr bool + expectedSlotId int + expectedPrompt int // expected length of remaining prompt + }{ + { + name: "Basic cache hit - single user", + cache: InputCache{ + multiUserCache: false, + slots: []InputCacheSlot{ + { + Id: 0, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, + InUse: false, + lastUsed: time.Now().Add(-time.Second), + }, + { + Id: 1, + Inputs: []input.Input{}, + InUse: false, + lastUsed: time.Now().Add(-2 * time.Second), + }, + }, + }, + prompt: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + wantErr: false, + expectedSlotId: 0, + expectedPrompt: 1, // Only token 3 remains + }, + { + name: "Basic cache hit - multi user", + cache: InputCache{ + multiUserCache: true, + slots: []InputCacheSlot{ + { + Id: 0, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, + InUse: false, + lastUsed: time.Now().Add(-time.Second), + }, + { + Id: 1, + Inputs: []input.Input{}, + InUse: false, + lastUsed: time.Now().Add(-2 * time.Second), + }, + }, + }, + prompt: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + wantErr: false, + expectedSlotId: 0, + expectedPrompt: 1, // Only token 3 remains + }, + { + name: "Exact match - leave one input", + cache: InputCache{ + multiUserCache: false, + slots: []InputCacheSlot{ + { + Id: 0, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, + InUse: false, + lastUsed: time.Now().Add(-time.Second), + }, + }, + }, + prompt: []input.Input{{Token: 1}, {Token: 2}}, + wantErr: false, + expectedSlotId: 0, + expectedPrompt: 1, // Should leave 1 token for sampling + }, + { + name: "No available slots", + cache: InputCache{ + multiUserCache: false, + slots: []InputCacheSlot{ + { + Id: 0, + Inputs: []input.Input{{Token: 1}, {Token: 2}}, + InUse: true, + lastUsed: time.Now().Add(-time.Second), + }, + }, + }, + prompt: []input.Input{{Token: 1}, {Token: 2}, {Token: 3}}, + wantErr: true, + expectedSlotId: -1, + expectedPrompt: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + slot, remainingPrompt, err := tt.cache.LoadCacheSlot(tt.prompt) + + // Check error state + if (err != nil) != tt.wantErr { + t.Errorf("LoadCacheSlot() error = %v, 
wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return // Skip further checks if we expected an error + } + + // Verify slot ID + if slot.Id != tt.expectedSlotId { + t.Errorf("LoadCacheSlot() slot ID = %v, expected %v", slot.Id, tt.expectedSlotId) + } + + // Verify slot is now marked in use + if !slot.InUse { + t.Errorf("LoadCacheSlot() slot not marked InUse") + } + + // Verify remaining prompt length + if len(remainingPrompt) != tt.expectedPrompt { + t.Errorf("LoadCacheSlot() remaining prompt length = %v, expected %v", + len(remainingPrompt), tt.expectedPrompt) + } + }) + } +} diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index d4c24556..98fcf2c0 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -590,7 +590,7 @@ func (s *Server) completion(w http.ResponseWriter, r *http.Request) { found := false for i, sq := range s.seqs { if sq == nil { - seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, true) + seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs) if err != nil { s.mu.Unlock() http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError) From bf24498b1ea2728ef735030f186d1c970c38eaa6 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Mon, 17 Mar 2025 12:03:06 -0700 Subject: [PATCH 6/6] ollamarunner: Check for minBatch of context space when shifting Models can specify that a group of inputs need to be handled a single batch. However, context shifting didn't respect this and could trigger a break anyways. In this case, we should instead trigger a context shift earlier so that it occurs before the grouped batch. Note that there still some corner cases: - A long prompt that exceeds the context window can get truncated in the middle of an image. With the current models, this will result in the model not recognizing the image at all, which is pretty much the expected result with truncation. - The context window is set less than the minimum batch size. The only solution to this is to refuse to load the model with these settings. However, this can never occur with current models and default settings. Since users are unlikely to run into these scenarios, fixing them is left as a follow up. --- runner/ollamarunner/runner.go | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go index 98fcf2c0..9a1a549c 100644 --- a/runner/ollamarunner/runner.go +++ b/runner/ollamarunner/runner.go @@ -115,6 +115,9 @@ func (s *Server) NewSequence(prompt string, images []llm.ImageData, params NewSe params.numKeep = int32(len(inputs)) } + // TODO(jessegross): We should ensure that we always leave minBatch of context space to shift, + // otherwise we might truncate or split the batch against the model's wishes + // Ensure that at least 1 input can be discarded during shift params.numKeep = min(params.numKeep, s.cache.numCtx-1) @@ -366,17 +369,6 @@ func (s *Server) processBatch() error { batchSize := s.batchSize for j, inp := range seq.inputs { - if int32(len(seq.cache.Inputs)+len(seq.pendingInputs)+1) > s.cache.numCtx { - if len(seq.pendingInputs) == 0 { - err := s.cache.ShiftCacheSlot(seq.cache, seq.numKeep) - if err != nil { - return err - } - } else { - break - } - } - // If we are required to put following inputs into a single batch then extend the // batch size. Since we are only extending the size the minimum amount possible, this // will cause a break if we have pending inputs. 
@@ -389,6 +381,20 @@ func (s *Server) processBatch() error { break } + // If the sum of our working set (already processed tokens, tokens we added to this + // batch, required following tokens) exceeds the context size, then trigger a shift + // now so we don't have to do one later when we can't break the batch. + if int32(len(seq.cache.Inputs)+len(seq.pendingInputs)+minBatch) > s.cache.numCtx { + if len(seq.pendingInputs) != 0 { + break + } + + err := s.cache.ShiftCacheSlot(seq.cache, seq.numKeep) + if err != nil { + return err + } + } + options.Inputs = append(options.Inputs, inp.Token) if inp.Multimodal != nil { options.Multimodal = append(options.Multimodal, input.MultimodalIndex{Index: len(options.Inputs) - 1, Multimodal: inp.Multimodal})
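For reference, the shift-before-grouped-batch rule above in isolation (a simplified sketch using plain ints in place of the runner's types, not code from the patch): before committing an input to the batch, check whether the already-cached inputs, the inputs pending in this batch, and the minimum batch of grouped inputs that must follow would together exceed the context window; if so, break the batch when inputs are pending, otherwise shift the cache before the group is started.

    package main

    import "fmt"

    type action int

    const (
    	appendInput action = iota // there is room: add the input to the batch
    	breakBatch                // pending inputs exist: flush them, retry next cycle
    	shiftFirst                // no pending inputs: free context space now
    )

    // decide mirrors the check added in this patch: make room before a grouped
    // batch would overflow the context, never in the middle of it.
    func decide(cached, pending, minBatch, numCtx int) action {
    	if cached+pending+minBatch > numCtx {
    		if pending != 0 {
    			return breakBatch
    		}
    		return shiftFirst
    	}
    	return appendInput
    }

    func main() {
    	// 100 cached + 0 pending + a 16-input group > 112 of context: shift first.
    	fmt.Println(decide(100, 0, 16, 112) == shiftFirst) // true
    	// With pending inputs we flush the batch instead of shifting mid-group.
    	fmt.Println(decide(100, 8, 16, 112) == breakBatch) // true
    }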