Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-21 22:33:56 +00:00)
Merge branch 'ollama:main' into main
@@ -104,8 +104,8 @@ COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
 FROM --platform=linux/arm64 scratch AS arm64
 COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11
 COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
-COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 lib/ollama/cuda_jetpack5
-COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 lib/ollama/cuda_jetpack6
+COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_jetpack5
+COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_jetpack6

 FROM scratch AS rocm
 COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm

@@ -313,7 +313,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
 - [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
 - [Ollamac](https://github.com/kevinhermawan/Ollamac)
-- [big-AGI](https://github.com/enricoros/big-AGI/blob/main/docs/config-local-ollama.md)
+- [big-AGI](https://github.com/enricoros/big-AGI)
 - [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
 - [Amica](https://github.com/semperai/amica)
 - [chatd](https://github.com/BruceMacD/chatd)

@@ -163,6 +163,7 @@ func (t *ToolCallFunctionArguments) String() string {

 type Tool struct {
 	Type     string       `json:"type"`
+	Items    any          `json:"items,omitempty"`
 	Function ToolFunction `json:"function"`
 }

@@ -213,11 +214,14 @@ type ToolFunction struct {
 	Description string `json:"description"`
 	Parameters  struct {
 		Type       string   `json:"type"`
+		Defs       any      `json:"$defs,omitempty"`
+		Items      any      `json:"items,omitempty"`
 		Required   []string `json:"required"`
 		Properties map[string]struct {
 			Type        PropertyType `json:"type"`
+			Items       any          `json:"items,omitempty"`
 			Description string       `json:"description"`
-			Enum        []string     `json:"enum,omitempty"`
+			Enum        []any        `json:"enum,omitempty"`
 		} `json:"properties"`
 	} `json:"parameters"`
 }

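The Enum field above changes from []string to []any so that JSON Schema enums whose values are not all strings still unmarshal. A minimal, self-contained sketch of the effect (illustrative names only, not part of this commit):

package main

import (
	"encoding/json"
	"fmt"
)

// property mimics the shape of a single tool parameter with the new []any enum.
type property struct {
	Type string `json:"type"`
	Enum []any  `json:"enum,omitempty"`
}

func main() {
	// A mixed-type enum like this would fail to unmarshal into []string.
	data := []byte(`{"type": "integer", "enum": [1, 2, "auto"]}`)
	var p property
	if err := json.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Enum) // prints: [1 2 auto]
}
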
@@ -232,6 +232,67 @@ func TestMessage_UnmarshalJSON(t *testing.T) {
 	}
 }

+func TestToolFunction_UnmarshalJSON(t *testing.T) {
+	tests := []struct {
+		name    string
+		input   string
+		wantErr string
+	}{
+		{
+			name: "valid enum with same types",
+			input: `{
+				"name": "test",
+				"description": "test function",
+				"parameters": {
+					"type": "object",
+					"required": ["test"],
+					"properties": {
+						"test": {
+							"type": "string",
+							"description": "test prop",
+							"enum": ["a", "b", "c"]
+						}
+					}
+				}
+			}`,
+			wantErr: "",
+		},
+		{
+			name: "empty enum array",
+			input: `{
+				"name": "test",
+				"description": "test function",
+				"parameters": {
+					"type": "object",
+					"required": ["test"],
+					"properties": {
+						"test": {
+							"type": "string",
+							"description": "test prop",
+							"enum": []
+						}
+					}
+				}
+			}`,
+			wantErr: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var tf ToolFunction
+			err := json.Unmarshal([]byte(tt.input), &tf)
+
+			if tt.wantErr != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.wantErr)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
 func TestPropertyType_UnmarshalJSON(t *testing.T) {
 	tests := []struct {
 		name string

@@ -1381,7 +1381,6 @@ func NewCLI() *cobra.Command {
 				envVars["OLLAMA_NOPRUNE"],
 				envVars["OLLAMA_ORIGINS"],
 				envVars["OLLAMA_SCHED_SPREAD"],
-				envVars["OLLAMA_TMPDIR"],
 				envVars["OLLAMA_FLASH_ATTENTION"],
 				envVars["OLLAMA_KV_CACHE_TYPE"],
 				envVars["OLLAMA_LLM_LIBRARY"],

@@ -26,7 +26,6 @@ When you run Ollama on **Windows**, there are a few different locations. You can
 - `explorer %LOCALAPPDATA%\Ollama` to view logs. The most recent server logs will be in `server.log` and older logs will be in `server-#.log`
 - `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
 - `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
-- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories

 To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal

@@ -69,10 +68,6 @@ If you run into problems on Linux and want to install an older version, or you'd
 curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.5.7 sh
 ```

-## Linux tmp noexec
-
-If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
-
 ## Linux docker

 If Ollama initially works on the GPU in a docker container, but then switches to running on CPU after some period of time with errors in the server log reporting GPU discovery failures, this can be resolved by disabling systemd cgroup management in Docker. Edit `/etc/docker/daemon.json` on the host and add `"exec-opts": ["native.cgroupdriver=cgroupfs"]` to the docker configuration.

@@ -62,7 +62,6 @@ the explorer window by hitting `<Ctrl>+R` and type in:
 - *upgrade.log* contains log output for upgrades
 - `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
 - `explorer %HOMEPATH%\.ollama` contains models and configuration
-- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories

 ## Uninstall

@@ -52,8 +52,8 @@ func TestMaxQueue(t *testing.T) {
 	embedCtx := ctx

 	var genwg sync.WaitGroup
+	genwg.Add(1)
 	go func() {
-		genwg.Add(1)
 		defer genwg.Done()
 		slog.Info("Starting generate request")
 		DoGenerate(ctx, t, client, req, resp, 45*time.Second, 5*time.Second)

@@ -71,8 +71,8 @@ func TestMaxQueue(t *testing.T) {
 	counterMu := sync.Mutex{}
 	var embedwg sync.WaitGroup
 	for i := 0; i < threadCount; i++ {
+		embedwg.Add(1)
 		go func(i int) {
-			embedwg.Add(1)
 			defer embedwg.Done()
 			slog.Info("embed started", "id", i)
 			embedReq := api.EmbeddingRequest{

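The two test hunks above move the WaitGroup increment out of the goroutine because an Add performed inside the goroutine can race with Wait, letting Wait return before the work is counted. A small standalone sketch of the corrected pattern (assumed example, not from this commit):

package main

import "sync"

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1) // increment before the goroutine starts, so Wait cannot miss it
		go func(id int) {
			defer wg.Done()
			_ = id // the request work would go here
		}(i)
	}
	wg.Wait()
}
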
@@ -56,8 +56,9 @@ type Cache interface {

 	// StartForward is called before the start of the model's forward pass.
 	// For each token in the coming batch, there must be a corresponding
-	// entry in positions and seqs.
-	StartForward(ctx ml.Context, batch input.Batch) error
+	// entry in positions and seqs. reserve is to preallocate memory
+	// without actually storing data in the cache.
+	StartForward(ctx ml.Context, batch input.Batch, reserve bool) error

 	// CopyPrefix copies tokens in the range [0, len) from srcSeq to dstSeq
 	CopyPrefix(srcSeq, dstSeq int, len int32)

@@ -146,51 +146,60 @@ func (c *Causal) Close() {
 	}
 }

-func (c *Causal) StartForward(ctx ml.Context, batch input.Batch) error {
+func (c *Causal) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error {
 	c.curBatchSize = len(batch.Positions)
 	c.curSequences = batch.Sequences
 	c.curPositions = batch.Positions
 	c.opts.Except = nil

-	c.updateSlidingWindow()
+	if !reserve {
+		c.updateSlidingWindow()
+
+		var err error
+		c.curLoc, err = c.findStartLoc()
+		if errors.Is(err, ErrKvCacheFull) {
+			c.defrag()
+			c.curLoc, err = c.findStartLoc()
+		}
+		if err != nil {
+			return err
+		}
+
+		c.curCellRange = newRange()
+		for i, pos := range batch.Positions {
+			seq := batch.Sequences[i]
+
+			c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}}
+
+			seqRange, ok := c.cellRanges[seq]
+			if !ok {
+				seqRange = newRange()
+			}
+
+			if c.curLoc+i > seqRange.max {
+				seqRange.max = c.curLoc + i
+			}
+			if seqRange.max > c.curCellRange.max {
+				c.curCellRange.max = seqRange.max
+			}
+
+			if c.curLoc+i < seqRange.min {
+				seqRange.min = c.curLoc + i
+			}
+			if seqRange.min < c.curCellRange.min {
+				c.curCellRange.min = seqRange.min
+			}
+			c.cellRanges[seq] = seqRange
+		}
+	} else {
+		// If we are reserving memory, don't update any of the cache metadata but set the size
+		// to the worst case.
+		c.curLoc = 0
+		c.curCellRange.min = 0
+		c.curCellRange.max = len(c.cells) - 1
+	}

 	var err error
-	c.curLoc, err = c.findStartLoc()
-	if errors.Is(err, ErrKvCacheFull) {
-		c.defrag()
-		c.curLoc, err = c.findStartLoc()
-	}
-	if err != nil {
-		return err
-	}
-
-	c.curCellRange = newRange()
-	for i, pos := range batch.Positions {
-		seq := batch.Sequences[i]
-
-		c.cells[c.curLoc+i] = cacheCell{pos: pos, sequences: []int{seq}}
-
-		seqRange, ok := c.cellRanges[seq]
-		if !ok {
-			seqRange = newRange()
-		}
-
-		if c.curLoc+i > seqRange.max {
-			seqRange.max = c.curLoc + i
-		}
-		if seqRange.max > c.curCellRange.max {
-			c.curCellRange.max = seqRange.max
-		}
-
-		if c.curLoc+i < seqRange.min {
-			seqRange.min = c.curLoc + i
-		}
-		if seqRange.min < c.curCellRange.min {
-			c.curCellRange.min = seqRange.min
-		}
-		c.cellRanges[seq] = seqRange
-	}
-
 	c.curMask, err = c.buildMask(ctx)

 	return err

@@ -5,7 +5,6 @@ import (
 	"slices"
 	"testing"

-	"github.com/ollama/ollama/fs"
 	"github.com/ollama/ollama/ml"
 	"github.com/ollama/ollama/model/input"
 )

@@ -281,7 +280,7 @@ func testCache(t *testing.T, backend ml.Backend, cache Cache, tests []testCase)
 		context := backend.NewContext()
 		defer context.Close()

-		err := cache.StartForward(context, input.Batch{Positions: test.pos, Sequences: test.seqs})
+		err := cache.StartForward(context, input.Batch{Positions: test.pos, Sequences: test.seqs}, false)
 		if err != nil {
 			panic(err)
 		}

@@ -315,7 +314,7 @@ func TestCanResume(t *testing.T) {
 	err := cache.StartForward(context, input.Batch{
 		Positions: []int32{0, 1, 2, 3},
 		Sequences: []int{0, 0, 0, 0},
-	})
+	}, false)
 	if err != nil {
 		t.Fatalf("StartForward failed: %v", err)
 	}

@@ -342,7 +341,7 @@ func TestCanResume(t *testing.T) {
 	err = cache.StartForward(context, input.Batch{
 		Positions: []int32{4, 5},
 		Sequences: []int{0, 0},
-	})
+	}, false)
 	if err != nil {
 		t.Fatalf("StartForward failed: %v", err)
 	}

@@ -372,14 +371,8 @@ func TestCanResume(t *testing.T) {
 	}
 }

-type testBackend struct{}
-
-func (b *testBackend) Config() fs.Config {
-	panic("not implemented")
-}
-
-func (b *testBackend) Get(name string) ml.Tensor {
-	panic("not implemented")
+type testBackend struct {
+	ml.Backend
 }

 func (b *testBackend) NewContext() ml.Context {

@@ -390,12 +383,10 @@ func (b *testBackend) NewContextSize(int) ml.Context {
 	return &testContext{}
 }

-func (b *testBackend) SystemInfo() string {
-	return "not implemented"
-}
-
-type testContext struct{}
+type testContext struct {
+	ml.Context
+}

 func (c *testContext) Empty(dtype ml.DType, shape ...int) ml.Tensor {
 	total := 0

@@ -440,6 +431,8 @@ func (c *testContext) Forward(...ml.Tensor) ml.Context { return c }

 func (c *testContext) Compute(...ml.Tensor) {}

+func (c *testContext) Reserve() error { return nil }
+
 func (c *testContext) MaxGraphNodes() int {
 	return 10
 }

@@ -447,6 +440,8 @@ func (c *testContext) MaxGraphNodes() int {
 func (c *testContext) Close() {}

 type testTensor struct {
+	ml.Tensor
+
 	dtype       ml.DType
 	elementSize int
 	data        []float32

@@ -474,10 +469,6 @@ func (t *testTensor) DType() ml.DType {
 	return t.dtype
 }

-func (t *testTensor) Bytes() []byte {
-	panic("not implemented")
-}
-
 func (t *testTensor) Floats() []float32 {
 	out := make([]float32, len(t.data))
 	copy(out, t.data)

@@ -502,64 +493,6 @@ func (t *testTensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
 	return out
 }

-func (t *testTensor) Mul(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Mulmat(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) MulmatFullPrec(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Softmax(ctx ml.Context) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) LayerNorm(ctx ml.Context, weight, bias ml.Tensor, eps float32) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) RMSNorm(ctx ml.Context, weight ml.Tensor, eps float32) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Scale(ctx ml.Context, s float64) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) AvgPool1D(ctx ml.Context, k, s, p int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) AvgPool2D(ctx ml.Context, k, s int, p float32) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Conv2D(ctx ml.Context, weight ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) RoPE(ctx ml.Context, positionIDs, ropeFactors ml.Tensor, dim, ropeType uint32, base, scale float32) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) IM2Col(ctx ml.Context, weight ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Cos(ctx ml.Context) ml.Tensor { panic("not implemented") }
-func (t *testTensor) Sin(ctx ml.Context) ml.Tensor { panic("not implemented") }
-func (t *testTensor) Tanh(ctx ml.Context) ml.Tensor { panic("not implemented") }
-func (t *testTensor) GELU(ctx ml.Context) ml.Tensor { panic("not implemented") }
-func (t *testTensor) SILU(ctx ml.Context) ml.Tensor { panic("not implemented") }
-
-func (t *testTensor) Reshape(ctx ml.Context, shape ...int) ml.Tensor {
-	panic("not implemented")
-}
-
 func (t *testTensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
 	offset /= t.elementSize

@@ -582,43 +515,7 @@ func (t *testTensor) View(ctx ml.Context, offset int, shape ...int) ml.Tensor {
 	return view
 }

-func (t *testTensor) Permute(ctx ml.Context, shape ...int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Contiguous(ctx ml.Context) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Pad(ctx ml.Context, shape ...int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Unpad(ctx ml.Context, shape ...int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Stack(ctx ml.Context, dim int, s ...ml.Tensor) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor { panic("not implemented") }
-
-func (t *testTensor) Concat(ctx ml.Context, t2 ml.Tensor, dim int) ml.Tensor {
-	panic("not implemented")
-}
-
-func (t *testTensor) Rows(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
-	panic("not implemented")
-}
-
 func (t *testTensor) Copy(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
 	copy(t2.(*testTensor).data, t.data)
 	return nil
 }
-
-func (t *testTensor) Duplicate(ctx ml.Context) ml.Tensor { panic("not implemented") }

@@ -27,6 +27,11 @@ type EncoderCache struct {
 	// anything will be stored)
 	curPos int32

+	// curReserve indicates that this forward pass is only for
+	// memory reservation and we should not update our metadata
+	// based on it.
+	curReserve bool
+
 	// ** cache metadata **

 	// was something stored in the cache?

@@ -83,12 +88,14 @@ func (c *EncoderCache) Close() {
 	}
 }

-func (c *EncoderCache) StartForward(ctx ml.Context, batch input.Batch) error {
+func (c *EncoderCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error {
 	// We work with the most recent image
 	if len(batch.Multimodal) > 0 {
 		c.curPos = batch.Positions[batch.Multimodal[len(batch.Multimodal)-1].Index]
 	}

+	c.curReserve = reserve
+
 	return nil
 }

@@ -105,8 +112,10 @@ func (c *EncoderCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor) {
 }

 func (c *EncoderCache) Put(ctx ml.Context, key, value ml.Tensor) {
-	c.encoderPos = c.curPos
-	c.encoderCached = true
+	if !c.curReserve {
+		c.encoderPos = c.curPos
+		c.encoderCached = true
+	}

 	if c.config.PermutedV {
 		value = value.Permute(ctx, 1, 2, 0, 3)

@@ -41,9 +41,9 @@ func (c *WrapperCache) Close() {
 	}
 }

-func (c *WrapperCache) StartForward(ctx ml.Context, batch input.Batch) error {
+func (c *WrapperCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error {
 	for i, cache := range c.caches {
-		err := cache.StartForward(ctx, batch)
+		err := cache.StartForward(ctx, batch, reserve)
 		if err != nil {
 			// unwind on error - Remove with endIndex set to math.MaxInt32 does not fail
 			for j := i - 1; j >= 0; j-- {

@@ -97,6 +97,13 @@ type Context interface {

 	Forward(...Tensor) Context
 	Compute(...Tensor)
+
+	// Reserve is analogous to Compute but rather than executing a
+	// graph, simply preallocates memory. Typically called with a
+	// worst case graph to ensure all resources are available for
+	// for future inference.
+	Reserve() error
+
 	MaxGraphNodes() int
 	Close()

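Reserve mirrors Compute: the caller builds the same graph it would normally execute, but only asks the allocator for worst-case buffers so later forward passes cannot fail on allocation. A toy, self-contained sketch of that split, with simplified types that are deliberately not ollama's API:

package main

import "fmt"

// graph stands in for a compute graph sized for the largest possible batch.
type graph struct{ nodes int }

// graphContext is a stand-in for the real Context; it records what was scheduled.
type graphContext struct{ scheduled int }

func (c *graphContext) Forward(g graph) *graphContext { c.scheduled = g.nodes; return c }

// Reserve only sizes buffers for the scheduled graph; Compute (not shown) would run it.
func (c *graphContext) Reserve() error {
	fmt.Printf("preallocated buffers for %d nodes\n", c.scheduled)
	return nil
}

func main() {
	worstCase := graph{nodes: 512} // dummy worst-case graph
	ctx := &graphContext{}
	if err := ctx.Forward(worstCase).Reserve(); err != nil {
		panic(err)
	}
}
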
@@ -10,6 +10,7 @@ import "C"

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"log/slog"

@@ -42,8 +43,12 @@ func devices() []*C.struct_ggml_backend_device {
 }

 type Backend struct {
 	meta *fsggml.GGML
-	sched *C.struct_ggml_backend_sched
+
+	sched         *C.struct_ggml_backend_sched
+	schedBackends []*C.struct_ggml_backend
+	schedBufts    []*C.struct_ggml_backend_buffer_type
+
 	tensors map[string]*C.struct_ggml_tensor

 	// input is the backend used for inputs

@@ -281,6 +286,10 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend,
 		}

 		b := C.ggml_backend_alloc_ctx_tensors_from_buft(c, bt)
+		if b == nil {
+			return nil, fmt.Errorf("unable to allocate memory from device %v for model weights", C.GoString(C.ggml_backend_buft_name(bt)))
+		}
+
 		C.ggml_backend_buffer_set_usage(b, C.GGML_BACKEND_BUFFER_USAGE_WEIGHTS)
 		bbs[c] = b
 	}

@@ -385,8 +394,6 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend,
 		schedBackends = append(schedBackends, b)
 		schedBufts = append(schedBufts, bt)

-		slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(b)), "buffer_type", C.GoString(C.ggml_backend_buft_name(bt)))
-
 		if C.ggml_backend_is_cpu(b) {
 			// set number of threads for cpu backend
 			C.ggml_backend_cpu_set_n_threads(b, C.int(Threads(params.NumThreads)))

@@ -405,7 +412,9 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend,
 			C.size_t(maxGraphNodes),
 			C._Bool(len(gpus) > 1 && slices.Contains(gpus, output.d)),
 		),
-		input: deviceBufferTypes[input.d],
+		schedBackends: schedBackends,
+		schedBufts:    schedBufts,
+		input:         deviceBufferTypes[input.d],
 		layers: func() map[int]*C.struct_ggml_backend_buffer_type {
 			m := make(map[int]*C.struct_ggml_backend_buffer_type)
 			for i, layer := range layers {

@@ -530,6 +539,24 @@ func (c Context) Compute(tensors ...ml.Tensor) {
 	}
 }

+func (c Context) Reserve() error {
+	if !C.ggml_backend_sched_reserve(c.b.sched, c.graph) {
+		C.ggml_backend_sched_reset(c.b.sched)
+		return errors.New("failed to reserve graph")
+	}
+
+	slog.Debug("compute graph", "nodes", C.ggml_graph_n_nodes(c.graph), "splits", C.ggml_backend_sched_get_n_splits(c.b.sched))
+	for i := range c.b.schedBackends {
+		size := C.ggml_backend_sched_get_buffer_size(c.b.sched, c.b.schedBackends[i])
+		slog.Info("compute graph", "backend", C.GoString(C.ggml_backend_name(c.b.schedBackends[i])), "buffer_type", C.GoString(C.ggml_backend_buft_name(c.b.schedBufts[i])),
+			"size", format.HumanBytes2(uint64(size)))
+	}
+
+	C.ggml_backend_sched_reset(c.b.sched)
+
+	return nil
+}
+
 func (c Context) MaxGraphNodes() int {
 	return c.maxGraphNodes
 }

@@ -547,9 +574,9 @@ func pad(length, pad C.size_t) C.size_t {
 	return ((length + pad - 1) / pad) * pad
 }

-func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
+func (c Context) newTensor(dtype ml.DType, shape []int) (ml.Tensor, error) {
 	if c.buft == nil {
-		panic("set Input, Output, or Layer before creating tensors")
+		panic("set Input or Layer before creating tensors")
 	}

 	var cdtype uint32

@@ -570,7 +597,7 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {

 	if len(shape) < 1 || shape[0] == 0 {
 		var shape C.int64_t = 0
-		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}
+		return &Tensor{b: c.b, t: C.ggml_new_tensor(c.ctx, cdtype, 1, &shape)}, nil
 	} else if len(shape) > 4 {
 		panic("unsupported number of dimensions")
 	}

@@ -584,16 +611,29 @@ func (c Context) newTensor(dtype ml.DType, shape []int) ml.Tensor {
 	t := C.ggml_new_tensor(c.ctx, cdtype, C.int(len(shape)), shapeToGGML(shape))
 	size := pad(C.ggml_backend_buft_get_alloc_size(c.buft, t), C.ggml_backend_buft_get_alignment(c.buft))
 	b := C.ggml_backend_buft_alloc_buffer(c.buft, size)
+	if b == nil {
+		return nil, fmt.Errorf("unable to allocate %v from device %v for new tensor", format.HumanBytes2(uint64(size)), C.GoString(C.ggml_backend_buft_name(c.buft)))
+	}
+
 	C.ggml_backend_tensor_alloc(b, t, C.ggml_backend_buffer_get_base(b))
-	return &Tensor{b: c.b, t: t}
+	return &Tensor{b: c.b, t: t}, nil
 }

 func (c Context) Empty(dtype ml.DType, shape ...int) ml.Tensor {
-	return c.newTensor(dtype, shape)
+	t, err := c.newTensor(dtype, shape)
+	if err != nil {
+		panic(err)
+	}
+
+	return t
 }

 func (c Context) Zeros(dtype ml.DType, shape ...int) ml.Tensor {
-	t := c.newTensor(dtype, shape)
+	t, err := c.newTensor(dtype, shape)
+	if err != nil {
+		panic(err)
+	}
+
 	C.ggml_set_zero(t.(*Tensor).t)
 	return t
 }

@@ -621,7 +661,11 @@ func (c Context) FromFloatSlice(s []float32, shape ...int) (ml.Tensor, error) {
 		return nil, err
 	}

-	t := c.newTensor(ml.DTypeF32, shape)
+	t, err := c.newTensor(ml.DTypeF32, shape)
+	if err != nil {
+		return nil, err
+	}
+
 	if len(s) > 0 {
 		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
 	}

@@ -634,7 +678,11 @@ func (c Context) FromIntSlice(s []int32, shape ...int) (ml.Tensor, error) {
 		return nil, err
 	}

-	t := c.newTensor(ml.DTypeI32, shape)
+	t, err := c.newTensor(ml.DTypeI32, shape)
+	if err != nil {
+		return nil, err
+	}
+
 	if len(s) > 0 {
 		C.ggml_backend_tensor_set(t.(*Tensor).t, unsafe.Pointer(&s[0]), 0, C.ggml_nbytes(t.(*Tensor).t))
 	}

@@ -299,7 +299,7 @@ func Forward(ctx ml.Context, m Model, inputs []int32, batch input.Batch) (ml.Ten

 	cache := m.Config().Cache
 	if cache != nil {
-		err := cache.StartForward(ctx, batch)
+		err := cache.StartForward(ctx, batch, false)
 		if err != nil {
 			return nil, err
 		}

@@ -281,19 +281,23 @@ func TestChatMiddleware(t *testing.T) {
 					Description: "Get the current weather",
 					Parameters: struct {
 						Type       string   `json:"type"`
+						Defs       any      `json:"$defs,omitempty"`
+						Items      any      `json:"items,omitempty"`
 						Required   []string `json:"required"`
 						Properties map[string]struct {
 							Type        api.PropertyType `json:"type"`
+							Items       any              `json:"items,omitempty"`
 							Description string           `json:"description"`
-							Enum        []string         `json:"enum,omitempty"`
+							Enum        []any            `json:"enum,omitempty"`
 						} `json:"properties"`
 					}{
 						Type:     "object",
 						Required: []string{"location"},
 						Properties: map[string]struct {
 							Type        api.PropertyType `json:"type"`
+							Items       any              `json:"items,omitempty"`
 							Description string           `json:"description"`
-							Enum        []string         `json:"enum,omitempty"`
+							Enum        []any            `json:"enum,omitempty"`
 						}{
 							"location": {
 								Type: api.PropertyType{"string"},

@@ -301,7 +305,7 @@ func TestChatMiddleware(t *testing.T) {
 							},
 							"unit": {
 								Type: api.PropertyType{"string"},
-								Enum: []string{"celsius", "fahrenheit"},
+								Enum: []any{"celsius", "fahrenheit"},
 							},
 						},
 					},

@@ -448,7 +448,7 @@ func (m *mockCache) Get(ctx ml.Context) (ml.Tensor, ml.Tensor, ml.Tensor)
 func (m *mockCache) Put(ctx ml.Context, key, value ml.Tensor) {}
 func (m *mockCache) Init(backend ml.Backend, dtype ml.DType, maxSequences, capacity, maxBatch int) {}
 func (m *mockCache) Close() {}
-func (m *mockCache) StartForward(ctx ml.Context, batch input.Batch) error { return nil }
+func (m *mockCache) StartForward(ctx ml.Context, batch input.Batch, reserve bool) error { return nil }
 func (m *mockCache) CopyPrefix(srcSeq, dstSeq int, len int32) {}
 func (m *mockCache) SetConfig(ml.CacheConfig) {}
 func (m *mockCache) CanResume(seq int, pos int32) bool { return true }

@@ -728,6 +728,51 @@ func (m *multiLPath) String() string {
 	return strings.Join(*m, ", ")
 }

+func (s *Server) reserveWorstCaseGraph() error {
+	ctx := s.model.Backend().NewContext()
+	defer ctx.Close()
+
+	var batch input.Batch
+
+	inputs := make([]int32, s.batchSize)
+	batch.Positions = make([]int32, len(inputs))
+	batch.Sequences = make([]int, len(inputs))
+	for i := range inputs {
+		batch.Positions[i] = int32(i)
+	}
+
+	batch.Outputs = make([]int32, s.parallel)
+	for i := range batch.Outputs {
+		batch.Outputs[i] = int32(i)
+	}
+
+	var err error
+	batch.Inputs, err = ctx.Input().FromIntSlice(inputs, len(inputs))
+	if err != nil {
+		return err
+	}
+
+	cache := s.model.Config().Cache
+	if cache != nil {
+		err := cache.StartForward(ctx, batch, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	t, err := s.model.Forward(ctx, batch)
+	if err != nil {
+		return err
+	}
+
+	err = ctx.Forward(t).Reserve()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (s *Server) loadModel(
 	ctx context.Context,
 	mpath string,

@@ -765,6 +810,11 @@ func (s *Server) loadModel(
 	s.seqs = make([]*Sequence, s.parallel)
 	s.seqsSem = semaphore.NewWeighted(int64(s.parallel))

+	err = s.reserveWorstCaseGraph()
+	if err != nil {
+		panic(err)
+	}
+
 	s.status = llm.ServerStatusReady
 	s.ready.Done()
 }

@@ -370,19 +370,23 @@ func TestGenerateChat(t *testing.T) {
 				Description: "Get the current weather",
 				Parameters: struct {
 					Type       string   `json:"type"`
+					Defs       any      `json:"$defs,omitempty"`
+					Items      any      `json:"items,omitempty"`
 					Required   []string `json:"required"`
 					Properties map[string]struct {
 						Type        api.PropertyType `json:"type"`
+						Items       any              `json:"items,omitempty"`
 						Description string           `json:"description"`
-						Enum        []string         `json:"enum,omitempty"`
+						Enum        []any            `json:"enum,omitempty"`
 					} `json:"properties"`
 				}{
 					Type:     "object",
 					Required: []string{"location"},
 					Properties: map[string]struct {
 						Type        api.PropertyType `json:"type"`
+						Items       any              `json:"items,omitempty"`
 						Description string           `json:"description"`
-						Enum        []string         `json:"enum,omitempty"`
+						Enum        []any            `json:"enum,omitempty"`
 					}{
 						"location": {
 							Type: api.PropertyType{"string"},

@@ -390,7 +394,7 @@ func TestGenerateChat(t *testing.T) {
 						},
 						"unit": {
 							Type: api.PropertyType{"string"},
-							Enum: []string{"celsius", "fahrenheit"},
+							Enum: []any{"celsius", "fahrenheit"},
 						},
 					},
 				},

@@ -467,19 +471,23 @@ func TestGenerateChat(t *testing.T) {
 				Description: "Get the current weather",
 				Parameters: struct {
 					Type       string   `json:"type"`
+					Defs       any      `json:"$defs,omitempty"`
+					Items      any      `json:"items,omitempty"`
 					Required   []string `json:"required"`
 					Properties map[string]struct {
 						Type        api.PropertyType `json:"type"`
+						Items       any              `json:"items,omitempty"`
 						Description string           `json:"description"`
-						Enum        []string         `json:"enum,omitempty"`
+						Enum        []any            `json:"enum,omitempty"`
 					} `json:"properties"`
 				}{
 					Type:     "object",
 					Required: []string{"location"},
 					Properties: map[string]struct {
 						Type        api.PropertyType `json:"type"`
+						Items       any              `json:"items,omitempty"`
 						Description string           `json:"description"`
-						Enum        []string         `json:"enum,omitempty"`
+						Enum        []any            `json:"enum,omitempty"`
 					}{
 						"location": {
 							Type: api.PropertyType{"string"},

@@ -487,7 +495,7 @@ func TestGenerateChat(t *testing.T) {
 						},
 						"unit": {
 							Type: api.PropertyType{"string"},
-							Enum: []string{"celsius", "fahrenheit"},
+							Enum: []any{"celsius", "fahrenheit"},
 						},
 					},
 				},

@@ -667,13 +667,19 @@ func (runner *runnerRef) waitForVRAMRecovery() chan any {
 	return finished
 }

-type ByDuration []*runnerRef
+type ByDurationAndName []*runnerRef

-func (a ByDuration) Len() int      { return len(a) }
-func (a ByDuration) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByDuration) Less(i, j int) bool {
-	// uint64 to turn negative time (never unload) to largest
-	return uint64(a[i].sessionDuration) < uint64(a[j].sessionDuration)
+func (a ByDurationAndName) Len() int      { return len(a) }
+func (a ByDurationAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a ByDurationAndName) Less(i, j int) bool {
+	// Primary sort by session duration (uint64 to handle negatives)
+	d1 := uint64(a[i].sessionDuration)
+	d2 := uint64(a[j].sessionDuration)
+	if d1 != d2 {
+		return d1 < d2
+	}
+	// Secondary sort by model path lex order
+	return a[i].modelPath < a[j].modelPath
 }

 // TODO - future consideration to pick runners based on size

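The renamed comparator adds a secondary key so that unload order stays deterministic when two runners have the same session duration. A standalone sketch with simplified fields (assumed example, not this repository's types):

package main

import (
	"fmt"
	"sort"
	"time"
)

type runner struct {
	sessionDuration time.Duration
	modelPath       string
}

type byDurationAndName []runner

func (a byDurationAndName) Len() int      { return len(a) }
func (a byDurationAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byDurationAndName) Less(i, j int) bool {
	d1, d2 := uint64(a[i].sessionDuration), uint64(a[j].sessionDuration)
	if d1 != d2 {
		return d1 < d2 // shortest session first; negative (never unload) wraps to largest
	}
	return a[i].modelPath < a[j].modelPath // tie-break keeps ordering stable across runs
}

func main() {
	rs := []runner{
		{5 * time.Minute, "/models/b"},
		{5 * time.Minute, "/models/a"},
		{-1, "/models/keep"},
	}
	sort.Sort(byDurationAndName(rs))
	fmt.Println(rs[0].modelPath, rs[1].modelPath, rs[2].modelPath) // /models/a /models/b /models/keep
}
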
@@ -775,7 +781,7 @@ func (s *Scheduler) findRunnerToUnload() *runnerRef {

 	// In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload
 	// e.g., if we have multiple options, will one make room for the request?
-	sort.Sort(ByDuration(runnerList))
+	sort.Sort(ByDurationAndName(runnerList))

 	// First try to find a runner that's already idle
 	for _, runner := range runnerList {