diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4adab4f8..40f9c41f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -28,6 +28,7 @@ jobs: security unlock-keychain -p password build.keychain security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain + security set-keychain-settings -lut 3600 build.keychain - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/README.md b/README.md index e3505a44..c150ecf1 100644 --- a/README.md +++ b/README.md @@ -69,15 +69,17 @@ Here are some example models that can be downloaded: | ------------------ | ---------- | ----- | ------------------------------ | | Llama 3 | 8B | 4.7GB | `ollama run llama3` | | Llama 3 | 70B | 40GB | `ollama run llama3:70b` | -| Phi-3 | 3.8B | 2.3GB | `ollama run phi3` | +| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` | +| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` | +| Gemma | 2B | 1.4GB | `ollama run gemma:2b` | +| Gemma | 7B | 4.8GB | `ollama run gemma:7b` | | Mistral | 7B | 4.1GB | `ollama run mistral` | +| Moondream 2 | 1.4B | 829MB | `ollama run moondream` | | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` | | Starling | 7B | 4.1GB | `ollama run starling-lm` | | Code Llama | 7B | 3.8GB | `ollama run codellama` | | Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` | | LLaVA | 7B | 4.5GB | `ollama run llava` | -| Gemma | 2B | 1.4GB | `ollama run gemma:2b` | -| Gemma | 7B | 4.8GB | `ollama run gemma:7b` | | Solar | 10.7B | 6.1GB | `ollama run solar` | > Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models. @@ -210,25 +212,7 @@ ollama list ## Building -Install `cmake` and `go`: - -``` -brew install cmake go -``` - -Then generate dependencies: - -``` -go generate ./... -``` - -Then build the binary: - -``` -go build . -``` - -More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md) +See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md) ### Running local builds diff --git a/cmd/cmd.go b/cmd/cmd.go index dff8d7c1..5d919d9a 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -35,6 +35,7 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/auth" "github.com/ollama/ollama/format" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/progress" "github.com/ollama/ollama/server" "github.com/ollama/ollama/types/errtypes" @@ -63,7 +64,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error { } defer f.Close() - modelfile, err := model.ParseFile(f) + modelfile, err := parser.ParseFile(f) if err != nil { return err } @@ -207,7 +208,7 @@ func tempZipFiles(path string) (string, error) { // pytorch files might also be unresolved git lfs references; skip if they are // covers pytorch_model-x-of-y.bin, pytorch_model.fp32-x-of-y.bin, pytorch_model.bin files = append(files, pt...) - } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/octet-stream"); len(pt) > 0 { + } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/zip"); len(pt) > 0 { // pytorch files might also be unresolved git lfs references; skip if they are // covers consolidated.x.pth, consolidated.pth files = append(files, pt...) 
@@ -1078,12 +1079,24 @@ func versionHandler(cmd *cobra.Command, _ []string) { } } -func appendHostEnvDocs(cmd *cobra.Command) { - const hostEnvDocs = ` +type EnvironmentVar struct { + Name string + Description string +} + +func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) { + if len(envs) == 0 { + return + } + + envUsage := ` Environment Variables: - OLLAMA_HOST The host:port or base URL of the Ollama server (e.g. http://localhost:11434) ` - cmd.SetUsageTemplate(cmd.UsageTemplate() + hostEnvDocs) + for _, e := range envs { + envUsage += fmt.Sprintf(" %-16s %s\n", e.Name, e.Description) + } + + cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage) } func NewCLI() *cobra.Command { @@ -1220,6 +1233,10 @@ Environment Variables: RunE: DeleteHandler, } + ollamaHostEnv := EnvironmentVar{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"} + ollamaNoHistoryEnv := EnvironmentVar{"OLLAMA_NOHISTORY", "Disable readline history"} + envs := []EnvironmentVar{ollamaHostEnv} + for _, cmd := range []*cobra.Command{ createCmd, showCmd, @@ -1231,7 +1248,12 @@ Environment Variables: copyCmd, deleteCmd, } { - appendHostEnvDocs(cmd) + switch cmd { + case runCmd: + appendEnvDocs(cmd, []EnvironmentVar{ollamaHostEnv, ollamaNoHistoryEnv}) + default: + appendEnvDocs(cmd, envs) + } } rootCmd.AddCommand( diff --git a/cmd/interactive.go b/cmd/interactive.go index 1078590c..0a31efb5 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -138,6 +138,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word") fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor") fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor") + fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor") fmt.Fprintln(os.Stderr, "") fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen") fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding") @@ -182,6 +183,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { return err } + if os.Getenv("OLLAMA_NOHISTORY") != "" { + scanner.HistoryDisable() + } + fmt.Print(readline.StartBracketedPaste) defer fmt.Printf(readline.EndBracketedPaste) diff --git a/convert/convert.go b/convert/convert.go index f4210e50..e71a0ff3 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -18,6 +18,16 @@ import ( "github.com/ollama/ollama/llm" ) +const ( + _ int32 = iota + tokenTypeNormal + tokenTypeUnknown + tokenTypeControl + tokenTypeUserDefined + tokenTypeUnused + tokenTypeByte +) + type Params struct { Architectures []string `json:"architectures"` VocabSize int `json:"vocab_size"` @@ -37,6 +47,8 @@ type Params struct { Experts int `json:"num_local_experts"` ExpertsUsed int `json:"num_experts_per_tok"` + PreTokenizer string + ByteOrder } @@ -74,10 +86,9 @@ func GetModelFormat(dirname string) (ModelFormat, error) { } for _, fn := range files { - slog.Debug(fmt.Sprintf("file = %s", fn)) if strings.HasSuffix(fn, ".safetensors") { return &SafetensorFormat{}, nil - } else if strings.HasSuffix(fn, ".bin") { + } else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") { slog.Debug("model is torch") return &TorchFormat{}, nil } @@ -92,6 +103,7 @@ type Vocab struct { Tokens []string Scores []float32 Types []int32 + Merges []string } func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) { @@ -170,7 +182,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) 
(*Vocab, error) { } v.Tokens = append(v.Tokens, t.key) v.Scores = append(v.Scores, -1000.0) - v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined)) + v.Types = append(v.Types, tokenTypeUserDefined) } slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens))) @@ -180,7 +192,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) { for cnt := 0; cnt < missingTokens; cnt++ { v.Tokens = append(v.Tokens, fmt.Sprintf("", cnt+1)) v.Scores = append(v.Scores, -1) - v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined)) + v.Types = append(v.Types, tokenTypeUserDefined) } } diff --git a/convert/convert_test.go b/convert/convert_test.go new file mode 100644 index 00000000..6aa33a49 --- /dev/null +++ b/convert/convert_test.go @@ -0,0 +1,103 @@ +//go:build slow + +package convert + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ollama/ollama/llm" +) + +func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) { + t.Helper() + + mf, err := GetModelFormat(p) + if err != nil { + t.Fatal(err) + } + + params, err := mf.GetParams(p) + if err != nil { + t.Fatal(err) + } + + arch, err := mf.GetModelArch("", p, params) + if err != nil { + t.Fatal(err) + } + + if err := arch.LoadVocab(); err != nil { + t.Fatal(err) + } + + if err := arch.GetTensors(); err != nil { + t.Fatal(err) + } + + f, err := os.CreateTemp(t.TempDir(), "f16") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if err := arch.WriteGGUF(f); err != nil { + t.Fatal(err) + } + + r, err := os.Open(f.Name()) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + m, _, err := llm.DecodeGGML(r) + if err != nil { + t.Fatal(err) + } + + return m.KV(), m.Tensors() +} + +func TestConvertFull(t *testing.T) { + cases := []struct { + path string + arch string + tensors int + layers int + }{ + {"Meta-Llama-3-8B-Instruct", "llama", 291, 35}, + {"Mistral-7B-Instruct-v0.2", "llama", 291, 35}, + {"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35}, + {"gemma-2b-it", "gemma", 164, 20}, + } + + for _, tt := range cases { + t.Run(tt.path, func(t *testing.T) { + p := filepath.Join("testdata", tt.path) + if _, err := os.Stat(p); err != nil { + t.Skipf("%s not found", p) + } + + kv, tensors := convertFull(t, p) + + if kv.Architecture() != tt.arch { + t.Fatalf("expected llama, got %s", kv.Architecture()) + } + + if kv.FileType().String() != "F16" { + t.Fatalf("expected F16, got %s", kv.FileType()) + } + + if len(tensors) != tt.tensors { + t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors)) + } + + layers := tensors.Layers() + if len(layers) != tt.layers { + t.Fatalf("expected %d layers, got %d", tt.layers, len(layers)) + } + }) + } +} diff --git a/convert/gemma.go b/convert/gemma.go index 88abe646..9dc406e0 100644 --- a/convert/gemma.go +++ b/convert/gemma.go @@ -1,14 +1,11 @@ package convert import ( - "encoding/binary" "fmt" "io" "log/slog" - "os" "strings" - "github.com/d4l3k/go-bfloat16" "github.com/pdevine/tensor" "github.com/pdevine/tensor/native" @@ -19,49 +16,27 @@ type GemmaModel struct { ModelData } -func gemmaLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error { - slog.Debug(fmt.Sprintf("converting '%s'", r.t.Name)) - - data := make([]byte, r.end-r.start) - if err := binary.Read(f, r.bo, data); err != nil { - return err - } - - tDataF32 := bfloat16.DecodeFloat32(data) - - var err error - tDataF32, err = addOnes(tDataF32, int(r.t.Shape[0])) - if err != nil { - return err - } - - if err := binary.Write(w, r.bo, tDataF32); err != nil { - return err - } - 
return nil -} - func addOnes(data []float32, vectorSize int) ([]float32, error) { n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data)) ones := tensor.Ones(tensor.Float32, vectorSize) - var err error - n, err = n.Add(ones) + n, err := n.Add(ones) if err != nil { - return []float32{}, err + return nil, err } - newN, err := native.SelectF32(n, 0) + ts, err := native.SelectF32(n, 0) if err != nil { - return []float32{}, err + return nil, err } - var fullTensor []float32 - for _, v := range newN { - fullTensor = append(fullTensor, v...) + var f32s []float32 + for _, t := range ts { + f32s = append(f32s, t...) } - return fullTensor, nil + + return f32s, nil } func (m *GemmaModel) GetTensors() error { @@ -71,12 +46,10 @@ func (m *GemmaModel) GetTensors() error { } slog.Debug(fmt.Sprintf("Total tensors: %d", len(t))) - - m.Tensors = []llm.Tensor{} for _, l := range t { if strings.HasSuffix(l.Name, "norm.weight") { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = gemmaLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -94,6 +67,10 @@ func (m *GemmaModel) LoadVocab() error { return nil } +func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) { + return addOnes(data, int(shape[0])) +} + func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error { kv := llm.KV{ "general.architecture": "gemma", diff --git a/convert/llama.go b/convert/llama.go index fb576e2e..7853c4cf 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -1,17 +1,17 @@ package convert import ( - "encoding/binary" + "cmp" + "errors" "fmt" "io" - "log/slog" + "os" + "path/filepath" "regexp" "strings" - "github.com/nlpodyssey/gopickle/pytorch" "github.com/pdevine/tensor" "github.com/pdevine/tensor/native" - "github.com/x448/float16" "github.com/ollama/ollama/llm" ) @@ -20,81 +20,12 @@ type LlamaModel struct { ModelData } -func llamaLayerHandler(w io.Writer, r torchWriterTo) error { - slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name)) - - data := r.storage.(*pytorch.HalfStorage).Data - tData := make([]uint16, len(data)) - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - - var err error - var heads uint32 - if strings.Contains(r.t.Name, "attn_q") { - heads = uint32(r.params.AttentionHeads) - } else if strings.Contains(r.t.Name, "attn_k") { - heads = uint32(r.params.KeyValHeads) - if heads == 0 { - heads = uint32(r.params.AttentionHeads) - } - } else { - return fmt.Errorf("unknown layer type") - } - - slog.Debug(fmt.Sprintf("heads = %d", heads)) - - tData, err = llamaRepack(tData, int(heads), r.t.Shape) - if err != nil { - return err - } - - if err = binary.Write(w, r.bo, tData); err != nil { - return err - } - return nil -} - -func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) { - n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data)) - origShape := n.Shape().Clone() - - // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf - if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil { - return nil, err - } - - if err := n.T(0, 2, 1, 3); err != nil { - return nil, err - } - - if err := n.Reshape(origShape...); err != nil { - return nil, err - } - - if err := n.Transpose(); err != nil { - return nil, err - } - newN, err := native.SelectU16(n, 1) - if err != nil { - return nil, err - } - - var fullTensor []uint16 - for _, v := range newN { - fullTensor = append(fullTensor, v...) 
- } - return fullTensor, nil -} - func (m *LlamaModel) GetTensors() error { t, err := m.Format.GetTensors(m.Path, m.Params) if err != nil { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { @@ -104,10 +35,16 @@ func (m *LlamaModel) GetTensors() error { for _, l := range t { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { - slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name)) - wt := l.WriterTo.(torchWriterTo) - wt.handler = llamaLayerHandler - l.WriterTo = wt + switch m.Format.(type) { + case *TorchFormat: + wt := l.WriterTo.(torchWriterTo) + wt.repacker = m.Repack + l.WriterTo = wt + case *SafetensorFormat: + wt := l.WriterTo.(safetensorWriterTo) + wt.repacker = m.Repack + l.WriterTo = wt + } } m.Tensors = append(m.Tensors, l) } @@ -115,19 +52,22 @@ func (m *LlamaModel) GetTensors() error { return nil } -func (m *LlamaModel) LoadVocab() error { - var v *Vocab - var err error - - slog.Debug("loading vocab") - v, err = LoadSentencePieceTokens(m.Path, m.Params) - if err != nil { +func (m *LlamaModel) LoadVocab() (err error) { + pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json")) + if errors.Is(err, os.ErrNotExist) { + return nil + } else if err != nil { return err } - slog.Debug("vocab loaded") + m.Vocab = &Vocab{} + for _, t := range ts { + m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content) + m.Vocab.Types = append(m.Vocab.Types, t.Type()) + } - m.Vocab = v + m.Vocab.Merges = merges + m.Params.PreTokenizer = pre return nil } @@ -140,23 +80,79 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "llama.embedding_length": uint32(m.Params.HiddenSize), "llama.block_count": uint32(m.Params.HiddenLayers), "llama.feed_forward_length": uint32(m.Params.IntermediateSize), + "llama.rope.freq_base": float32(m.Params.RopeFrequencyBase), "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads), "llama.attention.head_count": uint32(m.Params.AttentionHeads), "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads), "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS), "general.file_type": uint32(1), - "tokenizer.ggml.model": "llama", + "tokenizer.ggml.model": "gpt2", + "tokenizer.ggml.pre": m.Params.PreTokenizer, "tokenizer.ggml.tokens": m.Vocab.Tokens, - "tokenizer.ggml.scores": m.Vocab.Scores, "tokenizer.ggml.token_type": m.Vocab.Types, "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID), "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID), "tokenizer.ggml.unknown_token_id": uint32(0), - "tokenizer.ggml.add_bos_token": true, - "tokenizer.ggml.add_eos_token": false, + } + + if len(m.Vocab.Merges) > 0 { + kv["tokenizer.ggml.merges"] = m.Vocab.Merges + } else { + kv["tokenizer.ggml.scores"] = m.Vocab.Scores } return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} + +func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) { + var dims []int + for _, dim := range shape { + if dim != 0 { + dims = append(dims, int(dim)) + } + } + + var heads int + if strings.HasSuffix(name, "attn_q.weight") { + heads = params.AttentionHeads + } else if strings.HasSuffix(name, "attn_k.weight") { + heads = cmp.Or(params.KeyValHeads, params.AttentionHeads) + } else { + return nil, fmt.Errorf("unknown tensor name: %s", 
name) + } + + n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data)) + if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil { + return nil, err + } + + if err := n.T(0, 2, 1, 3); err != nil { + return nil, err + } + + if err := n.Reshape(dims...); err != nil { + return nil, err + } + + if err := n.Transpose(); err != nil { + return nil, err + } + + ts, err := native.SelectF32(n, 1) + if err != nil { + return nil, err + } + + var f32s []float32 + for _, t := range ts { + f32s = append(f32s, t...) + } + + return f32s, nil +} diff --git a/convert/mistral.go b/convert/mistral.go index f88de12b..da6874cf 100644 --- a/convert/mistral.go +++ b/convert/mistral.go @@ -1,17 +1,8 @@ package convert import ( - "encoding/binary" - "fmt" "io" - "os" "regexp" - "strings" - - "github.com/d4l3k/go-bfloat16" - "github.com/pdevine/tensor" - "github.com/pdevine/tensor/native" - "github.com/x448/float16" "github.com/ollama/ollama/llm" ) @@ -20,90 +11,12 @@ type MistralModel struct { ModelData } -func mistralLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error { - layerSize := r.end - r.start - - var err error - tData := make([]uint16, layerSize/2) - if err = binary.Read(f, r.bo, tData); err != nil { - return err - } - - var heads uint32 - if strings.Contains(r.t.Name, "attn_q") { - heads = uint32(r.params.AttentionHeads) - } else if strings.Contains(r.t.Name, "attn_k") { - heads = uint32(r.params.KeyValHeads) - if heads == 0 { - heads = uint32(r.params.AttentionHeads) - } - } else { - return fmt.Errorf("unknown layer type") - } - - tData, err = repack(tData, int(heads), r.t.Shape) - if err != nil { - return err - } - - var buf []byte - for _, n := range tData { - buf = r.bo.AppendUint16(buf, n) - } - - tempBuf := make([]uint16, len(tData)) - tDataF32 := bfloat16.DecodeFloat32(buf) - for cnt, v := range tDataF32 { - tDataF16 := float16.Fromfloat32(v) - tempBuf[cnt] = uint16(tDataF16) - } - - if err = binary.Write(w, r.bo, tempBuf); err != nil { - return err - } - return nil -} - -func repack(data []uint16, heads int, shape []uint64) ([]uint16, error) { - n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data)) - origShape := n.Shape().Clone() - - // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf - if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil { - return nil, err - } - - if err := n.T(0, 2, 1, 3); err != nil { - return nil, err - } - - if err := n.Reshape(origShape...); err != nil { - return nil, err - } - - if err := n.Transpose(); err != nil { - return nil, err - } - newN, err := native.SelectU16(n, 1) - if err != nil { - return nil, err - } - - var fullTensor []uint16 - for _, v := range newN { - fullTensor = append(fullTensor, v...) 
- } - return fullTensor, nil -} - func (m *MistralModel) GetTensors() error { t, err := m.Format.GetTensors(m.Path, m.Params) if err != nil { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { @@ -114,7 +27,7 @@ func (m *MistralModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = mistralLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -160,3 +73,7 @@ func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error { return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} diff --git a/convert/mixtral.go b/convert/mixtral.go index 940df55d..baea68cd 100644 --- a/convert/mixtral.go +++ b/convert/mixtral.go @@ -17,8 +17,6 @@ func (m *MixtralModel) GetTensors() error { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { @@ -29,7 +27,7 @@ func (m *MixtralModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = mistralLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -83,3 +81,7 @@ func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error { return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} diff --git a/convert/safetensors.go b/convert/safetensors.go index 69424c4d..69270b87 100644 --- a/convert/safetensors.go +++ b/convert/safetensors.go @@ -6,14 +6,13 @@ import ( "encoding/json" "fmt" "io" - "log/slog" "os" "path/filepath" "regexp" "slices" + "strings" "github.com/d4l3k/go-bfloat16" - "github.com/mitchellh/mapstructure" "github.com/x448/float16" "github.com/ollama/ollama/llm" @@ -26,39 +25,38 @@ type safetensorWriterTo struct { bo ByteOrder filename string + dtype string - start, end, padding uint64 - handler func(w io.Writer, r safetensorWriterTo, f *os.File) error + offset, size int64 + repacker func(string, []float32, []uint64) ([]float32, error) } -type tensorMetaData struct { - Type string `mapstructure:"dtype"` - Shape []int `mapstructure:"shape"` - Offsets []int `mapstructure:"data_offsets"` +type safetensorMetadata struct { + Type string `json:"dtype"` + Shape []uint64 `json:"shape"` + Offsets []int64 `json:"data_offsets"` } type SafetensorFormat struct{} func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) { - slog.Debug("getting tensor data") var tensors []llm.Tensor - files, err := filepath.Glob(filepath.Join(dirpath, "/model-*.safetensors")) + matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors")) if err != nil { return nil, err } var offset uint64 - for _, f := range files { + for _, f := range matches { var t []llm.Tensor var err error t, offset, err = m.readTensors(f, offset, params) if err != nil { - slog.Error(err.Error()) return nil, err } + tensors = append(tensors, t...) 
} - slog.Debug(fmt.Sprintf("all tensors = %d", len(tensors))) return tensors, nil } @@ -69,70 +67,57 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) } defer f.Close() - var jsonSize uint64 - if err := binary.Read(f, binary.LittleEndian, &jsonSize); err != nil { + var n int64 + if err := binary.Read(f, binary.LittleEndian, &n); err != nil { return nil, 0, err } - buf := make([]byte, jsonSize) - _, err = io.ReadFull(f, buf) - if err != nil { + b := bytes.NewBuffer(make([]byte, 0, n)) + if _, err = io.CopyN(b, f, n); err != nil { return nil, 0, err } - d := json.NewDecoder(bytes.NewBuffer(buf)) - d.UseNumber() - var parsed map[string]interface{} - if err = d.Decode(&parsed); err != nil { + var headers map[string]safetensorMetadata + if err := json.NewDecoder(b).Decode(&headers); err != nil { return nil, 0, err } var keys []string - for k := range parsed { - keys = append(keys, k) + for key := range headers { + if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") { + keys = append(keys, key) + } } slices.Sort(keys) - slog.Info("converting layers") var tensors []llm.Tensor - for _, k := range keys { - vals := parsed[k].(map[string]interface{}) - var data tensorMetaData - if err = mapstructure.Decode(vals, &data); err != nil { - slog.Error("couldn't decode properly") - return nil, 0, err - } + for _, key := range keys { + value := headers[key] - var size uint64 var kind uint32 - switch len(data.Shape) { + switch len(value.Shape) { case 0: - // metadata + // valuedata continue - case 1: - // convert to float32 - kind = 0 - size = uint64(data.Shape[0] * 4) case 2: - // convert to float16 kind = 1 - size = uint64(data.Shape[0] * data.Shape[1] * 2) } - ggufName, err := m.GetLayerName(k) + name, err := m.GetLayerName(key) if err != nil { - slog.Error(err.Error()) return nil, 0, err } - shape := []uint64{0, 0, 0, 0} - for i := range data.Shape { - shape[i] = uint64(data.Shape[i]) + shape := make([]uint64, len(value.Shape)) + copy(shape, value.Shape) + + pad := func(s int64) int64 { + return 8 + n + s } t := llm.Tensor{ - Name: ggufName, + Name: name, Kind: kind, Offset: offset, Shape: shape[:], @@ -143,18 +128,15 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) params: params, bo: params.ByteOrder, filename: fn, - start: uint64(data.Offsets[0]), - end: uint64(data.Offsets[1]), - padding: 8 + jsonSize, + dtype: value.Type, + offset: pad(value.Offsets[0]), + size: pad(value.Offsets[1]) - pad(value.Offsets[0]), } - offset += size + offset += t.Size() tensors = append(tensors, t) } - slog.Debug(fmt.Sprintf("total tensors for file = %d", len(tensors))) - slog.Debug(fmt.Sprintf("offset = %d", offset)) - return tensors, offset, nil } @@ -167,9 +149,7 @@ func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) { var params Params - d := json.NewDecoder(f) - err = d.Decode(¶ms) - if err != nil { + if err := json.NewDecoder(f).Decode(¶ms); err != nil { return nil, err } @@ -224,55 +204,58 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) { } defer f.Close() - if _, err = f.Seek(int64(r.padding+r.start), 0); err != nil { + if _, err = f.Seek(r.offset, io.SeekStart); err != nil { return 0, err } - // use the handler if one is present - if r.handler != nil { - return 0, r.handler(w, r, f) - } - - remaining := r.end - r.start - - bufSize := uint64(10240) - var finished bool - for { - data := make([]byte, min(bufSize, remaining)) - - b, err := io.ReadFull(f, data) - remaining -= uint64(b) - - if err == 
io.EOF || remaining <= 0 { - finished = true - } else if err != nil { + var f32s []float32 + switch r.dtype { + case "F32": + f32s = make([]float32, r.size/4) + if err = binary.Read(f, r.bo, f32s); err != nil { + return 0, err + } + case "F16": + u16s := make([]uint16, r.size/2) + if err = binary.Read(f, r.bo, u16s); err != nil { return 0, err } - // convert bfloat16 -> ieee float32 - tDataF32 := bfloat16.DecodeFloat32(data) - - switch r.t.Kind { - case 0: - if err := binary.Write(w, r.bo, tDataF32); err != nil { - return 0, err - } - case 1: - // convert float32 -> float16 - tempBuf := make([]uint16, len(data)/2) - for cnt, v := range tDataF32 { - tDataF16 := float16.Fromfloat32(v) - tempBuf[cnt] = uint16(tDataF16) - } - if err := binary.Write(w, r.bo, tempBuf); err != nil { - return 0, err - } + for _, b := range u16s { + f32s = append(f32s, float16.Frombits(b).Float32()) } - if finished { - break + + case "BF16": + u8s := make([]uint8, r.size) + if err = binary.Read(f, r.bo, u8s); err != nil { + return 0, err + } + + f32s = bfloat16.DecodeFloat32(u8s) + default: + return 0, fmt.Errorf("unknown data type: %s", r.dtype) + } + + if r.repacker != nil { + f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape) + if err != nil { + return 0, err } } - return 0, nil + + switch r.t.Kind { + case 0: + return 0, binary.Write(w, r.bo, f32s) + case 1: + f16s := make([]uint16, len(f32s)) + for i := range f32s { + f16s[i] = float16.Fromfloat32(f32s[i]).Bits() + } + + return 0, binary.Write(w, r.bo, f16s) + default: + return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind) + } } func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) { @@ -281,6 +264,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M return nil, fmt.Errorf("No architecture specified to convert") case 1: switch params.Architectures[0] { + case "LlamaForCausalLM": + return &LlamaModel{ + ModelData{ + Name: name, + Path: dirPath, + Params: params, + Format: m, + }, + }, nil case "MistralForCausalLM": return &MistralModel{ ModelData{ diff --git a/convert/tokenizer.go b/convert/tokenizer.go new file mode 100644 index 00000000..efeb5491 --- /dev/null +++ b/convert/tokenizer.go @@ -0,0 +1,109 @@ +package convert + +import ( + "cmp" + "crypto/sha256" + "encoding/json" + "fmt" + "log/slog" + "os" + "slices" + + "golang.org/x/exp/maps" +) + +type Tokenizer struct { + Version string `json:"version"` + AddedTokens []Token `json:"added_tokens"` + Model TokenizerModel `json:"model"` + + PreTokenizer struct { + PreTokenizers []struct { + Type string `json:"type"` + Pattern struct { + Regex string `json:"Regex"` + } `json:"pattern"` + } `json:"pretokenizers"` + } `json:"pre_tokenizer"` +} + +type TokenizerModel struct { + Type string `json:"type"` + Vocab map[string]int `json:"vocab"` + Merges []string `json:"merges"` + Tokens []Token +} + +type Token struct { + ID int `json:"id"` + Content string `json:"content"` + Special bool `json:"special"` + UserDefined bool +} + +func (t *Token) Type() int32 { + switch { + case t.Special: + return tokenTypeControl + case t.UserDefined: + return tokenTypeUserDefined + default: + return tokenTypeNormal + } +} + +func (t *Tokenizer) maxID() int { + return max( + slices.Max(maps.Values(t.Model.Vocab)), + slices.MaxFunc(t.AddedTokens, func(a, b Token) int { + return cmp.Compare(a.ID, b.ID) + }).ID, + ) +} + +func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) { + f, err := os.Open(dirpath) + if err != nil 
{ + panic(err) + } + defer f.Close() + + var t Tokenizer + if err := json.NewDecoder(f).Decode(&t); err != nil { + return "", nil, nil, err + } + + tokens = make([]Token, t.maxID()+1) + for k, v := range t.Model.Vocab { + tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false} + } + + for _, v := range t.AddedTokens { + v.UserDefined = true + tokens[v.ID] = v + } + + sha256sum := sha256.New() + for _, pt := range t.PreTokenizer.PreTokenizers { + switch pt.Type { + case "Split": + if pt.Pattern.Regex != "" { + sha256sum.Write([]byte(pt.Pattern.Regex)) + } + } + } + + switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest { + case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f": + pre = "llama-bpe" + case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02": + pre = "deepseek-llm" + case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e": + pre = "deepseek-coder" + default: + slog.Warn("unknown pretokenizer, using default", "digest", digest) + pre = "default" + } + + return pre, tokens, t.Model.Merges, nil +} diff --git a/convert/torch.go b/convert/torch.go index 92c58872..b7ae0f76 100644 --- a/convert/torch.go +++ b/convert/torch.go @@ -24,8 +24,8 @@ type torchWriterTo struct { params *Params bo ByteOrder - storage pytorch.StorageInterface - handler func(w io.Writer, r torchWriterTo) error + storage pytorch.StorageInterface + repacker func(string, []float32, []uint64) ([]float32, error) } type TorchFormat struct{} @@ -33,14 +33,14 @@ type TorchFormat struct{} func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) { slog.Debug("getting torch tensors") - files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) - if err != nil { - slog.Error("didn't find any torch files") - return nil, err + var files []string + if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 { + files = append(files, pt...) + } else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 { + files = append(files, pt...) 
} var offset uint64 - var tensors []llm.Tensor for _, fn := range files { m, err := pytorch.Load(fn) @@ -77,7 +77,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, slog.Error(err.Error()) return nil, err } - slog.Debug(fmt.Sprintf("finding name for '%s' -> '%s'", k.(string), ggufName)) + slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape)) shape := []uint64{0, 0, 0, 0} for i := range tshape { @@ -120,7 +120,7 @@ func getAltParams(dirpath string) (*Params, error) { AttentionHeads int `json:"n_heads"` KeyValHeads int `json:"n_kv_heads"` HiddenLayers int `json:"n_layers"` - RopeTheta int `json:"rope_theta"` + RopeTheta float64 `json:"rope_theta"` NormEPS float64 `json:"norm_eps"` } @@ -133,6 +133,7 @@ func getAltParams(dirpath string) (*Params, error) { } params := &Params{ + Architectures: []string{"LlamaForCausalLM"}, HiddenSize: tparams.HiddenSize, AttentionHeads: tparams.AttentionHeads, KeyValHeads: tparams.KeyValHeads, @@ -229,37 +230,38 @@ func (m *TorchFormat) GetLayerName(n string) (string, error) { } func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) { - // use the handler if one is present - if r.handler != nil { - return 0, r.handler(w, r) + var f32s []float32 + switch s := r.storage.(type) { + case *pytorch.FloatStorage: + f32s = s.Data + case *pytorch.HalfStorage: + f32s = s.Data + case *pytorch.BFloat16Storage: + f32s = s.Data + default: + return 0, fmt.Errorf("unknown data type: %T", s) } - switch r.storage.(type) { - case *pytorch.FloatStorage: - slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name)) - return 0, nil - case *pytorch.HalfStorage: - switch r.t.Kind { - case 0: - data := r.storage.(*pytorch.HalfStorage).Data - slog.Debug(fmt.Sprintf("%35s F32 (%d)", r.t.Name, len(data))) - if err := binary.Write(w, r.bo, data); err != nil { - return 0, err - } - case 1: - data := r.storage.(*pytorch.HalfStorage).Data - tData := make([]uint16, len(data)) - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - slog.Debug(fmt.Sprintf("%35s F16 (%d)", r.t.Name, len(tData))) - if err := binary.Write(w, r.bo, tData); err != nil { - return 0, err - } + if r.repacker != nil { + f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape) + if err != nil { + return 0, err } } - return 0, nil + switch r.t.Kind { + case 0: + return 0, binary.Write(w, r.bo, f32s) + case 1: + f16s := make([]uint16, len(f32s)) + for i := range f32s { + f16s[i] = float16.Fromfloat32(f32s[i]).Bits() + } + + return 0, binary.Write(w, r.bo, f16s) + default: + return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind) + } } func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) { diff --git a/docs/development.md b/docs/development.md index 2f7b9ecf..8c035a51 100644 --- a/docs/development.md +++ b/docs/development.md @@ -6,6 +6,8 @@ Install required tools: - go version 1.22 or higher - gcc version 11.4.0 or higher +### MacOS + ```bash brew install go cmake gcc ``` diff --git a/docs/faq.md b/docs/faq.md index 22bd4da7..b50a3138 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. 
Click on the ta On Linux, re-run the install script: -``` +```shell curl -fsSL https://ollama.com/install.sh | sh ``` @@ -30,7 +30,7 @@ To change this when using `ollama run`, use `/set parameter`: When using the API, specify the `num_ctx` parameter: -``` +```shell curl http://localhost:11434/api/generate -d '{ "model": "llama3", "prompt": "Why is the sky blue?", @@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{ }' ``` +## How can I tell if my model was loaded onto the GPU? + +Use the `ollama ps` command to see what models are currently loaded into memory. + +```shell +ollama ps +NAME ID SIZE PROCESSOR UNTIL +llama3:70b bcfb190ca3a7 42 GB 100% GPU 4 minutes from now +``` + +The `Processor` column will show which memory the model was loaded into: +* `100% GPU` means the model was loaded entirely into the GPU +* `100% CPU` means the model was loaded entirely in system memory +* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory + ## How do I configure Ollama server? Ollama server can be configured with environment variables. @@ -94,6 +109,34 @@ On Windows, Ollama inherits your user and system environment variables. 6. Start the Ollama application from the Windows Start menu. +## How do I use Ollama behind a proxy? + +Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the value. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform. + +### How do I use Ollama behind a proxy in Docker? + +The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container. + +Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy). + +Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate. + +```dockerfile +FROM ollama/ollama +COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt +RUN update-ca-certificates +``` + +Build and run this image: + +```shell +docker build -t ollama-with-ca . +docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca +``` + +## Does Ollama send my prompts and answers back to ollama.com? + +No. Ollama runs locally, and conversation data does not leave your machine. ## How can I expose Ollama on my network? @@ -120,7 +163,7 @@ server { Ollama can be accessed using a range of tools for tunneling tools.
For example with Ngrok: -``` +```shell ngrok http 11434 --host-header="localhost:11434" ``` @@ -128,7 +171,7 @@ ngrok http 11434 --host-header="localhost:11434" To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags: -``` +```shell cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434" ``` @@ -150,39 +193,10 @@ If a different directory needs to be used, set the environment variable `OLLAMA_ Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform. -## Does Ollama send my prompts and answers back to ollama.com? - -No. Ollama runs locally, and conversation data does not leave your machine. - ## How can I use Ollama in Visual Studio Code? There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme. -## How do I use Ollama behind a proxy? - -Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variables, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform. - -### How do I use Ollama behind a proxy in Docker? - -The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container. - -Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy). - -Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate. - -```dockerfile -FROM ollama/ollama -COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt -RUN update-ca-certificates -``` - -Build and run this image: - -```shell -docker build -t ollama-with-ca . -docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca -``` - ## How do I use Ollama with GPU acceleration in Docker? The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details. @@ -197,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these properties. -## How can I pre-load a model to get faster response times? +## How can I preload a model into Ollama to get faster response times? If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints. 
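For reference, a minimal Go sketch of the preloading trick described above, assuming the server is reachable at the default `http://localhost:11434` and that a `llama3` model has already been pulled; it sends the same empty `/api/generate` request shown in the curl examples in this FAQ.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// An empty generate request (no prompt) asks the server to load the
	// model into memory without producing any tokens.
	body := bytes.NewBufferString(`{"model": "llama3"}`)

	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response reports the model as loaded; the generated text is empty.
	out, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```

The same request body can be sent to `/api/chat` for models you intend to use through the chat endpoint.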
@@ -211,6 +225,11 @@ To use the chat completions endpoint, use: curl http://localhost:11434/api/chat -d '{"model": "mistral"}' ``` +To preload a model using the CLI, use the command: +```shell +ollama run llama3 "" +``` + ## How do I keep a model loaded in memory or make it unload immediately? By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory. @@ -235,8 +254,6 @@ Alternatively, you can change the amount of time all models are loaded into memo If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints. -## How do I manage the maximum number of requests the server can queue +## How do I manage the maximum number of requests the Ollama server can queue? -If too many requests are sent to the server, it will respond with a 503 error -indicating the server is overloaded. You can adjust how many requests may be -queue by setting `OLLAMA_MAX_QUEUE` +If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE`. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2586e4e4..729ec96c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,104 +1,86 @@ -# How to troubleshoot issues - -Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command: - -```shell -cat ~/.ollama/logs/server.log -``` - -On **Linux** systems with systemd, the logs can be found with this command: - -```shell -journalctl -u ollama -``` - -When you run Ollama in a **container**, the logs go to stdout/stderr in the container: - -```shell -docker logs -``` -(Use `docker ps` to find the container name) - -If manually running `ollama serve` in a terminal, the logs will be on that terminal. - -When you run Ollama on **Windows**, there are a few different locations.
Rosetta emulation under MacOS will work with the -`cpu` library. - -In the server log, you will see a message that looks something like this (varies -from release to release): - -``` -Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5] -``` - -**Experimental LLM Library Override** - -You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass -autodetection, so for example, if you have a CUDA card, but want to force the -CPU LLM library with AVX2 vector support, use: - -``` -OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve -``` - -You can see what features your CPU has with the following. -``` -cat /proc/cpuinfo| grep flags | head -1 -``` - -## Installing older or pre-release versions on Linux - -If you run into problems on Linux and want to install an older version, or you'd -like to try out a pre-release before it's officially released, you can tell the -install script which version to install. - -```sh -curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh -``` - -## Linux tmp noexec - -If your system is configured with the "noexec" flag where Ollama stores its -temporary executable files, you can specify an alternate location by setting -OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example -OLLAMA_TMPDIR=/usr/share/ollama/ - -## Container fails to run on NVIDIA GPU - -Make sure you've set up the conatiner runtime first as described in [docker.md](./docker.md) - -Sometimes the container runtime can have difficulties initializing the GPU. -When you check the server logs, this can show up as various error codes, such -as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" -(unknown), or others. The following troubleshooting techniques may help resolve -the problem - -- Is the uvm driver not loaded? `sudo nvidia-modprobe -u` -- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` -- Try rebooting -- Make sure you're running the latest nvidia drivers - -If none of those resolve the problem, gather additional information and file an issue: -- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs -- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` +# How to troubleshoot issues + +Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command: + +```shell +cat ~/.ollama/logs/server.log +``` + +On **Linux** systems with systemd, the logs can be found with this command: + +```shell +journalctl -u ollama +``` + +When you run Ollama in a **container**, the logs go to stdout/stderr in the container: + +```shell +docker logs +``` +(Use `docker ps` to find the container name) + +If manually running `ollama serve` in a terminal, the logs will be on that terminal. + +When you run Ollama on **Windows**, there are a few different locations. 
You can view them in the explorer window by hitting `<cmd>+R` and typing in: +- `explorer %LOCALAPPDATA%\Ollama` to view logs +- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH) +- `explorer %HOMEPATH%\.ollama` to browse where models and configuration are stored +- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories + +To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu**, then in a PowerShell terminal run: +```powershell +$env:OLLAMA_DEBUG="1" +& "ollama app.exe" +``` + +Join the [Discord](https://discord.gg/ollama) for help interpreting the logs. + +## LLM libraries + +Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU), you can work around this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx`; the slowest but most compatible is `cpu`. Rosetta emulation under macOS will work with the `cpu` library. + +In the server log, you will see a message that looks something like this (varies from release to release): + +``` +Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5] +``` + +**Experimental LLM Library Override** + +You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection. For example, if you have a CUDA card but want to force the CPU LLM library with AVX2 vector support, use: + +``` +OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve +``` + +You can see what features your CPU has with the following command: +``` +cat /proc/cpuinfo | grep flags | head -1 +``` + +## Installing older or pre-release versions on Linux + +If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install. + +```sh +curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh +``` + +## Linux tmp noexec + +If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example, OLLAMA_TMPDIR=/usr/share/ollama/ + +## Container fails to run on NVIDIA GPU + +Make sure you've set up the container runtime first as described in [docker.md](./docker.md). + +Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem: + +- Is the uvm driver not loaded?
`sudo nvidia-modprobe -u` +- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` +- Try rebooting +- Make sure you're running the latest nvidia drivers + +If none of those resolve the problem, gather additional information and file an issue: +- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs +- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` diff --git a/docs/windows.md b/docs/windows.md index 242b810a..832b3d43 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -33,7 +33,7 @@ Here's a quick example showing API access from `powershell` ## Troubleshooting While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds -a "view logs" menu item to the app, and increses logging for the GUI app and +a "view logs" menu item to the app, and increases logging for the GUI app and server. Ollama on Windows stores files in a few different locations. You can view them in diff --git a/go.mod b/go.mod index 784fa847..2f3d4ca3 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,10 @@ go 1.22.0 require ( github.com/containerd/console v1.0.3 - github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 github.com/emirpasic/gods v1.18.1 github.com/gin-gonic/gin v1.10.0 github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.1.2 - github.com/mitchellh/mapstructure v1.5.0 github.com/olekukonko/tablewriter v0.0.5 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.9.0 @@ -18,6 +16,8 @@ require ( ) require ( + github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 + github.com/mattn/go-runewidth v0.0.14 github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c ) @@ -33,7 +33,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect diff --git a/go.sum b/go.sum index 945919d8..9e1baebe 100644 --- a/go.sum +++ b/go.sum @@ -135,8 +135,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 0c339989..e0424a92 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -334,6 +334,7 @@ struct server_metrics { struct llama_server_context { llama_model *model = nullptr; + float modelProgress = 0.0; llama_context *ctx = nullptr; clip_ctx *clp_ctx = nullptr; @@ -737,7 +738,7 @@ struct llama_server_context 
sampler_names.emplace_back(sampler_name); } } - slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false); + slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false); } else { @@ -1095,7 +1096,7 @@ struct llama_server_context std::vector samplers_sequence; for (const auto &sampler_type : slot.sparams.samplers_sequence) { - samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type)); + samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type)); } return json { @@ -2104,6 +2105,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled"); printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel); printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n"); + printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled"); printf(" -spf FNAME, --system-prompt-file FNAME\n"); printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n"); printf(" -ctk TYPE, --cache-type-k TYPE\n"); @@ -2501,7 +2503,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.use_mmap = false; } - else if (arg == "--numa") { + else if (arg == "--numa") + { if (++i >= argc) { invalid_param = true; break; @@ -2521,6 +2524,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.cont_batching = true; } + else if (arg == "-fa" || arg == "--flash-attn") + { + params.flash_attn = true; + } else if (arg == "-np" || arg == "--parallel") { if (++i >= argc) @@ -2529,7 +2536,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, break; } params.n_parallel = std::stoi(argv[i]); - } else if (arg == "-n" || arg == "--n-predict") + } + else if (arg == "-n" || arg == "--n-predict") { if (++i >= argc) { @@ -2537,7 +2545,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, break; } params.n_predict = std::stoi(argv[i]); - } else if (arg == "-spf" || arg == "--system-prompt-file") + } + else if (arg == "-spf" || arg == "--system-prompt-file") { if (++i >= argc) { @@ -2771,6 +2780,12 @@ inline void signal_handler(int signal) { shutdown_handler(signal); } +static bool update_load_progress(float progress, void *data) +{ + ((llama_server_context*)data)->modelProgress = progress; + return true; +} + #if defined(_WIN32) char* wchar_to_char(const wchar_t* wstr) { if (wstr == nullptr) return nullptr; @@ -2876,7 +2891,9 @@ int main(int argc, char **argv) { break; } case SERVER_STATE_LOADING_MODEL: - res.set_content(R"({"status": "loading model"})", "application/json"); + char buf[128]; + snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress); + res.set_content(buf, "application/json"); res.status = 503; // HTTP Service Unavailable break; case SERVER_STATE_ERROR: @@ -3071,6 +3088,9 @@ int main(int argc, char **argv) { }); // load the model + params.progress_callback = update_load_progress; + params.progress_callback_user_data = (void*)&llama; + if (!llama.load_model(params)) { state.store(SERVER_STATE_ERROR); diff --git a/llm/filetype.go b/llm/filetype.go index e5e9410d..7a8e9f69 100644 --- a/llm/filetype.go +++ b/llm/filetype.go @@ -27,8 +27,16 @@ const ( fileTypeIQ2_XXS 
fileTypeIQ2_XS fileTypeQ2_K_S - fileTypeQ3_K_XS + fileTypeIQ3_XS fileTypeIQ3_XXS + fileTypeIQ1_S + fileTypeIQ4_NL + fileTypeIQ3_S + fileTypeIQ2_S + fileTypeIQ4_XS + fileTypeIQ2_M + fileTypeIQ1_M + fileTypeBF16 fileTypeUnknown ) @@ -75,10 +83,26 @@ func ParseFileType(s string) (fileType, error) { return fileTypeIQ2_XS, nil case "Q2_K_S": return fileTypeQ2_K_S, nil - case "Q3_K_XS": - return fileTypeQ3_K_XS, nil + case "IQ3_XS": + return fileTypeIQ3_XS, nil case "IQ3_XXS": return fileTypeIQ3_XXS, nil + case "IQ1_S": + return fileTypeIQ1_S, nil + case "IQ4_NL": + return fileTypeIQ4_NL, nil + case "IQ3_S": + return fileTypeIQ3_S, nil + case "IQ2_S": + return fileTypeIQ2_S, nil + case "IQ4_XS": + return fileTypeIQ4_XS, nil + case "IQ2_M": + return fileTypeIQ2_M, nil + case "IQ1_M": + return fileTypeIQ1_M, nil + case "BF16": + return fileTypeBF16, nil default: return fileTypeUnknown, fmt.Errorf("unknown fileType: %s", s) } @@ -126,10 +150,26 @@ func (t fileType) String() string { return "IQ2_XS" case fileTypeQ2_K_S: return "Q2_K_S" - case fileTypeQ3_K_XS: - return "Q3_K_XS" + case fileTypeIQ3_XS: + return "IQ3_XS" case fileTypeIQ3_XXS: return "IQ3_XXS" + case fileTypeIQ1_S: + return "IQ1_S" + case fileTypeIQ4_NL: + return "IQ4_NL" + case fileTypeIQ3_S: + return "IQ3_S" + case fileTypeIQ2_S: + return "IQ2_S" + case fileTypeIQ4_XS: + return "IQ4_XS" + case fileTypeIQ2_M: + return "IQ2_M" + case fileTypeIQ1_M: + return "IQ1_M" + case fileTypeBF16: + return "BF16" default: return "unknown" } diff --git a/llm/ggla.go b/llm/ggla.go index cf14f214..a5d90b6c 100644 --- a/llm/ggla.go +++ b/llm/ggla.go @@ -119,7 +119,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { t.Offset = uint64(offset) - if _, err := rs.Seek(int64(t.size()), io.SeekCurrent); err != nil { + if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil { return err } diff --git a/llm/ggml.go b/llm/ggml.go index 40089be2..9b6da425 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -106,7 +106,7 @@ type Layer map[string]*Tensor func (l Layer) size() (size uint64) { for _, t := range l { - size += t.size() + size += t.Size() } return size @@ -124,12 +124,12 @@ type Tensor struct { } func (t Tensor) blockSize() uint64 { - switch { - case t.Kind < 2: + switch t.Kind { + case 0, 1, 24, 25, 26, 27, 28, 31: // F32, F16, I8, I16, I32, I64, F64, BF16 return 1 - case t.Kind < 10: + case 2, 3, 8, 9, 20: // Q4_0, Q4_1, Q8_0, Q8_1, IQ4_NL return 32 - default: + default: // All others return 256 } } @@ -171,7 +171,29 @@ func (t Tensor) typeSize() uint64 { case 17: // IQ2_XS return 2 + 2*blockSize/8 + blockSize/32 case 18: // IQ3_XXS - return 2 + 3*blockSize/8 + return 2 + blockSize/4 + blockSize/8 + case 19: // IQ1_S + return 2 + blockSize/8 + blockSize/16 + case 20: // IQ4_NL + return 2 + blockSize/2 + case 21: // IQ3_S + return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4 + case 22: // IQ2_S + return 2 + blockSize/4 + blockSize/16 + case 23: // IQ4_XS + return 2 + 2 + blockSize/2 + blockSize/64 + case 24: // I8 + return 1 + case 25: // I16 + return 2 + case 26: // I32 + return 4 + case 27: // I64 + return 8 + case 28: // F64 + return 8 + case 29: // IQ1_M + return blockSize/8 + blockSize/16 + blockSize/32 default: return 0 } @@ -185,7 +207,7 @@ func (t Tensor) parameters() uint64 { return count } -func (t Tensor) size() uint64 { +func (t Tensor) Size() uint64 { return t.parameters() * t.typeSize() / t.blockSize() } @@ -288,7 +310,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui // mixtral 8x22b ff := 
uint64(llm.KV()["llama.feed_forward_length"].(uint32)) partialOffload = max( - 3*ffnGateExpsWeight.size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), + 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), 4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch), ) } else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok { diff --git a/llm/gguf.go b/llm/gguf.go index 5f6e8004..0ba48f76 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -62,16 +62,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) { return model, nil } -const ( - _ uint32 = iota - GGUFTokenNormal - GGUFTokenUnknown - GGUFTokenControl - GGUFTokenUserDefined - GGUFTokenUnused - GGUFTokenByte -) - const ( ggufTypeUint8 uint32 = iota ggufTypeInt8 @@ -251,11 +241,11 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { } for _, tensor := range llm.tensors { - if _, err := rs.Seek(int64(tensor.size()), io.SeekCurrent); err != nil { + if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil { return err } - padding := llm.padding(int64(tensor.size()), int64(alignment)) + padding := llm.padding(int64(tensor.Size()), int64(alignment)) if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { return err } @@ -480,9 +470,11 @@ var ggufKVOrder = map[string][]string{ "gemma.attention.key_length", "gemma.attention.value_length", "general.file_type", + "tokenizer.ggml.pre", "tokenizer.ggml.model", "tokenizer.ggml.tokens", "tokenizer.ggml.scores", + "tokenizer.ggml.merges", "tokenizer.ggml.token_type", "tokenizer.ggml.bos_token_id", "tokenizer.ggml.eos_token_id", diff --git a/llm/llama.cpp b/llm/llama.cpp index 614d3b91..74f33adf 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 614d3b914e1c3e02596f869649eb4f1d3b68614d +Subproject commit 74f33adf5f8b20b08fc5a6aa17ce081abe86ef2f diff --git a/llm/patches/01-load-progress.diff b/llm/patches/01-load-progress.diff new file mode 100644 index 00000000..acd44d20 --- /dev/null +++ b/llm/patches/01-load-progress.diff @@ -0,0 +1,31 @@ +diff --git a/common/common.cpp b/common/common.cpp +index ba1ecf0e..cead57cc 100644 +--- a/common/common.cpp ++++ b/common/common.cpp +@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & + mparams.use_mmap = params.use_mmap; + mparams.use_mlock = params.use_mlock; + mparams.check_tensors = params.check_tensors; ++ mparams.progress_callback = params.progress_callback; ++ mparams.progress_callback_user_data = params.progress_callback_user_data; + if (params.kv_overrides.empty()) { + mparams.kv_overrides = NULL; + } else { +diff --git a/common/common.h b/common/common.h +index d80344f2..71e84834 100644 +--- a/common/common.h ++++ b/common/common.h +@@ -174,6 +174,13 @@ struct gpt_params { + // multimodal models (see examples/llava) + std::string mmproj = ""; // path to multimodal projector + std::vector image; // path to image file(s) ++ ++ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. ++ // If the provided progress_callback returns true, model loading continues. ++ // If it returns false, model loading is immediately aborted. 
++ llama_progress_callback progress_callback = NULL; ++ // context pointer passed to the progress callback ++ void * progress_callback_user_data; + }; + + void gpt_params_handle_model_default(gpt_params & params); diff --git a/llm/patches/03-load_exception.diff b/llm/patches/03-load_exception.diff index 9e838fa9..eb245c2a 100644 --- a/llm/patches/03-load_exception.diff +++ b/llm/patches/03-load_exception.diff @@ -1,8 +1,17 @@ +From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Thu, 23 May 2024 11:18:45 -0700 +Subject: [PATCH] throw exception on load errors + +--- + llama.cpp | 25 ++++++++++++++++--------- + 1 file changed, 16 insertions(+), 9 deletions(-) + diff --git a/llama.cpp b/llama.cpp -index 4225f955..7b762f86 100644 +index 15c66077..8ba90b6a 100644 --- a/llama.cpp +++ b/llama.cpp -@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam +@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam } } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); @@ -11,10 +20,10 @@ index 4225f955..7b762f86 100644 } return 0; -@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file( - }; +@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file( + } + model->rpc_servers.push_back(servers); } - - int status = llama_model_load(path_model, *model, params); - GGML_ASSERT(status <= 0); - if (status < 0) { @@ -22,6 +31,7 @@ index 4225f955..7b762f86 100644 - LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); - } else if (status == -2) { - LLAMA_LOG_INFO("%s: cancelled model load\n", __func__); ++ + try { + int status = llama_model_load(path_model, *model, params); + GGML_ASSERT(status <= 0); @@ -42,3 +52,6 @@ index 4225f955..7b762f86 100644 } return model; +-- +2.45.1 + diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff new file mode 100644 index 00000000..0d0bf05d --- /dev/null +++ b/llm/patches/05-default-pretokenizer.diff @@ -0,0 +1,35 @@ +From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Thu, 23 May 2024 11:33:20 -0700 +Subject: [PATCH] default pretokenizer on unrecognized type + +--- + llama.cpp | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +diff --git a/llama.cpp b/llama.cpp +index 15c66077..af1aede3 100644 +--- a/llama.cpp ++++ b/llama.cpp +@@ -4504,9 +4504,6 @@ static void llm_load_vocab( + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; +- } else if ( +- tokenizer_pre == "default") { +- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "llama3" || + tokenizer_pre == "llama-v3" || +@@ -4553,7 +4550,7 @@ static void llm_load_vocab( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; + } else { +- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); ++ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } + } else { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; +-- +2.45.1 + diff --git a/llm/server.go b/llm/server.go index ccb1e419..384d31ca 100644 --- a/llm/server.go +++ b/llm/server.go @@ -55,6 +55,7 @@ type llmServer struct { totalLayers uint64 gpuCount int loadDuration time.Duration // Record how long it took the model to load + loadProgress float32 sem 
*semaphore.Weighted } @@ -200,6 +201,23 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--numa") } + flashAttnEnabled := envconfig.FlashAttention + + // partial offloading does not support flash attention + if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 { + flashAttnEnabled = false + } + + // only cuda (compute capability 7+) and metal support flash attention + for _, g := range gpus { + if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) { + flashAttnEnabled = false + } + } + if flashAttnEnabled { + params = append(params, "--flash-attn") + } + numParallel := envconfig.NumParallel // TODO (jmorganca): multimodal models don't support parallel yet @@ -408,10 +426,11 @@ func (s ServerStatus) ToString() string { } type ServerStatusResp struct { - Status string `json:"status"` - SlotsIdle int `json:"slots_idle"` - SlotsProcessing int `json:"slots_processing"` - Error string `json:"error"` + Status string `json:"status"` + SlotsIdle int `json:"slots_idle"` + SlotsProcessing int `json:"slots_processing"` + Error string `json:"error"` + Progress float32 `json:"progress"` } func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { @@ -459,6 +478,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { case "no slot available": return ServerStatusNoSlotsAvailable, nil case "loading model": + s.loadProgress = status.Progress return ServerStatusLoadingModel, nil default: return ServerStatusError, fmt.Errorf("server error: %+v", status) @@ -499,7 +519,8 @@ func (s *llmServer) Ping(ctx context.Context) error { func (s *llmServer) WaitUntilRunning(ctx context.Context) error { start := time.Now() - expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load + stallDuration := 60 * time.Second + stallTimer := time.Now().Add(stallDuration) // give up if we stall for slog.Info("waiting for llama runner to start responding") var lastStatus ServerStatus = -1 @@ -517,13 +538,13 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { return fmt.Errorf("llama runner process has terminated: %v %s", err, msg) default: } - if time.Now().After(expiresAt) { + if time.Now().After(stallTimer) { // timeout msg := "" if s.status != nil && s.status.LastErrMsg != "" { msg = s.status.LastErrMsg } - return fmt.Errorf("timed out waiting for llama runner to start: %s", msg) + return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg) } if s.cmd.ProcessState != nil { msg := "" @@ -534,6 +555,7 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { } ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() + priorProgress := s.loadProgress status, _ := s.getServerStatus(ctx) if lastStatus != status && status != ServerStatusReady { // Only log on status changes @@ -546,6 +568,11 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { return nil default: lastStatus = status + // Reset the timer as long as we're making forward progress on the load + if priorProgress != s.loadProgress { + slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress)) + stallTimer = time.Now().Add(stallDuration) + } time.Sleep(time.Millisecond * 250) continue } diff --git a/macapp/src/index.ts b/macapp/src/index.ts index 28dac136..a5d04d5f 100644 --- a/macapp/src/index.ts +++ b/macapp/src/index.ts @@ -162,7 +162,7 @@ app.on('before-quit', () => { } }) 
-const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${ +const updateURL = `https://ollama.com/api/update?os=${process.platform}&arch=${ process.arch }&version=${app.getVersion()}&id=${id()}` diff --git a/types/model/file.go b/parser/parser.go similarity index 92% rename from types/model/file.go rename to parser/parser.go index ee398309..4f44f6af 100644 --- a/types/model/file.go +++ b/parser/parser.go @@ -1,4 +1,4 @@ -package model +package parser import ( "bufio" @@ -8,6 +8,7 @@ import ( "io" "strconv" "strings" + "unicode" ) type File struct { @@ -68,6 +69,11 @@ func ParseFile(r io.Reader) (*File, error) { var b bytes.Buffer var role string + var lineCount int + var linePos int + + var utf16 bool + var f File br := bufio.NewReader(r) @@ -79,6 +85,17 @@ func ParseFile(r io.Reader) (*File, error) { return nil, err } + // the utf16 byte order mark will be read as "unreadable" by ReadRune() + if isUnreadable(r) && lineCount == 0 && linePos == 0 { + utf16 = true + continue + } + + // skip the second byte if we're reading utf16 + if utf16 && r == 0 { + continue + } + next, r, err := parseRuneForState(r, curr) if errors.Is(err, io.ErrUnexpectedEOF) { return nil, fmt.Errorf("%w: %s", err, b.String()) @@ -86,6 +103,13 @@ func ParseFile(r io.Reader) (*File, error) { return nil, err } + if isNewline(r) { + lineCount++ + linePos = 0 + } else { + linePos++ + } + // process the state transition, some transitions need to be intercepted and redirected if next != curr { switch curr { @@ -285,6 +309,10 @@ func isNewline(r rune) bool { return r == '\r' || r == '\n' } +func isUnreadable(r rune) bool { + return r == unicode.ReplacementChar +} + func isValidMessageRole(role string) bool { return role == "system" || role == "user" || role == "assistant" } diff --git a/types/model/file_test.go b/parser/parser_test.go similarity index 92% rename from types/model/file_test.go rename to parser/parser_test.go index 8e71760c..21223cb1 100644 --- a/types/model/file_test.go +++ b/parser/parser_test.go @@ -1,11 +1,13 @@ -package model +package parser import ( "bytes" + "encoding/binary" "fmt" "io" "strings" "testing" + "unicode/utf16" "github.com/stretchr/testify/assert" ) @@ -509,3 +511,37 @@ SYSTEM "" } } + +func TestParseFileUTF16ParseFile(t *testing.T) { + data := `FROM bob +PARAMETER param1 1 +PARAMETER param2 4096 +SYSTEM You are a utf16 file. 
+` + // simulate a utf16 le file + utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...)) + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.LittleEndian, utf16File) + assert.NoError(t, err) + + actual, err := ParseFile(buf) + assert.NoError(t, err) + + expected := []Command{ + {Name: "model", Args: "bob"}, + {Name: "param1", Args: "1"}, + {Name: "param2", Args: "4096"}, + {Name: "system", Args: "You are a utf16 file."}, + } + + assert.Equal(t, expected, actual.Commands) + + // simulate a utf16 be file + buf = new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, utf16File) + assert.NoError(t, err) + + actual, err = ParseFile(buf) + assert.NoError(t, err) + assert.Equal(t, expected, actual.Commands) +} diff --git a/server/envconfig/config.go b/server/envconfig/config.go index 9ad68180..ae7d89b2 100644 --- a/server/envconfig/config.go +++ b/server/envconfig/config.go @@ -31,6 +31,8 @@ var ( RunnersDir string // Set via OLLAMA_TMPDIR in the environment TmpDir string + // Experimental flash attention + FlashAttention bool ) func AsMap() map[string]string { @@ -45,6 +47,7 @@ func AsMap() map[string]string { "OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel), "OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir), "OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir), + "OLLAMA_FLASH_ATTENTION": fmt.Sprintf("%v", FlashAttention), } } @@ -78,6 +81,13 @@ func LoadConfig() { } } + if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" { + d, err := strconv.ParseBool(fa) + if err == nil { + FlashAttention = d + } + } + RunnersDir = clean("OLLAMA_RUNNERS_DIR") if runtime.GOOS == "windows" && RunnersDir == "" { // On Windows we do not carry the payloads inside the main executable diff --git a/server/envconfig/config_test.go b/server/envconfig/config_test.go index bad7c4a7..429434ae 100644 --- a/server/envconfig/config_test.go +++ b/server/envconfig/config_test.go @@ -17,4 +17,7 @@ func TestConfig(t *testing.T) { t.Setenv("OLLAMA_DEBUG", "1") LoadConfig() require.True(t, Debug) + t.Setenv("OLLAMA_FLASH_ATTENTION", "1") + LoadConfig() + require.True(t, FlashAttention) } diff --git a/server/images.go b/server/images.go index 3f415b6d..520c899b 100644 --- a/server/images.go +++ b/server/images.go @@ -27,6 +27,7 @@ import ( "github.com/ollama/ollama/auth" "github.com/ollama/ollama/format" "github.com/ollama/ollama/llm" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/server/envconfig" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -61,36 +62,36 @@ func (m *Model) IsEmbedding() bool { } func (m *Model) String() string { - var modelfile model.File + var modelfile parser.File - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: m.ModelPath, }) for _, adapter := range m.AdapterPaths { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "adapter", Args: adapter, }) } for _, projector := range m.ProjectorPaths { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: projector, }) } if m.Template != "" { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "template", Args: m.Template, }) } if m.System != "" { - modelfile.Commands = append(modelfile.Commands, model.Command{ + 
modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "system", Args: m.System, }) @@ -100,13 +101,13 @@ func (m *Model) String() string { switch v := v.(type) { case []any: for _, s := range v { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", s), }) } default: - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", v), }) @@ -114,14 +115,14 @@ func (m *Model) String() string { } for _, license := range m.License { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "license", Args: license, }) } for _, msg := range m.Messages { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "message", Args: fmt.Sprintf("%s %s", msg.Role, msg.Content), }) @@ -314,7 +315,7 @@ func realpath(rel, from string) string { return abspath } -func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *model.File, fn func(resp api.ProgressResponse)) (err error) { +func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) { config := ConfigV2{ OS: "linux", Architecture: "amd64", @@ -339,7 +340,24 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } } else if strings.HasPrefix(c.Args, "@") { - blobpath, err := GetBlobsPath(strings.TrimPrefix(c.Args, "@")) + digest := strings.TrimPrefix(c.Args, "@") + if ib, ok := intermediateBlobs[digest]; ok { + p, err := GetBlobsPath(ib) + if err != nil { + return err + } + + if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) { + // pass + } else if err != nil { + return err + } else { + fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib)}) + digest = ib + } + } + + blobpath, err := GetBlobsPath(digest) if err != nil { return err } @@ -350,14 +368,14 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m } defer blob.Close() - baseLayers, err = parseFromFile(ctx, blob, fn) + baseLayers, err = parseFromFile(ctx, blob, digest, fn) if err != nil { return err } } else if file, err := os.Open(realpath(modelFileDir, c.Args)); err == nil { defer file.Close() - baseLayers, err = parseFromFile(ctx, file, fn) + baseLayers, err = parseFromFile(ctx, file, "", fn) if err != nil { return err } @@ -397,10 +415,17 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } - baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType) + layers, err := parseFromFile(ctx, temp, "", fn) if err != nil { return err } + + if len(layers) != 1 { + return errors.New("quantization failed") + } + + baseLayer.Layer = layers[0].Layer + baseLayer.GGML = layers[0].GGML } } diff --git a/server/layer.go b/server/layer.go index dcca3854..9ca43046 100644 --- a/server/layer.go +++ b/server/layer.go @@ -80,7 +80,7 @@ func NewLayerFromLayer(digest, mediatype, from string) (*Layer, error) { }, nil } -func (l *Layer) Open() (io.ReadCloser, error) { +func (l *Layer) Open() (io.ReadSeekCloser, error) { blob, err := GetBlobsPath(l.Digest) if err != nil { return nil, err diff --git a/server/model.go b/server/model.go index eea5d13a..fcf406f6 100644 --- 
a/server/model.go +++ b/server/model.go @@ -17,6 +17,8 @@ import ( "github.com/ollama/ollama/types/model" ) +var intermediateBlobs map[string]string = make(map[string]string) + type layerWithGGML struct { *Layer *llm.GGML @@ -76,7 +78,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe return layers, nil } -func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { +func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { stat, err := file.Stat() if err != nil { return nil, err @@ -165,16 +167,11 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp } layer, err := NewLayer(temp, "application/vnd.ollama.image.model") - if err != nil { - return nil, fmt.Errorf("aaa: %w", err) - } - - blobpath, err := GetBlobsPath(layer.Digest) if err != nil { return nil, err } - bin, err := os.Open(blobpath) + bin, err := layer.Open() if err != nil { return nil, err } @@ -185,16 +182,13 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp return nil, err } - layer, err = NewLayerFromLayer(layer.Digest, layer.MediaType, "") - if err != nil { - return nil, err - } - layers = append(layers, &layerWithGGML{layer, ggml}) + + intermediateBlobs[digest] = layer.Digest return layers, nil } -func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { +func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { sr := io.NewSectionReader(file, 0, 512) contentType, err := detectContentType(sr) if err != nil { @@ -205,7 +199,7 @@ func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressRespo case "gguf", "ggla": // noop case "application/zip": - return parseFromZipFile(ctx, file, fn) + return parseFromZipFile(ctx, file, digest, fn) default: return nil, fmt.Errorf("unsupported content type: %s", contentType) } diff --git a/server/routes.go b/server/routes.go index 5fbc2b54..4b3239e1 100644 --- a/server/routes.go +++ b/server/routes.go @@ -29,6 +29,7 @@ import ( "github.com/ollama/ollama/gpu" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/openai" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/server/envconfig" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -539,7 +540,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) { r = f } - modelfile, err := model.ParseFile(r) + modelfile, err := parser.ParseFile(r) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return @@ -840,6 +841,25 @@ func (s *Server) HeadBlobHandler(c *gin.Context) { } func (s *Server) CreateBlobHandler(c *gin.Context) { + if ib, ok := intermediateBlobs[c.Param("digest")]; ok { + p, err := GetBlobsPath(ib) + if err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) { + slog.Info("evicting intermediate blob which no longer exists", "digest", ib) + delete(intermediateBlobs, c.Param("digest")) + } else if err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } else { + c.Status(http.StatusOK) + return + } + } + path, err := GetBlobsPath(c.Param("digest")) if err != nil { 
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) diff --git a/server/routes_test.go b/server/routes_test.go index e144c957..a48819fe 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/ollama/ollama/api" - "github.com/ollama/ollama/types/model" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/version" ) @@ -56,7 +56,7 @@ func Test_Routes(t *testing.T) { fname := createTestFile(t, "ollama-model") r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname)) - modelfile, err := model.ParseFile(r) + modelfile, err := parser.ParseFile(r) assert.Nil(t, err) fn := func(resp api.ProgressResponse) { t.Logf("Status: %s", resp.Status) diff --git a/server/sched.go b/server/sched.go index ceddc526..8b97e354 100644 --- a/server/sched.go +++ b/server/sched.go @@ -220,7 +220,7 @@ func (s *Scheduler) processCompleted(ctx context.Context) { runner := s.loaded[finished.model.ModelPath] s.loadedMu.Unlock() if runner == nil { - slog.Error("finished requeset signal received after model unloaded", "modelPath", finished.model.ModelPath) + slog.Error("finished request signal received after model unloaded", "modelPath", finished.model.ModelPath) continue } runner.refMu.Lock() diff --git a/server/sched_test.go b/server/sched_test.go index 6a6dd04f..addc1ad8 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -151,7 +151,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV } func TestRequests(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond) + ctx, done := context.WithTimeout(context.Background(), time.Second) defer done() // Same model, same request
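
The changes to `llm/ext_server/server.cpp` and `llm/server.go` above replace the fixed ten-minute load timeout with a stall detector: while a model is loading, the runner's status payload now carries a `progress` value, and the Go side resets its deadline whenever that value advances. As a rough illustration of the pattern only (this is not the ollama implementation; the endpoint URL, port, window length, and function names below are invented for the sketch), a minimal poller could look like this:

```go
// Standalone sketch (assumed names and URL, not the real runner client):
// poll a health endpoint that returns {"status": "loading model", "progress": 0.42}
// and give up only if progress stops advancing for a full stall window.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// statusResp mirrors the shape of the payload described in the diff.
type statusResp struct {
	Status   string  `json:"status"`
	Progress float32 `json:"progress"`
}

func waitForRunner(url string, stallWindow time.Duration) error {
	deadline := time.Now().Add(stallWindow)
	var lastProgress float32 = -1

	for {
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for runner - progress %0.2f", lastProgress)
		}

		resp, err := http.Get(url)
		if err != nil {
			// The server may not be listening yet; retry until the stall window expires.
			time.Sleep(250 * time.Millisecond)
			continue
		}

		var status statusResp
		err = json.NewDecoder(resp.Body).Decode(&status)
		resp.Body.Close()
		if err != nil {
			time.Sleep(250 * time.Millisecond)
			continue
		}

		if status.Status == "ok" {
			return nil
		}

		// Reset the stall timer whenever the reported load progress advances,
		// so large models are not cut off while they are still making headway.
		if status.Progress != lastProgress {
			lastProgress = status.Progress
			deadline = time.Now().Add(stallWindow)
		}

		time.Sleep(250 * time.Millisecond)
	}
}

func main() {
	// Placeholder URL and window; the real runner's port is assigned at startup.
	if err := waitForRunner("http://127.0.0.1:8080/health", 60*time.Second); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("runner is ready")
}
```

The design choice matches the diff: a short window (60 seconds in the patched `WaitUntilRunning`) catches a hung runner quickly, while genuinely slow loads of large models keep extending the deadline as long as the reported progress keeps moving.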