From 105186aa179c7ccbac03d6719ab1c58ab87d6477 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Sat, 18 May 2024 11:51:57 -0700 Subject: [PATCH 01/31] add OLLAMA_NOHISTORY to turn off history in interactive mode (#4508) --- cmd/cmd.go | 31 ++++++++++++++++++++++++++----- cmd/interactive.go | 4 ++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index dff8d7c1..3b60334c 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1078,12 +1078,24 @@ func versionHandler(cmd *cobra.Command, _ []string) { } } -func appendHostEnvDocs(cmd *cobra.Command) { - const hostEnvDocs = ` +type EnvironmentVar struct { + Name string + Description string +} + +func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) { + if len(envs) == 0 { + return + } + + envUsage := ` Environment Variables: - OLLAMA_HOST The host:port or base URL of the Ollama server (e.g. http://localhost:11434) ` - cmd.SetUsageTemplate(cmd.UsageTemplate() + hostEnvDocs) + for _, e := range envs { + envUsage += fmt.Sprintf(" %-16s %s\n", e.Name, e.Description) + } + + cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage) } func NewCLI() *cobra.Command { @@ -1220,6 +1232,10 @@ Environment Variables: RunE: DeleteHandler, } + ollamaHostEnv := EnvironmentVar{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"} + ollamaNoHistoryEnv := EnvironmentVar{"OLLAMA_NOHISTORY", "Disable readline history"} + envs := []EnvironmentVar{ollamaHostEnv} + for _, cmd := range []*cobra.Command{ createCmd, showCmd, @@ -1231,7 +1247,12 @@ Environment Variables: copyCmd, deleteCmd, } { - appendHostEnvDocs(cmd) + switch cmd { + case runCmd: + appendEnvDocs(cmd, []EnvironmentVar{ollamaHostEnv, ollamaNoHistoryEnv}) + default: + appendEnvDocs(cmd, envs) + } } rootCmd.AddCommand( diff --git a/cmd/interactive.go b/cmd/interactive.go index 1078590c..f9157bd8 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -182,6 +182,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { return err } + if os.Getenv("OLLAMA_NOHISTORY") != "" { + scanner.HistoryDisable() + } + fmt.Print(readline.StartBracketedPaste) defer fmt.Printf(readline.EndBracketedPaste) From 63a453554d562fc07b885133b04639c41cd55f81 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sun, 19 May 2024 23:03:57 -0700 Subject: [PATCH 02/31] `go mod tidy` --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 784fa847..5d0d3c33 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( ) require ( + github.com/mattn/go-runewidth v0.0.14 github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c ) @@ -33,7 +34,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect From ccdf0b2a449d812a3708a3083f6a725289f4f750 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Mon, 20 May 2024 11:26:45 -0700 Subject: [PATCH 03/31] Move the parser back + handle utf16 files (#4533) --- cmd/cmd.go | 3 +- types/model/file.go => parser/parser.go | 30 ++++++++++++++- .../file_test.go => parser/parser_test.go | 38 ++++++++++++++++++- server/images.go | 23 +++++------ server/routes.go | 3 +- server/routes_test.go | 4 +- 6 files changed, 84 insertions(+), 17 deletions(-) rename 
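The UTF-16 handling added below leans on a detail of bufio.Reader.ReadRune: a UTF-16 byte order mark is not valid UTF-8, so each BOM byte comes back as unicode.ReplacementChar, which is the "unreadable" rune the new parser checks for at the start of the file. A small standalone demonstration, separate from the patch itself:

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "unicode"
)

func main() {
    // 0xFF 0xFE is a UTF-16 little-endian BOM; neither byte is valid UTF-8,
    // so ReadRune reports each one as the replacement character with size 1.
    br := bufio.NewReader(bytes.NewReader([]byte{0xff, 0xfe}))
    for i := 0; i < 2; i++ {
        r, size, err := br.ReadRune()
        fmt.Println(r == unicode.ReplacementChar, size, err) // true 1 <nil>
    }
}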
types/model/file.go => parser/parser.go (92%) rename types/model/file_test.go => parser/parser_test.go (92%) diff --git a/cmd/cmd.go b/cmd/cmd.go index 3b60334c..f79f8b97 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -35,6 +35,7 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/auth" "github.com/ollama/ollama/format" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/progress" "github.com/ollama/ollama/server" "github.com/ollama/ollama/types/errtypes" @@ -63,7 +64,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error { } defer f.Close() - modelfile, err := model.ParseFile(f) + modelfile, err := parser.ParseFile(f) if err != nil { return err } diff --git a/types/model/file.go b/parser/parser.go similarity index 92% rename from types/model/file.go rename to parser/parser.go index ee398309..4f44f6af 100644 --- a/types/model/file.go +++ b/parser/parser.go @@ -1,4 +1,4 @@ -package model +package parser import ( "bufio" @@ -8,6 +8,7 @@ import ( "io" "strconv" "strings" + "unicode" ) type File struct { @@ -68,6 +69,11 @@ func ParseFile(r io.Reader) (*File, error) { var b bytes.Buffer var role string + var lineCount int + var linePos int + + var utf16 bool + var f File br := bufio.NewReader(r) @@ -79,6 +85,17 @@ func ParseFile(r io.Reader) (*File, error) { return nil, err } + // the utf16 byte order mark will be read as "unreadable" by ReadRune() + if isUnreadable(r) && lineCount == 0 && linePos == 0 { + utf16 = true + continue + } + + // skip the second byte if we're reading utf16 + if utf16 && r == 0 { + continue + } + next, r, err := parseRuneForState(r, curr) if errors.Is(err, io.ErrUnexpectedEOF) { return nil, fmt.Errorf("%w: %s", err, b.String()) @@ -86,6 +103,13 @@ func ParseFile(r io.Reader) (*File, error) { return nil, err } + if isNewline(r) { + lineCount++ + linePos = 0 + } else { + linePos++ + } + // process the state transition, some transitions need to be intercepted and redirected if next != curr { switch curr { @@ -285,6 +309,10 @@ func isNewline(r rune) bool { return r == '\r' || r == '\n' } +func isUnreadable(r rune) bool { + return r == unicode.ReplacementChar +} + func isValidMessageRole(role string) bool { return role == "system" || role == "user" || role == "assistant" } diff --git a/types/model/file_test.go b/parser/parser_test.go similarity index 92% rename from types/model/file_test.go rename to parser/parser_test.go index 8e71760c..21223cb1 100644 --- a/types/model/file_test.go +++ b/parser/parser_test.go @@ -1,11 +1,13 @@ -package model +package parser import ( "bytes" + "encoding/binary" "fmt" "io" "strings" "testing" + "unicode/utf16" "github.com/stretchr/testify/assert" ) @@ -509,3 +511,37 @@ SYSTEM "" } } + +func TestParseFileUTF16ParseFile(t *testing.T) { + data := `FROM bob +PARAMETER param1 1 +PARAMETER param2 4096 +SYSTEM You are a utf16 file. 
+` + // simulate a utf16 le file + utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...)) + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.LittleEndian, utf16File) + assert.NoError(t, err) + + actual, err := ParseFile(buf) + assert.NoError(t, err) + + expected := []Command{ + {Name: "model", Args: "bob"}, + {Name: "param1", Args: "1"}, + {Name: "param2", Args: "4096"}, + {Name: "system", Args: "You are a utf16 file."}, + } + + assert.Equal(t, expected, actual.Commands) + + // simulate a utf16 be file + buf = new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, utf16File) + assert.NoError(t, err) + + actual, err = ParseFile(buf) + assert.NoError(t, err) + assert.Equal(t, expected, actual.Commands) +} diff --git a/server/images.go b/server/images.go index 3f415b6d..0ccc90b9 100644 --- a/server/images.go +++ b/server/images.go @@ -27,6 +27,7 @@ import ( "github.com/ollama/ollama/auth" "github.com/ollama/ollama/format" "github.com/ollama/ollama/llm" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/server/envconfig" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -61,36 +62,36 @@ func (m *Model) IsEmbedding() bool { } func (m *Model) String() string { - var modelfile model.File + var modelfile parser.File - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: m.ModelPath, }) for _, adapter := range m.AdapterPaths { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "adapter", Args: adapter, }) } for _, projector := range m.ProjectorPaths { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "model", Args: projector, }) } if m.Template != "" { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "template", Args: m.Template, }) } if m.System != "" { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "system", Args: m.System, }) @@ -100,13 +101,13 @@ func (m *Model) String() string { switch v := v.(type) { case []any: for _, s := range v { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", s), }) } default: - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: k, Args: fmt.Sprintf("%v", v), }) @@ -114,14 +115,14 @@ func (m *Model) String() string { } for _, license := range m.License { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "license", Args: license, }) } for _, msg := range m.Messages { - modelfile.Commands = append(modelfile.Commands, model.Command{ + modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "message", Args: fmt.Sprintf("%s %s", msg.Role, msg.Content), }) @@ -314,7 +315,7 @@ func realpath(rel, from string) string { return abspath } -func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *model.File, fn func(resp api.ProgressResponse)) (err error) { +func CreateModel(ctx context.Context, name, modelFileDir, quantization string, 
modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) { config := ConfigV2{ OS: "linux", Architecture: "amd64", diff --git a/server/routes.go b/server/routes.go index 5fbc2b54..fff228f3 100644 --- a/server/routes.go +++ b/server/routes.go @@ -29,6 +29,7 @@ import ( "github.com/ollama/ollama/gpu" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/openai" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/server/envconfig" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -539,7 +540,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) { r = f } - modelfile, err := model.ParseFile(r) + modelfile, err := parser.ParseFile(r) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return diff --git a/server/routes_test.go b/server/routes_test.go index e144c957..a48819fe 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/ollama/ollama/api" - "github.com/ollama/ollama/types/model" + "github.com/ollama/ollama/parser" "github.com/ollama/ollama/version" ) @@ -56,7 +56,7 @@ func Test_Routes(t *testing.T) { fname := createTestFile(t, "ollama-model") r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname)) - modelfile, err := model.ParseFile(r) + modelfile, err := parser.ParseFile(r) assert.Nil(t, err) fn := func(resp api.ProgressResponse) { t.Logf("Status: %s", resp.Status) From 3520c0e4d5c1cc845d178ec080b0967d18cf1796 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 10 May 2024 15:48:41 -0700 Subject: [PATCH 04/31] cache and reuse intermediate blobs particularly useful for zipfiles and f16s --- server/images.go | 27 ++++++++++++++++++++++++--- server/layer.go | 2 +- server/model.go | 23 +++++++++-------------- server/routes.go | 19 +++++++++++++++++++ 4 files changed, 53 insertions(+), 18 deletions(-) diff --git a/server/images.go b/server/images.go index 0ccc90b9..8e8fd921 100644 --- a/server/images.go +++ b/server/images.go @@ -340,7 +340,24 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } } else if strings.HasPrefix(c.Args, "@") { - blobpath, err := GetBlobsPath(strings.TrimPrefix(c.Args, "@")) + digest := strings.TrimPrefix(c.Args, "@") + if ib, ok := intermediateBlobs.Load(digest); ok { + p, err := GetBlobsPath(ib.(string)) + if err != nil { + return err + } + + if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) { + // pass + } else if err != nil { + return err + } else { + fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib.(string))}) + digest = ib.(string) + } + } + + blobpath, err := GetBlobsPath(digest) if err != nil { return err } @@ -351,14 +368,14 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m } defer blob.Close() - baseLayers, err = parseFromFile(ctx, blob, fn) + baseLayers, err = parseFromFile(ctx, blob, digest, fn) if err != nil { return err } } else if file, err := os.Open(realpath(modelFileDir, c.Args)); err == nil { defer file.Close() - baseLayers, err = parseFromFile(ctx, file, fn) + baseLayers, err = parseFromFile(ctx, file, "", fn) if err != nil { return err } @@ -398,10 +415,14 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } + f16digest := baseLayer.Layer.Digest + baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType) if err != nil { 
return err } + + intermediateBlobs.Store(f16digest, baseLayer.Layer.Digest) } } diff --git a/server/layer.go b/server/layer.go index dcca3854..9ca43046 100644 --- a/server/layer.go +++ b/server/layer.go @@ -80,7 +80,7 @@ func NewLayerFromLayer(digest, mediatype, from string) (*Layer, error) { }, nil } -func (l *Layer) Open() (io.ReadCloser, error) { +func (l *Layer) Open() (io.ReadSeekCloser, error) { blob, err := GetBlobsPath(l.Digest) if err != nil { return nil, err diff --git a/server/model.go b/server/model.go index eea5d13a..eabb8f3d 100644 --- a/server/model.go +++ b/server/model.go @@ -10,6 +10,7 @@ import ( "net/http" "os" "path/filepath" + "sync" "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" @@ -17,6 +18,8 @@ import ( "github.com/ollama/ollama/types/model" ) +var intermediateBlobs sync.Map + type layerWithGGML struct { *Layer *llm.GGML @@ -76,7 +79,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe return layers, nil } -func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { +func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { stat, err := file.Stat() if err != nil { return nil, err @@ -169,12 +172,7 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp return nil, fmt.Errorf("aaa: %w", err) } - blobpath, err := GetBlobsPath(layer.Digest) - if err != nil { - return nil, err - } - - bin, err := os.Open(blobpath) + bin, err := layer.Open() if err != nil { return nil, err } @@ -185,16 +183,13 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp return nil, err } - layer, err = NewLayerFromLayer(layer.Digest, layer.MediaType, "") - if err != nil { - return nil, err - } - layers = append(layers, &layerWithGGML{layer, ggml}) + + intermediateBlobs.Store(digest, layer.Digest) return layers, nil } -func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { +func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) { sr := io.NewSectionReader(file, 0, 512) contentType, err := detectContentType(sr) if err != nil { @@ -205,7 +200,7 @@ func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressRespo case "gguf", "ggla": // noop case "application/zip": - return parseFromZipFile(ctx, file, fn) + return parseFromZipFile(ctx, file, digest, fn) default: return nil, fmt.Errorf("unsupported content type: %s", contentType) } diff --git a/server/routes.go b/server/routes.go index fff228f3..12b11b5c 100644 --- a/server/routes.go +++ b/server/routes.go @@ -841,6 +841,25 @@ func (s *Server) HeadBlobHandler(c *gin.Context) { } func (s *Server) CreateBlobHandler(c *gin.Context) { + ib, ok := intermediateBlobs.Load(c.Param("digest")) + if ok { + p, err := GetBlobsPath(ib.(string)) + if err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) { + intermediateBlobs.Delete(c.Param("digest")) + } else if err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } else { + c.Status(http.StatusOK) + return + } + } + path, err := GetBlobsPath(c.Param("digest")) if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, 
gin.H{"error": err.Error()}) From e15307fdf4217f87a80fba3c9cd72d0f3d325848 Mon Sep 17 00:00:00 2001 From: Sam Date: Tue, 21 May 2024 06:36:03 +1000 Subject: [PATCH 05/31] feat: add support for flash_attn (#4120) * feat: enable flash attention if supported * feat: enable flash attention if supported * feat: enable flash attention if supported * feat: add flash_attn support --- llm/ext_server/server.cpp | 14 +++++++++++--- llm/server.go | 17 +++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 0c339989..3e03bb34 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -2104,6 +2104,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled"); printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel); printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n"); + printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled"); printf(" -spf FNAME, --system-prompt-file FNAME\n"); printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n"); printf(" -ctk TYPE, --cache-type-k TYPE\n"); @@ -2501,7 +2502,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.use_mmap = false; } - else if (arg == "--numa") { + else if (arg == "--numa") + { if (++i >= argc) { invalid_param = true; break; @@ -2521,6 +2523,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.cont_batching = true; } + else if (arg == "-fa" || arg == "--flash-attn") + { + params.flash_attn = true; + } else if (arg == "-np" || arg == "--parallel") { if (++i >= argc) @@ -2529,7 +2535,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, break; } params.n_parallel = std::stoi(argv[i]); - } else if (arg == "-n" || arg == "--n-predict") + } + else if (arg == "-n" || arg == "--n-predict") { if (++i >= argc) { @@ -2537,7 +2544,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, break; } params.n_predict = std::stoi(argv[i]); - } else if (arg == "-spf" || arg == "--system-prompt-file") + } + else if (arg == "-spf" || arg == "--system-prompt-file") { if (++i >= argc) { diff --git a/llm/server.go b/llm/server.go index ccb1e419..ba25fa21 100644 --- a/llm/server.go +++ b/llm/server.go @@ -200,6 +200,23 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--numa") } + flashAttnSupported := true + + // partial offloading does not support flash attention + if uint64(opts.NumGPU) < ggml.KV().BlockCount() + 1 { + flashAttnSupported = false + } + + // only cuda (compute capability 7+) and metal support flash attention + for _, g := range gpus { + if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) { + flashAttnSupported = false + } + } + if flashAttnSupported { + params = append(params, "--flash-attn") + } + numParallel := envconfig.NumParallel // TODO (jmorganca): multimodal models don't support parallel yet From 8800c8a59b3175b096a6e22c1ee58b3e5344ee0c Mon Sep 17 00:00:00 2001 From: alwqx Date: Tue, 21 May 2024 05:19:03 +0800 Subject: [PATCH 06/31] chore: fix typo in docs (#4536) --- docs/troubleshooting.md | 2 +- 
docs/windows.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2586e4e4..5971da5f 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -86,7 +86,7 @@ OLLAMA_TMPDIR=/usr/share/ollama/ ## Container fails to run on NVIDIA GPU -Make sure you've set up the conatiner runtime first as described in [docker.md](./docker.md) +Make sure you've set up the container runtime first as described in [docker.md](./docker.md) Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such diff --git a/docs/windows.md b/docs/windows.md index 242b810a..832b3d43 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -33,7 +33,7 @@ Here's a quick example showing API access from `powershell` ## Troubleshooting While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds -a "view logs" menu item to the app, and increses logging for the GUI app and +a "view logs" menu item to the app, and increases logging for the GUI app and server. Ollama on Windows stores files in a few different locations. You can view them in From f36f1d6be988848d24d269a6bc5e3697fa0fb18a Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 20 May 2024 14:58:27 -0700 Subject: [PATCH 07/31] tidy intermediate blobs --- server/images.go | 12 ++++-------- server/model.go | 7 +++---- server/routes.go | 8 ++++---- 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/server/images.go b/server/images.go index 8e8fd921..aaf66ae0 100644 --- a/server/images.go +++ b/server/images.go @@ -341,8 +341,8 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m } } else if strings.HasPrefix(c.Args, "@") { digest := strings.TrimPrefix(c.Args, "@") - if ib, ok := intermediateBlobs.Load(digest); ok { - p, err := GetBlobsPath(ib.(string)) + if ib, ok := intermediateBlobs[digest]; ok { + p, err := GetBlobsPath(ib) if err != nil { return err } @@ -352,8 +352,8 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m } else if err != nil { return err } else { - fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib.(string))}) - digest = ib.(string) + fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib)}) + digest = ib } } @@ -415,14 +415,10 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } - f16digest := baseLayer.Layer.Digest - baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType) if err != nil { return err } - - intermediateBlobs.Store(f16digest, baseLayer.Layer.Digest) } } diff --git a/server/model.go b/server/model.go index eabb8f3d..fcf406f6 100644 --- a/server/model.go +++ b/server/model.go @@ -10,7 +10,6 @@ import ( "net/http" "os" "path/filepath" - "sync" "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" @@ -18,7 +17,7 @@ import ( "github.com/ollama/ollama/types/model" ) -var intermediateBlobs sync.Map +var intermediateBlobs map[string]string = make(map[string]string) type layerWithGGML struct { *Layer @@ -169,7 +168,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a layer, err := NewLayer(temp, "application/vnd.ollama.image.model") if err != nil { - return nil, fmt.Errorf("aaa: %w", err) + return nil, err } bin, err := layer.Open() @@ -185,7 +184,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a layers = append(layers, 
&layerWithGGML{layer, ggml}) - intermediateBlobs.Store(digest, layer.Digest) + intermediateBlobs[digest] = layer.Digest return layers, nil } diff --git a/server/routes.go b/server/routes.go index 12b11b5c..4b3239e1 100644 --- a/server/routes.go +++ b/server/routes.go @@ -841,16 +841,16 @@ func (s *Server) HeadBlobHandler(c *gin.Context) { } func (s *Server) CreateBlobHandler(c *gin.Context) { - ib, ok := intermediateBlobs.Load(c.Param("digest")) - if ok { - p, err := GetBlobsPath(ib.(string)) + if ib, ok := intermediateBlobs[c.Param("digest")]; ok { + p, err := GetBlobsPath(ib) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) { - intermediateBlobs.Delete(c.Param("digest")) + slog.Info("evicting intermediate blob which no longer exists", "digest", ib) + delete(intermediateBlobs, c.Param("digest")) } else if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return From 807d092761a1100704d260fccb6f0bc679f9b98e Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 17 May 2024 11:29:04 -0700 Subject: [PATCH 08/31] fix quantize file types --- server/images.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/images.go b/server/images.go index aaf66ae0..520c899b 100644 --- a/server/images.go +++ b/server/images.go @@ -415,10 +415,17 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m return err } - baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType) + layers, err := parseFromFile(ctx, temp, "", fn) if err != nil { return err } + + if len(layers) != 1 { + return errors.New("quantization failed") + } + + baseLayer.Layer = layers[0].Layer + baseLayer.GGML = layers[0].GGML } } From 8aadad9c721efc000d4ecc8cbdc1f4aeb7d64da1 Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Mon, 20 May 2024 15:24:32 -0700 Subject: [PATCH 09/31] updated updateURL --- llm/llama.cpp | 2 +- macapp/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/llama.cpp b/llm/llama.cpp index 614d3b91..952d03db 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 614d3b914e1c3e02596f869649eb4f1d3b68614d +Subproject commit 952d03dbead16e4dbdd1d3458486340673cc2465 diff --git a/macapp/src/index.ts b/macapp/src/index.ts index 28dac136..a5d04d5f 100644 --- a/macapp/src/index.ts +++ b/macapp/src/index.ts @@ -162,7 +162,7 @@ app.on('before-quit', () => { } }) -const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${ +const updateURL = `https://ollama.com/api/update?os=${process.platform}&arch=${ process.arch }&version=${app.getVersion()}&id=${id()}` From 5cab13739ea54658c001b03280597a87d94d5851 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Mon, 20 May 2024 15:28:17 -0700 Subject: [PATCH 10/31] set llama.cpp submodule commit to `614d3b9` --- llm/llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/llama.cpp b/llm/llama.cpp index 952d03db..614d3b91 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 952d03dbead16e4dbdd1d3458486340673cc2465 +Subproject commit 614d3b914e1c3e02596f869649eb4f1d3b68614d From d88582dffd4a4ff0dcf7f347091f023945f9a26f Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Thu, 18 Apr 2024 16:00:20 -0700 Subject: [PATCH 11/31] some changes for llama3 --- convert/convert.go | 3 ++- convert/torch.go | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff 
--git a/convert/convert.go b/convert/convert.go index f4210e50..dbc26da1 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -77,7 +77,8 @@ func GetModelFormat(dirname string) (ModelFormat, error) { slog.Debug(fmt.Sprintf("file = %s", fn)) if strings.HasSuffix(fn, ".safetensors") { return &SafetensorFormat{}, nil - } else if strings.HasSuffix(fn, ".bin") { + //} else if strings.HasSuffix(fn, ".bin") { + } else if strings.HasSuffix(fn, ".pth") { slog.Debug("model is torch") return &TorchFormat{}, nil } diff --git a/convert/torch.go b/convert/torch.go index 92c58872..0ad10c0e 100644 --- a/convert/torch.go +++ b/convert/torch.go @@ -33,7 +33,8 @@ type TorchFormat struct{} func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) { slog.Debug("getting torch tensors") - files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) + //files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) + files, err := filepath.Glob(filepath.Join(dirpath, "consolidatedr.*.pth")) if err != nil { slog.Error("didn't find any torch files") return nil, err @@ -120,7 +121,7 @@ func getAltParams(dirpath string) (*Params, error) { AttentionHeads int `json:"n_heads"` KeyValHeads int `json:"n_kv_heads"` HiddenLayers int `json:"n_layers"` - RopeTheta int `json:"rope_theta"` + RopeTheta float64 `json:"rope_theta"` NormEPS float64 `json:"norm_eps"` } @@ -133,6 +134,7 @@ func getAltParams(dirpath string) (*Params, error) { } params := &Params{ + Architectures: []string{"LlamaForCausalLM"}, HiddenSize: tparams.HiddenSize, AttentionHeads: tparams.AttentionHeads, KeyValHeads: tparams.KeyValHeads, From 4730762e5c9453f304aa456b549530e165ff1936 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Wed, 24 Apr 2024 18:32:01 -0700 Subject: [PATCH 12/31] add safetensors version --- convert/llama.go | 15 +++++++++++---- convert/safetensors.go | 9 +++++++++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/convert/llama.go b/convert/llama.go index fb576e2e..5dfb8d7d 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -20,7 +20,7 @@ type LlamaModel struct { ModelData } -func llamaLayerHandler(w io.Writer, r torchWriterTo) error { +func llamaTorchLayerHandler(w io.Writer, r torchWriterTo) error { slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name)) data := r.storage.(*pytorch.HalfStorage).Data @@ -105,9 +105,16 @@ func (m *LlamaModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name)) - wt := l.WriterTo.(torchWriterTo) - wt.handler = llamaLayerHandler - l.WriterTo = wt + switch l.WriterTo.(type) { + case torchWriterTo: + wt := l.WriterTo.(torchWriterTo) + wt.handler = llamaTorchLayerHandler + l.WriterTo = wt + case safetensorWriterTo: + wt := l.WriterTo.(safetensorWriterTo) + wt.handler = mistralLayerHandler + l.WriterTo = wt + } } m.Tensors = append(m.Tensors, l) } diff --git a/convert/safetensors.go b/convert/safetensors.go index 69424c4d..64aaf866 100644 --- a/convert/safetensors.go +++ b/convert/safetensors.go @@ -281,6 +281,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M return nil, fmt.Errorf("No architecture specified to convert") case 1: switch params.Architectures[0] { + case "LlamaForCausalLM": + return &LlamaModel{ + ModelData{ + Name: name, + Path: dirPath, + Params: params, + Format: m, + }, + }, nil case "MistralForCausalLM": return &MistralModel{ ModelData{ From 
c8cf0d94edeae0c71e3a0877895d9519b5d4d5e3 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Sun, 28 Apr 2024 10:36:38 -0700 Subject: [PATCH 13/31] llama3 conversion --- convert/convert.go | 1 + convert/llama.go | 70 +++++++++++++++++++++++++++++++++++----------- llm/gguf.go | 1 + 3 files changed, 56 insertions(+), 16 deletions(-) diff --git a/convert/convert.go b/convert/convert.go index dbc26da1..899c8c44 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -93,6 +93,7 @@ type Vocab struct { Tokens []string Scores []float32 Types []int32 + Merges []string } func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) { diff --git a/convert/llama.go b/convert/llama.go index 5dfb8d7d..8cb162e7 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -5,6 +5,8 @@ import ( "fmt" "io" "log/slog" + "os" + "path/filepath" "regexp" "strings" @@ -105,12 +107,12 @@ func (m *LlamaModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name)) - switch l.WriterTo.(type) { - case torchWriterTo: + switch m.Format.(type) { + case *TorchFormat: wt := l.WriterTo.(torchWriterTo) wt.handler = llamaTorchLayerHandler l.WriterTo = wt - case safetensorWriterTo: + case *SafetensorFormat: wt := l.WriterTo.(safetensorWriterTo) wt.handler = mistralLayerHandler l.WriterTo = wt @@ -123,18 +125,46 @@ func (m *LlamaModel) GetTensors() error { } func (m *LlamaModel) LoadVocab() error { - var v *Vocab - var err error - - slog.Debug("loading vocab") - v, err = LoadSentencePieceTokens(m.Path, m.Params) - if err != nil { - return err + v := &Vocab{ + Tokens: []string{}, + Types: []int32{}, + Merges: []string{}, } - slog.Debug("vocab loaded") + tokpath := filepath.Join(m.Path, "tokenizer.json") + slog.Debug(fmt.Sprintf("looking for %s", tokpath)) + if _, err := os.Stat(tokpath); !os.IsNotExist(err) { + t, err := newTokenizer(tokpath) + if err != nil { + return err + } + for _, tok := range t.Model.Tokens { + v.Tokens = append(v.Tokens, tok.Content) + var tokType int32 + switch { + case tok.Special: + tokType = 3 + case tok.UserDefined: + tokType = 4 + default: + tokType = 1 + } + v.Types = append(v.Types, tokType) + } + v.Merges = t.Model.Merges + } else { + slog.Debug("loading sentence piece vocab") + v, err = LoadSentencePieceTokens(m.Path, m.Params) + if err != nil { + return err + } + + slog.Debug("vocab loaded") + + } m.Vocab = v + return nil } @@ -147,22 +177,30 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "llama.embedding_length": uint32(m.Params.HiddenSize), "llama.block_count": uint32(m.Params.HiddenLayers), "llama.feed_forward_length": uint32(m.Params.IntermediateSize), + "llama.rope.freq_base": float32(m.Params.RopeFrequencyBase), "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads), "llama.attention.head_count": uint32(m.Params.AttentionHeads), "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads), "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS), - "general.file_type": uint32(1), - "tokenizer.ggml.model": "llama", + //"general.file_type": uint32(1), + "general.file_type": uint32(2), + //"tokenizer.ggml.model": "llama", + "tokenizer.ggml.model": "gpt2", "tokenizer.ggml.tokens": m.Vocab.Tokens, - "tokenizer.ggml.scores": m.Vocab.Scores, "tokenizer.ggml.token_type": m.Vocab.Types, "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID), "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID), 
"tokenizer.ggml.unknown_token_id": uint32(0), - "tokenizer.ggml.add_bos_token": true, - "tokenizer.ggml.add_eos_token": false, + //"tokenizer.ggml.add_bos_token": true, + //"tokenizer.ggml.add_eos_token": false, + } + + if len(m.Vocab.Merges) > 0 { + kv["tokenizer.ggml.merges"] = m.Vocab.Merges + } else { + kv["tokenizer.ggml.scores"] = m.Vocab.Scores } return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) diff --git a/llm/gguf.go b/llm/gguf.go index 5f6e8004..c3cc3d41 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -483,6 +483,7 @@ var ggufKVOrder = map[string][]string{ "tokenizer.ggml.model", "tokenizer.ggml.tokens", "tokenizer.ggml.scores", + "tokenizer.ggml.merges", "tokenizer.ggml.token_type", "tokenizer.ggml.bos_token_id", "tokenizer.ggml.eos_token_id", From d355d2020fcfc54c375eb697b7873742c3851881 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Wed, 8 May 2024 16:07:46 -0700 Subject: [PATCH 14/31] add fixes for llama --- cmd/cmd.go | 2 +- convert/convert.go | 4 +--- convert/llama.go | 33 +++++++++++++++++++-------------- convert/safetensors.go | 2 ++ convert/torch.go | 38 ++++++++++++++++++++++++++++++++------ 5 files changed, 55 insertions(+), 24 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index f79f8b97..5d919d9a 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -208,7 +208,7 @@ func tempZipFiles(path string) (string, error) { // pytorch files might also be unresolved git lfs references; skip if they are // covers pytorch_model-x-of-y.bin, pytorch_model.fp32-x-of-y.bin, pytorch_model.bin files = append(files, pt...) - } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/octet-stream"); len(pt) > 0 { + } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/zip"); len(pt) > 0 { // pytorch files might also be unresolved git lfs references; skip if they are // covers consolidated.x.pth, consolidated.pth files = append(files, pt...) 
diff --git a/convert/convert.go b/convert/convert.go index 899c8c44..9a05fb52 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -74,11 +74,9 @@ func GetModelFormat(dirname string) (ModelFormat, error) { } for _, fn := range files { - slog.Debug(fmt.Sprintf("file = %s", fn)) if strings.HasSuffix(fn, ".safetensors") { return &SafetensorFormat{}, nil - //} else if strings.HasSuffix(fn, ".bin") { - } else if strings.HasSuffix(fn, ".pth") { + } else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") { slog.Debug("model is torch") return &TorchFormat{}, nil } diff --git a/convert/llama.go b/convert/llama.go index 8cb162e7..9fdcd02b 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -23,12 +23,24 @@ type LlamaModel struct { } func llamaTorchLayerHandler(w io.Writer, r torchWriterTo) error { - slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name)) - data := r.storage.(*pytorch.HalfStorage).Data - tData := make([]uint16, len(data)) - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) + var tData []uint16 + switch r.storage.(type) { + case *pytorch.HalfStorage: + data := r.storage.(*pytorch.HalfStorage).Data + tData = make([]uint16, len(data)) + for cnt, v := range data { + tData[cnt] = uint16(float16.Fromfloat32(v)) + } + case *pytorch.BFloat16Storage: + data := r.storage.(*pytorch.BFloat16Storage).Data + tData = make([]uint16, len(data)) + + for cnt, v := range data { + tData[cnt] = uint16(float16.Fromfloat32(v)) + } + default: + return fmt.Errorf("unknown storage type for torch") } var err error @@ -44,8 +56,6 @@ func llamaTorchLayerHandler(w io.Writer, r torchWriterTo) error { return fmt.Errorf("unknown layer type") } - slog.Debug(fmt.Sprintf("heads = %d", heads)) - tData, err = llamaRepack(tData, int(heads), r.t.Shape) if err != nil { return err @@ -106,7 +116,6 @@ func (m *LlamaModel) GetTensors() error { for _, l := range t { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { - slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name)) switch m.Format.(type) { case *TorchFormat: wt := l.WriterTo.(torchWriterTo) @@ -182,10 +191,8 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "llama.attention.head_count": uint32(m.Params.AttentionHeads), "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads), "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS), - //"general.file_type": uint32(1), - "general.file_type": uint32(2), - //"tokenizer.ggml.model": "llama", - "tokenizer.ggml.model": "gpt2", + "general.file_type": uint32(2), + "tokenizer.ggml.model": "gpt2", "tokenizer.ggml.tokens": m.Vocab.Tokens, "tokenizer.ggml.token_type": m.Vocab.Types, @@ -193,8 +200,6 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID), "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID), "tokenizer.ggml.unknown_token_id": uint32(0), - //"tokenizer.ggml.add_bos_token": true, - //"tokenizer.ggml.add_eos_token": false, } if len(m.Vocab.Merges) > 0 { diff --git a/convert/safetensors.go b/convert/safetensors.go index 64aaf866..b52a048d 100644 --- a/convert/safetensors.go +++ b/convert/safetensors.go @@ -131,6 +131,8 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) shape[i] = uint64(data.Shape[i]) } + slog.Debug(fmt.Sprintf("'%45s': '%30s' %10d [%#v]", k, ggufName, size, data.Shape)) + t := llm.Tensor{ Name: ggufName, Kind: kind, diff --git a/convert/torch.go b/convert/torch.go index 0ad10c0e..803827ba 100644 --- 
a/convert/torch.go +++ b/convert/torch.go @@ -33,11 +33,15 @@ type TorchFormat struct{} func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) { slog.Debug("getting torch tensors") - //files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) - files, err := filepath.Glob(filepath.Join(dirpath, "consolidatedr.*.pth")) + var files []string + var err error + files, err = filepath.Glob(filepath.Join(dirpath, "consolidated.*.pth")) if err != nil { - slog.Error("didn't find any torch files") - return nil, err + files, err = filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) + if err != nil { + slog.Error("didn't find any torch files") + return nil, err + } } var offset uint64 @@ -78,7 +82,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, slog.Error(err.Error()) return nil, err } - slog.Debug(fmt.Sprintf("finding name for '%s' -> '%s'", k.(string), ggufName)) + slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape)) shape := []uint64{0, 0, 0, 0} for i := range tshape { @@ -236,7 +240,7 @@ func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) { return 0, r.handler(w, r) } - switch r.storage.(type) { + switch storage := r.storage.(type) { case *pytorch.FloatStorage: slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name)) return 0, nil @@ -259,6 +263,28 @@ func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) { return 0, err } } + case *pytorch.BFloat16Storage: + data := r.storage.(*pytorch.BFloat16Storage).Data + switch r.t.Kind { + case 0: + if err = binary.Write(w, r.bo, data); err != nil { + return 0, err + } + case 1: + tData := make([]uint16, len(data)) + + for cnt, v := range data { + tData[cnt] = uint16(float16.Fromfloat32(v)) + } + + if err = binary.Write(w, r.bo, tData); err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("unknown storage kind: %d", r.t.Kind) + } + default: + return 0, fmt.Errorf("unknown storage type: %T", storage) } return 0, nil From 2d315ba9a984f8db8f108b967b3af6fa4aa67669 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Wed, 8 May 2024 16:56:18 -0700 Subject: [PATCH 15/31] add missing file --- convert/tokenizer.go | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 convert/tokenizer.go diff --git a/convert/tokenizer.go b/convert/tokenizer.go new file mode 100644 index 00000000..a7da81e6 --- /dev/null +++ b/convert/tokenizer.go @@ -0,0 +1,72 @@ +package convert + +import ( + "encoding/json" + "io/ioutil" + "os" +) + +type Tokenizer struct { + Version string `json:"version"` + AddedTokens []Token `json:"added_tokens"` + Model TokenizerModel `json:"model"` +} + +type TokenizerModel struct { + Type string `json:"type"` + Vocab map[string]int `json:"vocab"` + Merges []string `json:"merges"` + Tokens []Token +} + +type Token struct { + ID int `json:"id"` + Content string `json:"content"` + Special bool `json:"special"` + UserDefined bool +} + +func (t *Tokenizer) getMaxID() int { + var maxID int + for _, v := range t.Model.Vocab { + maxID = max(maxID, v) + } + + for _, v := range t.AddedTokens { + maxID = max(maxID, v.ID) + } + return maxID +} + +func newTokenizer(dirpath string) (*Tokenizer, error) { + f, err := os.Open(dirpath) + if err != nil { + panic(err) + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + var tdata Tokenizer + + if err := json.Unmarshal(data, &tdata); err != 
nil { + return nil, err + } + + maxID := tdata.getMaxID() + tdata.Model.Tokens = make([]Token, maxID+1) + + for k, v := range tdata.Model.Vocab { + tdata.Model.Tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false} + } + + for _, v := range tdata.AddedTokens { + v.UserDefined = true + tdata.Model.Tokens[v.ID] = v + } + + return &tdata, nil +} From 547132e820dcdc20c325d1de876a86a708b5744e Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 15 May 2024 11:53:14 -0700 Subject: [PATCH 16/31] bpe pretokenizer --- convert/convert.go | 2 + convert/llama.go | 46 +++++++--------------- convert/tokenizer.go | 93 +++++++++++++++++++++++++++++++------------- llm/gguf.go | 1 + 4 files changed, 83 insertions(+), 59 deletions(-) diff --git a/convert/convert.go b/convert/convert.go index 9a05fb52..e9c2ef2d 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -37,6 +37,8 @@ type Params struct { Experts int `json:"num_local_experts"` ExpertsUsed int `json:"num_experts_per_tok"` + PreTokenizer string + ByteOrder } diff --git a/convert/llama.go b/convert/llama.go index 9fdcd02b..83d942cb 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -2,9 +2,9 @@ package convert import ( "encoding/binary" + "errors" "fmt" "io" - "log/slog" "os" "path/filepath" "regexp" @@ -134,44 +134,27 @@ func (m *LlamaModel) GetTensors() error { } func (m *LlamaModel) LoadVocab() error { - v := &Vocab{ - Tokens: []string{}, - Types: []int32{}, - Merges: []string{}, - } + v := &Vocab{} tokpath := filepath.Join(m.Path, "tokenizer.json") - slog.Debug(fmt.Sprintf("looking for %s", tokpath)) - if _, err := os.Stat(tokpath); !os.IsNotExist(err) { - t, err := newTokenizer(tokpath) - if err != nil { - return err - } - - for _, tok := range t.Model.Tokens { - v.Tokens = append(v.Tokens, tok.Content) - var tokType int32 - switch { - case tok.Special: - tokType = 3 - case tok.UserDefined: - tokType = 4 - default: - tokType = 1 - } - v.Types = append(v.Types, tokType) - } - v.Merges = t.Model.Merges - } else { - slog.Debug("loading sentence piece vocab") + pre, ts, merges, err := parseTokens(tokpath) + if errors.Is(err, os.ErrNotExist) { v, err = LoadSentencePieceTokens(m.Path, m.Params) if err != nil { return err } + } else if err != nil { + return err + } else { + for _, t := range ts { + v.Tokens = append(v.Tokens, t.Content) + v.Types = append(v.Types, t.Type()) + } - slog.Debug("vocab loaded") - + m.Params.PreTokenizer = pre + v.Merges = merges } + m.Vocab = v return nil @@ -194,6 +177,7 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "general.file_type": uint32(2), "tokenizer.ggml.model": "gpt2", + "tokenizer.ggml.pre": m.Params.PreTokenizer, "tokenizer.ggml.tokens": m.Vocab.Tokens, "tokenizer.ggml.token_type": m.Vocab.Types, diff --git a/convert/tokenizer.go b/convert/tokenizer.go index a7da81e6..a847a84c 100644 --- a/convert/tokenizer.go +++ b/convert/tokenizer.go @@ -1,15 +1,30 @@ package convert import ( + "cmp" + "crypto/sha256" "encoding/json" - "io/ioutil" + "fmt" + "log/slog" "os" + "slices" + + "golang.org/x/exp/maps" ) type Tokenizer struct { Version string `json:"version"` AddedTokens []Token `json:"added_tokens"` Model TokenizerModel `json:"model"` + + PreTokenizer struct { + PreTokenziers []struct { + Type string `json:"type"` + Pattern struct { + Regex string `json:"Regex"` + } `json:"pattern"` + } `json:"pretokenizers"` + } `json:"pre_tokenizer"` } type TokenizerModel struct { @@ -26,47 +41,69 @@ type Token struct { UserDefined bool } -func (t *Tokenizer) getMaxID() int { - var 
maxID int - for _, v := range t.Model.Vocab { - maxID = max(maxID, v) +func (t *Token) Type() int32 { + switch { + case t.Special: + return 3 + case t.UserDefined: + return 4 + default: + return 1 } - - for _, v := range t.AddedTokens { - maxID = max(maxID, v.ID) - } - return maxID } -func newTokenizer(dirpath string) (*Tokenizer, error) { +func (t *Tokenizer) maxID() int { + return max( + slices.Max(maps.Values(t.Model.Vocab)), + slices.MaxFunc(t.AddedTokens, func(a, b Token) int { + return cmp.Compare(a.ID, b.ID) + }).ID, + ) +} + +func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) { f, err := os.Open(dirpath) if err != nil { panic(err) } defer f.Close() - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err + var t Tokenizer + if err := json.NewDecoder(f).Decode(&t); err != nil { + return "", nil, nil, err } - var tdata Tokenizer - - if err := json.Unmarshal(data, &tdata); err != nil { - return nil, err + tokens = make([]Token, t.maxID()+1) + for k, v := range t.Model.Vocab { + tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false} } - maxID := tdata.getMaxID() - tdata.Model.Tokens = make([]Token, maxID+1) - - for k, v := range tdata.Model.Vocab { - tdata.Model.Tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false} - } - - for _, v := range tdata.AddedTokens { + for _, v := range t.AddedTokens { v.UserDefined = true - tdata.Model.Tokens[v.ID] = v + tokens[v.ID] = v } - return &tdata, nil + sha256sum := sha256.New() + for _, pt := range t.PreTokenizer.PreTokenziers { + switch pt.Type { + case "Split": + if pt.Pattern.Regex != "" { + sha256sum.Write([]byte(pt.Pattern.Regex)) + } + } + } + + switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest { + case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f": + pre = "llama-bpe" + case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02": + pre = "deepseek-llm" + case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e": + pre = "deepseek-coder" + default: + slog.Warn("unknown pretokenizer, using default", "digest", digest) + pre = "default" + } + + return pre, tokens, t.Model.Merges, nil } diff --git a/llm/gguf.go b/llm/gguf.go index c3cc3d41..179b3255 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -480,6 +480,7 @@ var ggufKVOrder = map[string][]string{ "gemma.attention.key_length", "gemma.attention.value_length", "general.file_type", + "tokenizer.ggml.pre", "tokenizer.ggml.model", "tokenizer.ggml.tokens", "tokenizer.ggml.scores", From bbbd9f20f313af308bf4d573994e01fd5d5f7170 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 15 May 2024 14:55:57 -0700 Subject: [PATCH 17/31] cleanup --- convert/convert.go | 14 ++++++++++++-- convert/gemma.go | 2 -- convert/llama.go | 32 +++++++++++--------------------- convert/mistral.go | 2 -- convert/mixtral.go | 2 -- convert/safetensors.go | 5 +++++ convert/tokenizer.go | 6 +++--- convert/torch.go | 13 ++++--------- llm/gguf.go | 10 ---------- 9 files changed, 35 insertions(+), 51 deletions(-) diff --git a/convert/convert.go b/convert/convert.go index e9c2ef2d..e71a0ff3 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -18,6 +18,16 @@ import ( "github.com/ollama/ollama/llm" ) +const ( + _ int32 = iota + tokenTypeNormal + tokenTypeUnknown + tokenTypeControl + tokenTypeUserDefined + tokenTypeUnused + tokenTypeByte +) + type Params struct { Architectures []string `json:"architectures"` VocabSize int `json:"vocab_size"` @@ -172,7 +182,7 @@ func 
LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) { } v.Tokens = append(v.Tokens, t.key) v.Scores = append(v.Scores, -1000.0) - v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined)) + v.Types = append(v.Types, tokenTypeUserDefined) } slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens))) @@ -182,7 +192,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) { for cnt := 0; cnt < missingTokens; cnt++ { v.Tokens = append(v.Tokens, fmt.Sprintf("", cnt+1)) v.Scores = append(v.Scores, -1) - v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined)) + v.Types = append(v.Types, tokenTypeUserDefined) } } diff --git a/convert/gemma.go b/convert/gemma.go index 88abe646..e24b8ec5 100644 --- a/convert/gemma.go +++ b/convert/gemma.go @@ -71,8 +71,6 @@ func (m *GemmaModel) GetTensors() error { } slog.Debug(fmt.Sprintf("Total tensors: %d", len(t))) - - m.Tensors = []llm.Tensor{} for _, l := range t { if strings.HasSuffix(l.Name, "norm.weight") { wt := l.WriterTo.(safetensorWriterTo) diff --git a/convert/llama.go b/convert/llama.go index 83d942cb..a10670e6 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -105,8 +105,6 @@ func (m *LlamaModel) GetTensors() error { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { @@ -133,30 +131,22 @@ func (m *LlamaModel) GetTensors() error { return nil } -func (m *LlamaModel) LoadVocab() error { - v := &Vocab{} - - tokpath := filepath.Join(m.Path, "tokenizer.json") - pre, ts, merges, err := parseTokens(tokpath) +func (m *LlamaModel) LoadVocab() (err error) { + pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json")) if errors.Is(err, os.ErrNotExist) { - v, err = LoadSentencePieceTokens(m.Path, m.Params) - if err != nil { - return err - } + return nil } else if err != nil { return err - } else { - for _, t := range ts { - v.Tokens = append(v.Tokens, t.Content) - v.Types = append(v.Types, t.Type()) - } - - m.Params.PreTokenizer = pre - v.Merges = merges } - m.Vocab = v + m.Vocab = &Vocab{} + for _, t := range ts { + m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content) + m.Vocab.Types = append(m.Vocab.Types, t.Type()) + } + m.Vocab.Merges = merges + m.Params.PreTokenizer = pre return nil } @@ -174,7 +164,7 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { "llama.attention.head_count": uint32(m.Params.AttentionHeads), "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads), "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS), - "general.file_type": uint32(2), + "general.file_type": uint32(1), "tokenizer.ggml.model": "gpt2", "tokenizer.ggml.pre": m.Params.PreTokenizer, diff --git a/convert/mistral.go b/convert/mistral.go index f88de12b..89d2e084 100644 --- a/convert/mistral.go +++ b/convert/mistral.go @@ -102,8 +102,6 @@ func (m *MistralModel) GetTensors() error { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { diff --git a/convert/mixtral.go b/convert/mixtral.go index 940df55d..66546fd7 100644 --- a/convert/mixtral.go +++ b/convert/mixtral.go @@ -17,8 +17,6 @@ func (m *MixtralModel) GetTensors() error { return err } - m.Tensors = []llm.Tensor{} - pattern := `^blk\.[0-9]+\.attn_(?Pq|k)\.weight$` re, err := regexp.Compile(pattern) if err != nil { diff --git a/convert/safetensors.go b/convert/safetensors.go index b52a048d..2107ae81 100644 --- a/convert/safetensors.go 
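A quick standalone check, not from the patch, that the iota block patch 17 adds to convert/convert.go reproduces the values of the llm.GGUFToken* constants it removes; these are the codes that end up under "tokenizer.ggml.token_type":

package main

import "fmt"

// Same shape as the const block added to convert/convert.go above.
const (
    _ int32 = iota
    tokenTypeNormal
    tokenTypeUnknown
    tokenTypeControl
    tokenTypeUserDefined
    tokenTypeUnused
    tokenTypeByte
)

func main() {
    // Prints 1 2 3 4 5 6, matching the removed GGUFTokenNormal..GGUFTokenByte.
    fmt.Println(tokenTypeNormal, tokenTypeUnknown, tokenTypeControl,
        tokenTypeUserDefined, tokenTypeUnused, tokenTypeByte)
}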
+++ b/convert/safetensors.go @@ -11,6 +11,7 @@ import ( "path/filepath" "regexp" "slices" + "strings" "github.com/d4l3k/go-bfloat16" "github.com/mitchellh/mapstructure" @@ -97,6 +98,10 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) var tensors []llm.Tensor for _, k := range keys { + if strings.HasSuffix(k, "self_attn.rotary_emb.inv_freq") { + continue + } + vals := parsed[k].(map[string]interface{}) var data tensorMetaData if err = mapstructure.Decode(vals, &data); err != nil { diff --git a/convert/tokenizer.go b/convert/tokenizer.go index a847a84c..e0fe0bb7 100644 --- a/convert/tokenizer.go +++ b/convert/tokenizer.go @@ -44,11 +44,11 @@ type Token struct { func (t *Token) Type() int32 { switch { case t.Special: - return 3 + return tokenTypeControl case t.UserDefined: - return 4 + return tokenTypeUserDefined default: - return 1 + return tokenTypeNormal } } diff --git a/convert/torch.go b/convert/torch.go index 803827ba..cb8d74b0 100644 --- a/convert/torch.go +++ b/convert/torch.go @@ -34,18 +34,13 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, slog.Debug("getting torch tensors") var files []string - var err error - files, err = filepath.Glob(filepath.Join(dirpath, "consolidated.*.pth")) - if err != nil { - files, err = filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin")) - if err != nil { - slog.Error("didn't find any torch files") - return nil, err - } + if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 { + files = append(files, pt...) + } else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 { + files = append(files, pt...) } var offset uint64 - var tensors []llm.Tensor for _, fn := range files { m, err := pytorch.Load(fn) diff --git a/llm/gguf.go b/llm/gguf.go index 179b3255..eb7d7b75 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -62,16 +62,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) { return model, nil } -const ( - _ uint32 = iota - GGUFTokenNormal - GGUFTokenUnknown - GGUFTokenControl - GGUFTokenUserDefined - GGUFTokenUnused - GGUFTokenByte -) - const ( ggufTypeUint8 uint32 = iota ggufTypeInt8 From 34d5ef29b3d01e2a0785af96df1135dfec567a3e Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 17 May 2024 12:11:49 -0700 Subject: [PATCH 18/31] fix conversion for f16 or f32 inputs --- convert/gemma.go | 49 +++++---------- convert/llama.go | 136 ++++++++++++++++------------------------- convert/mistral.go | 91 ++------------------------- convert/mixtral.go | 6 +- convert/safetensors.go | 85 ++++++++++++++------------ convert/torch.go | 77 +++++++++-------------- go.mod | 2 +- 7 files changed, 152 insertions(+), 294 deletions(-) diff --git a/convert/gemma.go b/convert/gemma.go index e24b8ec5..9dc406e0 100644 --- a/convert/gemma.go +++ b/convert/gemma.go @@ -1,14 +1,11 @@ package convert import ( - "encoding/binary" "fmt" "io" "log/slog" - "os" "strings" - "github.com/d4l3k/go-bfloat16" "github.com/pdevine/tensor" "github.com/pdevine/tensor/native" @@ -19,49 +16,27 @@ type GemmaModel struct { ModelData } -func gemmaLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error { - slog.Debug(fmt.Sprintf("converting '%s'", r.t.Name)) - - data := make([]byte, r.end-r.start) - if err := binary.Read(f, r.bo, data); err != nil { - return err - } - - tDataF32 := bfloat16.DecodeFloat32(data) - - var err error - tDataF32, err = addOnes(tDataF32, int(r.t.Shape[0])) - if err != nil { - return err - } - - if err := 
binary.Write(w, r.bo, tDataF32); err != nil { - return err - } - return nil -} - func addOnes(data []float32, vectorSize int) ([]float32, error) { n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data)) ones := tensor.Ones(tensor.Float32, vectorSize) - var err error - n, err = n.Add(ones) + n, err := n.Add(ones) if err != nil { - return []float32{}, err + return nil, err } - newN, err := native.SelectF32(n, 0) + ts, err := native.SelectF32(n, 0) if err != nil { - return []float32{}, err + return nil, err } - var fullTensor []float32 - for _, v := range newN { - fullTensor = append(fullTensor, v...) + var f32s []float32 + for _, t := range ts { + f32s = append(f32s, t...) } - return fullTensor, nil + + return f32s, nil } func (m *GemmaModel) GetTensors() error { @@ -74,7 +49,7 @@ func (m *GemmaModel) GetTensors() error { for _, l := range t { if strings.HasSuffix(l.Name, "norm.weight") { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = gemmaLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -92,6 +67,10 @@ func (m *GemmaModel) LoadVocab() error { return nil } +func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) { + return addOnes(data, int(shape[0])) +} + func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error { kv := llm.KV{ "general.architecture": "gemma", diff --git a/convert/llama.go b/convert/llama.go index a10670e6..7853c4cf 100644 --- a/convert/llama.go +++ b/convert/llama.go @@ -1,7 +1,7 @@ package convert import ( - "encoding/binary" + "cmp" "errors" "fmt" "io" @@ -10,10 +10,8 @@ import ( "regexp" "strings" - "github.com/nlpodyssey/gopickle/pytorch" "github.com/pdevine/tensor" "github.com/pdevine/tensor/native" - "github.com/x448/float16" "github.com/ollama/ollama/llm" ) @@ -22,83 +20,6 @@ type LlamaModel struct { ModelData } -func llamaTorchLayerHandler(w io.Writer, r torchWriterTo) error { - - var tData []uint16 - switch r.storage.(type) { - case *pytorch.HalfStorage: - data := r.storage.(*pytorch.HalfStorage).Data - tData = make([]uint16, len(data)) - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - case *pytorch.BFloat16Storage: - data := r.storage.(*pytorch.BFloat16Storage).Data - tData = make([]uint16, len(data)) - - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - default: - return fmt.Errorf("unknown storage type for torch") - } - - var err error - var heads uint32 - if strings.Contains(r.t.Name, "attn_q") { - heads = uint32(r.params.AttentionHeads) - } else if strings.Contains(r.t.Name, "attn_k") { - heads = uint32(r.params.KeyValHeads) - if heads == 0 { - heads = uint32(r.params.AttentionHeads) - } - } else { - return fmt.Errorf("unknown layer type") - } - - tData, err = llamaRepack(tData, int(heads), r.t.Shape) - if err != nil { - return err - } - - if err = binary.Write(w, r.bo, tData); err != nil { - return err - } - return nil -} - -func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) { - n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data)) - origShape := n.Shape().Clone() - - // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf - if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil { - return nil, err - } - - if err := n.T(0, 2, 1, 3); err != nil { - return nil, err - } - - if err := n.Reshape(origShape...); err != nil { - return nil, err - } - - if err := n.Transpose(); err != nil { - return nil, err - } - 
newN, err := native.SelectU16(n, 1) - if err != nil { - return nil, err - } - - var fullTensor []uint16 - for _, v := range newN { - fullTensor = append(fullTensor, v...) - } - return fullTensor, nil -} - func (m *LlamaModel) GetTensors() error { t, err := m.Format.GetTensors(m.Path, m.Params) if err != nil { @@ -117,11 +38,11 @@ func (m *LlamaModel) GetTensors() error { switch m.Format.(type) { case *TorchFormat: wt := l.WriterTo.(torchWriterTo) - wt.handler = llamaTorchLayerHandler + wt.repacker = m.Repack l.WriterTo = wt case *SafetensorFormat: wt := l.WriterTo.(safetensorWriterTo) - wt.handler = mistralLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } } @@ -184,3 +105,54 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error { return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} + +func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) { + var dims []int + for _, dim := range shape { + if dim != 0 { + dims = append(dims, int(dim)) + } + } + + var heads int + if strings.HasSuffix(name, "attn_q.weight") { + heads = params.AttentionHeads + } else if strings.HasSuffix(name, "attn_k.weight") { + heads = cmp.Or(params.KeyValHeads, params.AttentionHeads) + } else { + return nil, fmt.Errorf("unknown tensor name: %s", name) + } + + n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data)) + if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil { + return nil, err + } + + if err := n.T(0, 2, 1, 3); err != nil { + return nil, err + } + + if err := n.Reshape(dims...); err != nil { + return nil, err + } + + if err := n.Transpose(); err != nil { + return nil, err + } + + ts, err := native.SelectF32(n, 1) + if err != nil { + return nil, err + } + + var f32s []float32 + for _, t := range ts { + f32s = append(f32s, t...) 
+ } + + return f32s, nil +} diff --git a/convert/mistral.go b/convert/mistral.go index 89d2e084..da6874cf 100644 --- a/convert/mistral.go +++ b/convert/mistral.go @@ -1,17 +1,8 @@ package convert import ( - "encoding/binary" - "fmt" "io" - "os" "regexp" - "strings" - - "github.com/d4l3k/go-bfloat16" - "github.com/pdevine/tensor" - "github.com/pdevine/tensor/native" - "github.com/x448/float16" "github.com/ollama/ollama/llm" ) @@ -20,82 +11,6 @@ type MistralModel struct { ModelData } -func mistralLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error { - layerSize := r.end - r.start - - var err error - tData := make([]uint16, layerSize/2) - if err = binary.Read(f, r.bo, tData); err != nil { - return err - } - - var heads uint32 - if strings.Contains(r.t.Name, "attn_q") { - heads = uint32(r.params.AttentionHeads) - } else if strings.Contains(r.t.Name, "attn_k") { - heads = uint32(r.params.KeyValHeads) - if heads == 0 { - heads = uint32(r.params.AttentionHeads) - } - } else { - return fmt.Errorf("unknown layer type") - } - - tData, err = repack(tData, int(heads), r.t.Shape) - if err != nil { - return err - } - - var buf []byte - for _, n := range tData { - buf = r.bo.AppendUint16(buf, n) - } - - tempBuf := make([]uint16, len(tData)) - tDataF32 := bfloat16.DecodeFloat32(buf) - for cnt, v := range tDataF32 { - tDataF16 := float16.Fromfloat32(v) - tempBuf[cnt] = uint16(tDataF16) - } - - if err = binary.Write(w, r.bo, tempBuf); err != nil { - return err - } - return nil -} - -func repack(data []uint16, heads int, shape []uint64) ([]uint16, error) { - n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data)) - origShape := n.Shape().Clone() - - // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf - if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil { - return nil, err - } - - if err := n.T(0, 2, 1, 3); err != nil { - return nil, err - } - - if err := n.Reshape(origShape...); err != nil { - return nil, err - } - - if err := n.Transpose(); err != nil { - return nil, err - } - newN, err := native.SelectU16(n, 1) - if err != nil { - return nil, err - } - - var fullTensor []uint16 - for _, v := range newN { - fullTensor = append(fullTensor, v...) 
- } - return fullTensor, nil -} - func (m *MistralModel) GetTensors() error { t, err := m.Format.GetTensors(m.Path, m.Params) if err != nil { @@ -112,7 +27,7 @@ func (m *MistralModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = mistralLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -158,3 +73,7 @@ func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error { return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} diff --git a/convert/mixtral.go b/convert/mixtral.go index 66546fd7..baea68cd 100644 --- a/convert/mixtral.go +++ b/convert/mixtral.go @@ -27,7 +27,7 @@ func (m *MixtralModel) GetTensors() error { matches := re.FindAllStringSubmatch(l.Name, -1) if len(matches) > 0 { wt := l.WriterTo.(safetensorWriterTo) - wt.handler = mistralLayerHandler + wt.repacker = m.Repack l.WriterTo = wt } m.Tensors = append(m.Tensors, l) @@ -81,3 +81,7 @@ func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error { return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) } + +func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) { + return llamaRepack(name, m.Params, data, shape) +} diff --git a/convert/safetensors.go b/convert/safetensors.go index 2107ae81..9de9a002 100644 --- a/convert/safetensors.go +++ b/convert/safetensors.go @@ -27,9 +27,10 @@ type safetensorWriterTo struct { bo ByteOrder filename string + dtype string start, end, padding uint64 - handler func(w io.Writer, r safetensorWriterTo, f *os.File) error + repacker func(string, []float32, []uint64) ([]float32, error) } type tensorMetaData struct { @@ -150,6 +151,7 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) params: params, bo: params.ByteOrder, filename: fn, + dtype: data.Type, start: uint64(data.Offsets[0]), end: uint64(data.Offsets[1]), padding: 8 + jsonSize, @@ -235,51 +237,54 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) { return 0, err } - // use the handler if one is present - if r.handler != nil { - return 0, r.handler(w, r, f) - } - - remaining := r.end - r.start - - bufSize := uint64(10240) - var finished bool - for { - data := make([]byte, min(bufSize, remaining)) - - b, err := io.ReadFull(f, data) - remaining -= uint64(b) - - if err == io.EOF || remaining <= 0 { - finished = true - } else if err != nil { + var f32s []float32 + switch r.dtype { + case "F32": + f32s = make([]float32, (r.end-r.start)/4) + if err = binary.Read(f, r.bo, f32s); err != nil { + return 0, err + } + case "F16": + bts := make([]uint16, (r.end-r.start)/2) + if err = binary.Read(f, r.bo, bts); err != nil { return 0, err } - // convert bfloat16 -> ieee float32 - tDataF32 := bfloat16.DecodeFloat32(data) - - switch r.t.Kind { - case 0: - if err := binary.Write(w, r.bo, tDataF32); err != nil { - return 0, err - } - case 1: - // convert float32 -> float16 - tempBuf := make([]uint16, len(data)/2) - for cnt, v := range tDataF32 { - tDataF16 := float16.Fromfloat32(v) - tempBuf[cnt] = uint16(tDataF16) - } - if err := binary.Write(w, r.bo, tempBuf); err != nil { - return 0, err - } + for _, b := range bts { + f32s = append(f32s, float16.Frombits(b).Float32()) } - if finished { - break + + case "BF16": + bts := make([]byte, r.end-r.start) + if err = 
binary.Read(f, r.bo, bts); err != nil { + return 0, err + } + + f32s = bfloat16.DecodeFloat32(bts) + default: + return 0, fmt.Errorf("unknown data type: %s", r.dtype) + } + + if r.repacker != nil { + f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape) + if err != nil { + return 0, err } } - return 0, nil + + switch r.t.Kind { + case 0: + return 0, binary.Write(w, r.bo, f32s) + case 1: + f16s := make([]uint16, len(f32s)) + for i := range f32s { + f16s[i] = float16.Fromfloat32(f32s[i]).Bits() + } + + return 0, binary.Write(w, r.bo, f16s) + default: + return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind) + } } func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) { diff --git a/convert/torch.go b/convert/torch.go index cb8d74b0..b7ae0f76 100644 --- a/convert/torch.go +++ b/convert/torch.go @@ -24,8 +24,8 @@ type torchWriterTo struct { params *Params bo ByteOrder - storage pytorch.StorageInterface - handler func(w io.Writer, r torchWriterTo) error + storage pytorch.StorageInterface + repacker func(string, []float32, []uint64) ([]float32, error) } type TorchFormat struct{} @@ -230,59 +230,38 @@ func (m *TorchFormat) GetLayerName(n string) (string, error) { } func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) { - // use the handler if one is present - if r.handler != nil { - return 0, r.handler(w, r) - } - - switch storage := r.storage.(type) { + var f32s []float32 + switch s := r.storage.(type) { case *pytorch.FloatStorage: - slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name)) - return 0, nil + f32s = s.Data case *pytorch.HalfStorage: - switch r.t.Kind { - case 0: - data := r.storage.(*pytorch.HalfStorage).Data - slog.Debug(fmt.Sprintf("%35s F32 (%d)", r.t.Name, len(data))) - if err := binary.Write(w, r.bo, data); err != nil { - return 0, err - } - case 1: - data := r.storage.(*pytorch.HalfStorage).Data - tData := make([]uint16, len(data)) - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - slog.Debug(fmt.Sprintf("%35s F16 (%d)", r.t.Name, len(tData))) - if err := binary.Write(w, r.bo, tData); err != nil { - return 0, err - } - } + f32s = s.Data case *pytorch.BFloat16Storage: - data := r.storage.(*pytorch.BFloat16Storage).Data - switch r.t.Kind { - case 0: - if err = binary.Write(w, r.bo, data); err != nil { - return 0, err - } - case 1: - tData := make([]uint16, len(data)) - - for cnt, v := range data { - tData[cnt] = uint16(float16.Fromfloat32(v)) - } - - if err = binary.Write(w, r.bo, tData); err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("unknown storage kind: %d", r.t.Kind) - } + f32s = s.Data default: - return 0, fmt.Errorf("unknown storage type: %T", storage) + return 0, fmt.Errorf("unknown data type: %T", s) } - return 0, nil + if r.repacker != nil { + f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape) + if err != nil { + return 0, err + } + } + + switch r.t.Kind { + case 0: + return 0, binary.Write(w, r.bo, f32s) + case 1: + f16s := make([]uint16, len(f32s)) + for i := range f32s { + f16s[i] = float16.Fromfloat32(f32s[i]).Bits() + } + + return 0, binary.Write(w, r.bo, f16s) + default: + return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind) + } } func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) { diff --git a/go.mod b/go.mod index 5d0d3c33..255c8a04 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.22.0 require ( github.com/containerd/console v1.0.3 - github.com/d4l3k/go-bfloat16 
v0.0.0-20211005043715-690c3bdd05f1 github.com/emirpasic/gods v1.18.1 github.com/gin-gonic/gin v1.10.0 github.com/golang/protobuf v1.5.4 // indirect @@ -18,6 +17,7 @@ require ( ) require ( + github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 github.com/mattn/go-runewidth v0.0.14 github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c From 3591bbe56fc3dba4d7cf9b77929143a58ffaaa59 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 21 May 2024 11:28:16 -0700 Subject: [PATCH 19/31] add test --- convert/convert_test.go | 103 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 convert/convert_test.go diff --git a/convert/convert_test.go b/convert/convert_test.go new file mode 100644 index 00000000..6aa33a49 --- /dev/null +++ b/convert/convert_test.go @@ -0,0 +1,103 @@ +//go:build slow + +package convert + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ollama/ollama/llm" +) + +func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) { + t.Helper() + + mf, err := GetModelFormat(p) + if err != nil { + t.Fatal(err) + } + + params, err := mf.GetParams(p) + if err != nil { + t.Fatal(err) + } + + arch, err := mf.GetModelArch("", p, params) + if err != nil { + t.Fatal(err) + } + + if err := arch.LoadVocab(); err != nil { + t.Fatal(err) + } + + if err := arch.GetTensors(); err != nil { + t.Fatal(err) + } + + f, err := os.CreateTemp(t.TempDir(), "f16") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if err := arch.WriteGGUF(f); err != nil { + t.Fatal(err) + } + + r, err := os.Open(f.Name()) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + m, _, err := llm.DecodeGGML(r) + if err != nil { + t.Fatal(err) + } + + return m.KV(), m.Tensors() +} + +func TestConvertFull(t *testing.T) { + cases := []struct { + path string + arch string + tensors int + layers int + }{ + {"Meta-Llama-3-8B-Instruct", "llama", 291, 35}, + {"Mistral-7B-Instruct-v0.2", "llama", 291, 35}, + {"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35}, + {"gemma-2b-it", "gemma", 164, 20}, + } + + for _, tt := range cases { + t.Run(tt.path, func(t *testing.T) { + p := filepath.Join("testdata", tt.path) + if _, err := os.Stat(p); err != nil { + t.Skipf("%s not found", p) + } + + kv, tensors := convertFull(t, p) + + if kv.Architecture() != tt.arch { + t.Fatalf("expected llama, got %s", kv.Architecture()) + } + + if kv.FileType().String() != "F16" { + t.Fatalf("expected F16, got %s", kv.FileType()) + } + + if len(tensors) != tt.tensors { + t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors)) + } + + layers := tensors.Layers() + if len(layers) != tt.layers { + t.Fatalf("expected %d layers, got %d", tt.layers, len(layers)) + } + }) + } +} From 171eb040fc3dd8a9bee377e4c9a0e847a847c22c Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 20 May 2024 09:47:01 -0700 Subject: [PATCH 20/31] simplify safetensors reading --- convert/safetensors.go | 115 +++++++++++++++-------------------------- go.mod | 1 - go.sum | 2 - llm/ggla.go | 2 +- llm/ggml.go | 6 +-- llm/gguf.go | 4 +- 6 files changed, 49 insertions(+), 81 deletions(-) diff --git a/convert/safetensors.go b/convert/safetensors.go index 9de9a002..69270b87 100644 --- a/convert/safetensors.go +++ b/convert/safetensors.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "log/slog" "os" "path/filepath" "regexp" @@ -14,7 +13,6 @@ import ( "strings" "github.com/d4l3k/go-bfloat16" - "github.com/mitchellh/mapstructure" "github.com/x448/float16" 
"github.com/ollama/ollama/llm" @@ -29,38 +27,36 @@ type safetensorWriterTo struct { filename string dtype string - start, end, padding uint64 - repacker func(string, []float32, []uint64) ([]float32, error) + offset, size int64 + repacker func(string, []float32, []uint64) ([]float32, error) } -type tensorMetaData struct { - Type string `mapstructure:"dtype"` - Shape []int `mapstructure:"shape"` - Offsets []int `mapstructure:"data_offsets"` +type safetensorMetadata struct { + Type string `json:"dtype"` + Shape []uint64 `json:"shape"` + Offsets []int64 `json:"data_offsets"` } type SafetensorFormat struct{} func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) { - slog.Debug("getting tensor data") var tensors []llm.Tensor - files, err := filepath.Glob(filepath.Join(dirpath, "/model-*.safetensors")) + matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors")) if err != nil { return nil, err } var offset uint64 - for _, f := range files { + for _, f := range matches { var t []llm.Tensor var err error t, offset, err = m.readTensors(f, offset, params) if err != nil { - slog.Error(err.Error()) return nil, err } + tensors = append(tensors, t...) } - slog.Debug(fmt.Sprintf("all tensors = %d", len(tensors))) return tensors, nil } @@ -71,76 +67,57 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) } defer f.Close() - var jsonSize uint64 - if err := binary.Read(f, binary.LittleEndian, &jsonSize); err != nil { + var n int64 + if err := binary.Read(f, binary.LittleEndian, &n); err != nil { return nil, 0, err } - buf := make([]byte, jsonSize) - _, err = io.ReadFull(f, buf) - if err != nil { + b := bytes.NewBuffer(make([]byte, 0, n)) + if _, err = io.CopyN(b, f, n); err != nil { return nil, 0, err } - d := json.NewDecoder(bytes.NewBuffer(buf)) - d.UseNumber() - var parsed map[string]interface{} - if err = d.Decode(&parsed); err != nil { + var headers map[string]safetensorMetadata + if err := json.NewDecoder(b).Decode(&headers); err != nil { return nil, 0, err } var keys []string - for k := range parsed { - keys = append(keys, k) + for key := range headers { + if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") { + keys = append(keys, key) + } } slices.Sort(keys) - slog.Info("converting layers") var tensors []llm.Tensor - for _, k := range keys { - if strings.HasSuffix(k, "self_attn.rotary_emb.inv_freq") { - continue - } + for _, key := range keys { + value := headers[key] - vals := parsed[k].(map[string]interface{}) - var data tensorMetaData - if err = mapstructure.Decode(vals, &data); err != nil { - slog.Error("couldn't decode properly") - return nil, 0, err - } - - var size uint64 var kind uint32 - switch len(data.Shape) { + switch len(value.Shape) { case 0: - // metadata + // valuedata continue - case 1: - // convert to float32 - kind = 0 - size = uint64(data.Shape[0] * 4) case 2: - // convert to float16 kind = 1 - size = uint64(data.Shape[0] * data.Shape[1] * 2) } - ggufName, err := m.GetLayerName(k) + name, err := m.GetLayerName(key) if err != nil { - slog.Error(err.Error()) return nil, 0, err } - shape := []uint64{0, 0, 0, 0} - for i := range data.Shape { - shape[i] = uint64(data.Shape[i]) - } + shape := make([]uint64, len(value.Shape)) + copy(shape, value.Shape) - slog.Debug(fmt.Sprintf("'%45s': '%30s' %10d [%#v]", k, ggufName, size, data.Shape)) + pad := func(s int64) int64 { + return 8 + n + s + } t := llm.Tensor{ - Name: ggufName, + Name: name, Kind: kind, Offset: offset, Shape: shape[:], @@ -151,19 +128,15 
@@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) params: params, bo: params.ByteOrder, filename: fn, - dtype: data.Type, - start: uint64(data.Offsets[0]), - end: uint64(data.Offsets[1]), - padding: 8 + jsonSize, + dtype: value.Type, + offset: pad(value.Offsets[0]), + size: pad(value.Offsets[1]) - pad(value.Offsets[0]), } - offset += size + offset += t.Size() tensors = append(tensors, t) } - slog.Debug(fmt.Sprintf("total tensors for file = %d", len(tensors))) - slog.Debug(fmt.Sprintf("offset = %d", offset)) - return tensors, offset, nil } @@ -176,9 +149,7 @@ func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) { var params Params - d := json.NewDecoder(f) - err = d.Decode(¶ms) - if err != nil { + if err := json.NewDecoder(f).Decode(¶ms); err != nil { return nil, err } @@ -233,34 +204,34 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) { } defer f.Close() - if _, err = f.Seek(int64(r.padding+r.start), 0); err != nil { + if _, err = f.Seek(r.offset, io.SeekStart); err != nil { return 0, err } var f32s []float32 switch r.dtype { case "F32": - f32s = make([]float32, (r.end-r.start)/4) + f32s = make([]float32, r.size/4) if err = binary.Read(f, r.bo, f32s); err != nil { return 0, err } case "F16": - bts := make([]uint16, (r.end-r.start)/2) - if err = binary.Read(f, r.bo, bts); err != nil { + u16s := make([]uint16, r.size/2) + if err = binary.Read(f, r.bo, u16s); err != nil { return 0, err } - for _, b := range bts { + for _, b := range u16s { f32s = append(f32s, float16.Frombits(b).Float32()) } case "BF16": - bts := make([]byte, r.end-r.start) - if err = binary.Read(f, r.bo, bts); err != nil { + u8s := make([]uint8, r.size) + if err = binary.Read(f, r.bo, u8s); err != nil { return 0, err } - f32s = bfloat16.DecodeFloat32(bts) + f32s = bfloat16.DecodeFloat32(u8s) default: return 0, fmt.Errorf("unknown data type: %s", r.dtype) } diff --git a/go.mod b/go.mod index 255c8a04..2f3d4ca3 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/gin-gonic/gin v1.10.0 github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.1.2 - github.com/mitchellh/mapstructure v1.5.0 github.com/olekukonko/tablewriter v0.0.5 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 945919d8..9e1baebe 100644 --- a/go.sum +++ b/go.sum @@ -135,8 +135,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/llm/ggla.go b/llm/ggla.go index cf14f214..a5d90b6c 100644 --- a/llm/ggla.go +++ b/llm/ggla.go @@ -119,7 +119,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { t.Offset = uint64(offset) - if _, err := rs.Seek(int64(t.size()), 
io.SeekCurrent); err != nil { + if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil { return err } diff --git a/llm/ggml.go b/llm/ggml.go index 40089be2..48b69f51 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -106,7 +106,7 @@ type Layer map[string]*Tensor func (l Layer) size() (size uint64) { for _, t := range l { - size += t.size() + size += t.Size() } return size @@ -185,7 +185,7 @@ func (t Tensor) parameters() uint64 { return count } -func (t Tensor) size() uint64 { +func (t Tensor) Size() uint64 { return t.parameters() * t.typeSize() / t.blockSize() } @@ -288,7 +288,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui // mixtral 8x22b ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32)) partialOffload = max( - 3*ffnGateExpsWeight.size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), + 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), 4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch), ) } else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok { diff --git a/llm/gguf.go b/llm/gguf.go index eb7d7b75..0ba48f76 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -241,11 +241,11 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { } for _, tensor := range llm.tensors { - if _, err := rs.Seek(int64(tensor.size()), io.SeekCurrent); err != nil { + if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil { return err } - padding := llm.padding(int64(tensor.size()), int64(alignment)) + padding := llm.padding(int64(tensor.Size()), int64(alignment)) if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { return err } From 4434d7f4475083d0a87c7f6558d2d42bdf3c8b4b Mon Sep 17 00:00:00 2001 From: Sang Park Date: Wed, 22 May 2024 05:39:01 +0900 Subject: [PATCH 21/31] Correct typo in error message (#4535) The spelling of the term "request" has been corrected, which was previously mistakenly written as "requeset" in the error log message. --- server/sched.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/sched.go b/server/sched.go index ceddc526..8b97e354 100644 --- a/server/sched.go +++ b/server/sched.go @@ -220,7 +220,7 @@ func (s *Scheduler) processCompleted(ctx context.Context) { runner := s.loaded[finished.model.ModelPath] s.loadedMu.Unlock() if runner == nil { - slog.Error("finished requeset signal received after model unloaded", "modelPath", finished.model.ModelPath) + slog.Error("finished request signal received after model unloaded", "modelPath", finished.model.ModelPath) continue } runner.refMu.Lock() From 3bade04e10fae0db1b0a62ec0d5d9883a6f3c3bf Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Tue, 21 May 2024 15:30:09 -0700 Subject: [PATCH 22/31] doc updates for the faq/troubleshooting (#4565) --- docs/faq.md | 93 ++++++++++++-------- docs/troubleshooting.md | 190 ++++++++++++++++++---------------------- 2 files changed, 141 insertions(+), 142 deletions(-) diff --git a/docs/faq.md b/docs/faq.md index 22bd4da7..b50a3138 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. 
Click on the ta On Linux, re-run the install script: -``` +```shell curl -fsSL https://ollama.com/install.sh | sh ``` @@ -30,7 +30,7 @@ To change this when using `ollama run`, use `/set parameter`: When using the API, specify the `num_ctx` parameter: -``` +```shell curl http://localhost:11434/api/generate -d '{ "model": "llama3", "prompt": "Why is the sky blue?", @@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{ }' ``` +## How can I tell if my model was loaded onto the GPU? + +Use the `ollama ps` command to see what models are currently loaded into memory. + +```shell +ollama ps +NAME ID SIZE PROCESSOR UNTIL +llama3:70b bcfb190ca3a7 42 GB 100% GPU 4 minutes from now +``` + +The `Processor` column will show which memory the model was loaded in to: +* `100% GPU` means the model was loaded entirely into the GPU +* `100% CPU` means the model was loaded entirely in system memory +* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory + ## How do I configure Ollama server? Ollama server can be configured with environment variables. @@ -94,6 +109,34 @@ On Windows, Ollama inherits your user and system environment variables. 6. Start the Ollama application from the Windows Start menu. +## How do I use Ollama behind a proxy? + +Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variables, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform. + +### How do I use Ollama behind a proxy in Docker? + +The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container. + +Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy). + +Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate. + +```dockerfile +FROM ollama/ollama +COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt +RUN update-ca-certificates +``` + +Build and run this image: + +```shell +docker build -t ollama-with-ca . +docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca +``` + +## Does Ollama send my prompts and answers back to ollama.com? + +No. Ollama runs locally, and conversation data does not leave your machine. ## How can I expose Ollama on my network? @@ -120,7 +163,7 @@ server { Ollama can be accessed using a range of tools for tunneling tools. 
For example with Ngrok: -``` +```shell ngrok http 11434 --host-header="localhost:11434" ``` @@ -128,7 +171,7 @@ ngrok http 11434 --host-header="localhost:11434" To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags: -``` +```shell cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434" ``` @@ -150,39 +193,10 @@ If a different directory needs to be used, set the environment variable `OLLAMA_ Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform. -## Does Ollama send my prompts and answers back to ollama.com? - -No. Ollama runs locally, and conversation data does not leave your machine. - ## How can I use Ollama in Visual Studio Code? There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme. -## How do I use Ollama behind a proxy? - -Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variables, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform. - -### How do I use Ollama behind a proxy in Docker? - -The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container. - -Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy). - -Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate. - -```dockerfile -FROM ollama/ollama -COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt -RUN update-ca-certificates -``` - -Build and run this image: - -```shell -docker build -t ollama-with-ca . -docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca -``` - ## How do I use Ollama with GPU acceleration in Docker? The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details. @@ -197,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these properties. -## How can I pre-load a model to get faster response times? +## How can I preload a model into Ollama to get faster response times? If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints. 
@@ -211,6 +225,11 @@ To use the chat completions endpoint, use: curl http://localhost:11434/api/chat -d '{"model": "mistral"}' ``` +To preload a model using the CLI, use the command: +```shell +ollama run llama3 "" +``` + ## How do I keep a model loaded in memory or make it unload immediately? By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory. @@ -235,8 +254,6 @@ Alternatively, you can change the amount of time all models are loaded into memo If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints. -## How do I manage the maximum number of requests the server can queue +## How do I manage the maximum number of requests the Ollama server can queue? -If too many requests are sent to the server, it will respond with a 503 error -indicating the server is overloaded. You can adjust how many requests may be -queue by setting `OLLAMA_MAX_QUEUE` +If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 5971da5f..729ec96c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,104 +1,86 @@ -# How to troubleshoot issues - -Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command: - -```shell -cat ~/.ollama/logs/server.log -``` - -On **Linux** systems with systemd, the logs can be found with this command: - -```shell -journalctl -u ollama -``` - -When you run Ollama in a **container**, the logs go to stdout/stderr in the container: - -```shell -docker logs -``` -(Use `docker ps` to find the container name) - -If manually running `ollama serve` in a terminal, the logs will be on that terminal. - -When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `+R` and type in: -- `explorer %LOCALAPPDATA%\Ollama` to view logs -- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH) -- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored -- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories - -To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal -```powershell -$env:OLLAMA_DEBUG="1" -& "ollama app.exe" -``` - -Join the [Discord](https://discord.gg/ollama) for help interpreting the logs. - -## LLM libraries - -Ollama includes multiple LLM libraries compiled for different GPUs and CPU -vector features. Ollama tries to pick the best one based on the capabilities of -your system. If this autodetection has problems, or you run into other problems -(e.g. crashes in your GPU) you can workaround this by forcing a specific LLM -library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest -but most compatible is `cpu`. 
Rosetta emulation under MacOS will work with the -`cpu` library. - -In the server log, you will see a message that looks something like this (varies -from release to release): - -``` -Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5] -``` - -**Experimental LLM Library Override** - -You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass -autodetection, so for example, if you have a CUDA card, but want to force the -CPU LLM library with AVX2 vector support, use: - -``` -OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve -``` - -You can see what features your CPU has with the following. -``` -cat /proc/cpuinfo| grep flags | head -1 -``` - -## Installing older or pre-release versions on Linux - -If you run into problems on Linux and want to install an older version, or you'd -like to try out a pre-release before it's officially released, you can tell the -install script which version to install. - -```sh -curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh -``` - -## Linux tmp noexec - -If your system is configured with the "noexec" flag where Ollama stores its -temporary executable files, you can specify an alternate location by setting -OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example -OLLAMA_TMPDIR=/usr/share/ollama/ - -## Container fails to run on NVIDIA GPU - -Make sure you've set up the container runtime first as described in [docker.md](./docker.md) - -Sometimes the container runtime can have difficulties initializing the GPU. -When you check the server logs, this can show up as various error codes, such -as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" -(unknown), or others. The following troubleshooting techniques may help resolve -the problem - -- Is the uvm driver not loaded? `sudo nvidia-modprobe -u` -- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` -- Try rebooting -- Make sure you're running the latest nvidia drivers - -If none of those resolve the problem, gather additional information and file an issue: -- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs -- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` +# How to troubleshoot issues + +Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command: + +```shell +cat ~/.ollama/logs/server.log +``` + +On **Linux** systems with systemd, the logs can be found with this command: + +```shell +journalctl -u ollama +``` + +When you run Ollama in a **container**, the logs go to stdout/stderr in the container: + +```shell +docker logs +``` +(Use `docker ps` to find the container name) + +If manually running `ollama serve` in a terminal, the logs will be on that terminal. + +When you run Ollama on **Windows**, there are a few different locations. 
You can view them in the explorer window by hitting `+R` and type in: +- `explorer %LOCALAPPDATA%\Ollama` to view logs +- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH) +- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored +- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories + +To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal +```powershell +$env:OLLAMA_DEBUG="1" +& "ollama app.exe" +``` + +Join the [Discord](https://discord.gg/ollama) for help interpreting the logs. + +## LLM libraries + +Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU) you can workaround this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library. + +In the server log, you will see a message that looks something like this (varies from release to release): + +``` +Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5] +``` + +**Experimental LLM Library Override** + +You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card, but want to force the CPU LLM library with AVX2 vector support, use: + +``` +OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve +``` + +You can see what features your CPU has with the following. +``` +cat /proc/cpuinfo| grep flags | head -1 +``` + +## Installing older or pre-release versions on Linux + +If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install. + +```sh +curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh +``` + +## Linux tmp noexec + +If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/ + +## Container fails to run on NVIDIA GPU + +Make sure you've set up the container runtime first as described in [docker.md](./docker.md) + +Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem + +- Is the uvm driver not loaded? 
`sudo nvidia-modprobe -u` +- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` +- Try rebooting +- Make sure you're running the latest nvidia drivers + +If none of those resolve the problem, gather additional information and file an issue: +- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs +- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` From 353f83a9c788410f36a670e6750bb4162fb8f440 Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Tue, 21 May 2024 16:55:09 -0700 Subject: [PATCH 23/31] add Ctrl + W shortcut --- cmd/interactive.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/interactive.go b/cmd/interactive.go index f9157bd8..0a31efb5 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -138,6 +138,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word") fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor") fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor") + fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor") fmt.Fprintln(os.Stderr, "") fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen") fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding") From 955c317cabe1344c9f0ed7a71e33f6b4f0919e5e Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 22 May 2024 16:25:23 +0900 Subject: [PATCH 24/31] chore: update tokenizer.go (#4571) PreTokenziers -> PreTokenizers --- convert/tokenizer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/convert/tokenizer.go b/convert/tokenizer.go index e0fe0bb7..efeb5491 100644 --- a/convert/tokenizer.go +++ b/convert/tokenizer.go @@ -18,7 +18,7 @@ type Tokenizer struct { Model TokenizerModel `json:"model"` PreTokenizer struct { - PreTokenziers []struct { + PreTokenizers []struct { Type string `json:"type"` Pattern struct { Regex string `json:"Regex"` @@ -84,7 +84,7 @@ func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, e } sha256sum := sha256.New() - for _, pt := range t.PreTokenizer.PreTokenziers { + for _, pt := range t.PreTokenizer.PreTokenizers { switch pt.Type { case "Split": if pt.Pattern.Regex != "" { From 73630a7e85ba58c65cc1ad06d8b13149904b076a Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 22 May 2024 12:53:45 -0400 Subject: [PATCH 25/31] add phi 3 medium (#4578) --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5d3daec5..81c0b061 100644 --- a/README.md +++ b/README.md @@ -51,15 +51,17 @@ Here are some example models that can be downloaded: | ------------------ | ---------- | ----- | ------------------------------ | | Llama 3 | 8B | 4.7GB | `ollama run llama3` | | Llama 3 | 70B | 40GB | `ollama run llama3:70b` | -| Phi-3 | 3.8B | 2.3GB | `ollama run phi3` | +| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` | +| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` | +| Gemma | 2B | 1.4GB | `ollama run gemma:2b` | +| Gemma | 7B | 4.8GB | `ollama run gemma:7b` | | Mistral | 7B | 4.1GB | `ollama run mistral` | +| Moondream 2 | 1.4B | 829MB | `ollama run moondream` | | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` | | Starling | 7B | 4.1GB | `ollama run starling-lm` | | Code Llama | 7B | 3.8GB | `ollama run codellama` | | Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` | | LLaVA | 7B | 4.5GB | `ollama run llava` | -| Gemma | 2B | 1.4GB 
| `ollama run gemma:2b` | -| Gemma | 7B | 4.8GB | `ollama run gemma:7b` | | Solar | 10.7B | 6.1GB | `ollama run solar` | > Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models. From 38255d2af15932150606e19bea8200b386cfd36d Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 22 May 2024 21:52:09 -0700 Subject: [PATCH 26/31] Use flash attention flag for now (#4580) * put flash attention behind flag for now * add test * remove print * up timeout for sheduler tests --- llm/server.go | 10 +++++----- server/envconfig/config.go | 10 ++++++++++ server/envconfig/config_test.go | 3 +++ server/sched_test.go | 2 +- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/llm/server.go b/llm/server.go index ba25fa21..c63a76a4 100644 --- a/llm/server.go +++ b/llm/server.go @@ -200,20 +200,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--numa") } - flashAttnSupported := true + flashAttnEnabled := envconfig.FlashAttention // partial offloading does not support flash attention - if uint64(opts.NumGPU) < ggml.KV().BlockCount() + 1 { - flashAttnSupported = false + if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 { + flashAttnEnabled = false } // only cuda (compute capability 7+) and metal support flash attention for _, g := range gpus { if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) { - flashAttnSupported = false + flashAttnEnabled = false } } - if flashAttnSupported { + if flashAttnEnabled { params = append(params, "--flash-attn") } diff --git a/server/envconfig/config.go b/server/envconfig/config.go index 9ad68180..ae7d89b2 100644 --- a/server/envconfig/config.go +++ b/server/envconfig/config.go @@ -31,6 +31,8 @@ var ( RunnersDir string // Set via OLLAMA_TMPDIR in the environment TmpDir string + // Experimental flash attention + FlashAttention bool ) func AsMap() map[string]string { @@ -45,6 +47,7 @@ func AsMap() map[string]string { "OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel), "OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir), "OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir), + "OLLAMA_FLASH_ATTENTION": fmt.Sprintf("%v", FlashAttention), } } @@ -78,6 +81,13 @@ func LoadConfig() { } } + if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" { + d, err := strconv.ParseBool(fa) + if err == nil { + FlashAttention = d + } + } + RunnersDir = clean("OLLAMA_RUNNERS_DIR") if runtime.GOOS == "windows" && RunnersDir == "" { // On Windows we do not carry the payloads inside the main executable diff --git a/server/envconfig/config_test.go b/server/envconfig/config_test.go index bad7c4a7..429434ae 100644 --- a/server/envconfig/config_test.go +++ b/server/envconfig/config_test.go @@ -17,4 +17,7 @@ func TestConfig(t *testing.T) { t.Setenv("OLLAMA_DEBUG", "1") LoadConfig() require.True(t, Debug) + t.Setenv("OLLAMA_FLASH_ATTENTION", "1") + LoadConfig() + require.True(t, FlashAttention) } diff --git a/server/sched_test.go b/server/sched_test.go index 6a6dd04f..addc1ad8 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -151,7 +151,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV } func TestRequests(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond) + ctx, done := context.WithTimeout(context.Background(), time.Second) defer done() // Same model, same request From d6f692ad1a7d28b8eed3e4eb88c136d0fb12e5b6 Mon Sep 17 00:00:00 2001 From: Bruce MacDonald 
Date: Thu, 23 May 2024 13:21:49 -0700 Subject: [PATCH 27/31] Add support for IQ1_S, IQ3_S, IQ2_S, IQ4_XS. IQ4_NL (#4322) Co-authored-by: ManniX-ITA <20623405+mann1x@users.noreply.github.com> --- llm/filetype.go | 50 ++++++++++++++++++++++++++++++++++++++++++++----- llm/ggml.go | 32 ++++++++++++++++++++++++++----- 2 files changed, 72 insertions(+), 10 deletions(-) diff --git a/llm/filetype.go b/llm/filetype.go index e5e9410d..7a8e9f69 100644 --- a/llm/filetype.go +++ b/llm/filetype.go @@ -27,8 +27,16 @@ const ( fileTypeIQ2_XXS fileTypeIQ2_XS fileTypeQ2_K_S - fileTypeQ3_K_XS + fileTypeIQ3_XS fileTypeIQ3_XXS + fileTypeIQ1_S + fileTypeIQ4_NL + fileTypeIQ3_S + fileTypeIQ2_S + fileTypeIQ4_XS + fileTypeIQ2_M + fileTypeIQ1_M + fileTypeBF16 fileTypeUnknown ) @@ -75,10 +83,26 @@ func ParseFileType(s string) (fileType, error) { return fileTypeIQ2_XS, nil case "Q2_K_S": return fileTypeQ2_K_S, nil - case "Q3_K_XS": - return fileTypeQ3_K_XS, nil + case "IQ3_XS": + return fileTypeIQ3_XS, nil case "IQ3_XXS": return fileTypeIQ3_XXS, nil + case "IQ1_S": + return fileTypeIQ1_S, nil + case "IQ4_NL": + return fileTypeIQ4_NL, nil + case "IQ3_S": + return fileTypeIQ3_S, nil + case "IQ2_S": + return fileTypeIQ2_S, nil + case "IQ4_XS": + return fileTypeIQ4_XS, nil + case "IQ2_M": + return fileTypeIQ2_M, nil + case "IQ1_M": + return fileTypeIQ1_M, nil + case "BF16": + return fileTypeBF16, nil default: return fileTypeUnknown, fmt.Errorf("unknown fileType: %s", s) } @@ -126,10 +150,26 @@ func (t fileType) String() string { return "IQ2_XS" case fileTypeQ2_K_S: return "Q2_K_S" - case fileTypeQ3_K_XS: - return "Q3_K_XS" + case fileTypeIQ3_XS: + return "IQ3_XS" case fileTypeIQ3_XXS: return "IQ3_XXS" + case fileTypeIQ1_S: + return "IQ1_S" + case fileTypeIQ4_NL: + return "IQ4_NL" + case fileTypeIQ3_S: + return "IQ3_S" + case fileTypeIQ2_S: + return "IQ2_S" + case fileTypeIQ4_XS: + return "IQ4_XS" + case fileTypeIQ2_M: + return "IQ2_M" + case fileTypeIQ1_M: + return "IQ1_M" + case fileTypeBF16: + return "BF16" default: return "unknown" } diff --git a/llm/ggml.go b/llm/ggml.go index 48b69f51..9b6da425 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -124,12 +124,12 @@ type Tensor struct { } func (t Tensor) blockSize() uint64 { - switch { - case t.Kind < 2: + switch t.Kind { + case 0, 1, 24, 25, 26, 27, 28, 31: // F32, F16, I8, I16, I32, I64, F64, BF16 return 1 - case t.Kind < 10: + case 2, 3, 8, 9, 20: // Q4_0, Q4_1, Q8_0, Q8_1, IQ4_NL return 32 - default: + default: // All others return 256 } } @@ -171,7 +171,29 @@ func (t Tensor) typeSize() uint64 { case 17: // IQ2_XS return 2 + 2*blockSize/8 + blockSize/32 case 18: // IQ3_XXS - return 2 + 3*blockSize/8 + return 2 + blockSize/4 + blockSize/8 + case 19: // IQ1_S + return 2 + blockSize/8 + blockSize/16 + case 20: // IQ4_NL + return 2 + blockSize/2 + case 21: // IQ3_S + return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4 + case 22: // IQ2_S + return 2 + blockSize/4 + blockSize/16 + case 23: // IQ4_XS + return 2 + 2 + blockSize/2 + blockSize/64 + case 24: // I8 + return 1 + case 25: // I16 + return 2 + case 26: // I32 + return 4 + case 27: // I64 + return 8 + case 28: // F64 + return 8 + case 29: // IQ1_M + return blockSize/8 + blockSize/16 + blockSize/32 default: return 0 } From b37b496a12ebad0105ed17826d838346bff6e5ef Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 20 May 2024 16:41:43 -0700 Subject: [PATCH 28/31] Wire up load progress This doesn't expose a UX yet, but wires the initial server portion of progress reporting during load --- llm/ext_server/server.cpp 
| 14 +++++++++++++- llm/patches/01-load-progress.diff | 31 +++++++++++++++++++++++++++++++ llm/server.go | 24 +++++++++++++++++------- 3 files changed, 61 insertions(+), 8 deletions(-) create mode 100644 llm/patches/01-load-progress.diff diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 3e03bb34..e342d5f1 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -334,6 +334,7 @@ struct server_metrics { struct llama_server_context { llama_model *model = nullptr; + float modelProgress = 0.0; llama_context *ctx = nullptr; clip_ctx *clp_ctx = nullptr; @@ -2779,6 +2780,12 @@ inline void signal_handler(int signal) { shutdown_handler(signal); } +static bool update_load_progress(float progress, void *data) +{ + ((llama_server_context*)data)->modelProgress = progress; + return true; +} + #if defined(_WIN32) char* wchar_to_char(const wchar_t* wstr) { if (wstr == nullptr) return nullptr; @@ -2884,7 +2891,9 @@ int main(int argc, char **argv) { break; } case SERVER_STATE_LOADING_MODEL: - res.set_content(R"({"status": "loading model"})", "application/json"); + char buf[128]; + snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress); + res.set_content(buf, "application/json"); res.status = 503; // HTTP Service Unavailable break; case SERVER_STATE_ERROR: @@ -3079,6 +3088,9 @@ int main(int argc, char **argv) { }); // load the model + params.progress_callback = update_load_progress; + params.progress_callback_user_data = (void*)&llama; + if (!llama.load_model(params)) { state.store(SERVER_STATE_ERROR); diff --git a/llm/patches/01-load-progress.diff b/llm/patches/01-load-progress.diff new file mode 100644 index 00000000..acd44d20 --- /dev/null +++ b/llm/patches/01-load-progress.diff @@ -0,0 +1,31 @@ +diff --git a/common/common.cpp b/common/common.cpp +index ba1ecf0e..cead57cc 100644 +--- a/common/common.cpp ++++ b/common/common.cpp +@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & + mparams.use_mmap = params.use_mmap; + mparams.use_mlock = params.use_mlock; + mparams.check_tensors = params.check_tensors; ++ mparams.progress_callback = params.progress_callback; ++ mparams.progress_callback_user_data = params.progress_callback_user_data; + if (params.kv_overrides.empty()) { + mparams.kv_overrides = NULL; + } else { +diff --git a/common/common.h b/common/common.h +index d80344f2..71e84834 100644 +--- a/common/common.h ++++ b/common/common.h +@@ -174,6 +174,13 @@ struct gpt_params { + // multimodal models (see examples/llava) + std::string mmproj = ""; // path to multimodal projector + std::vector image; // path to image file(s) ++ ++ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. ++ // If the provided progress_callback returns true, model loading continues. ++ // If it returns false, model loading is immediately aborted. 
++ llama_progress_callback progress_callback = NULL; ++ // context pointer passed to the progress callback ++ void * progress_callback_user_data; + }; + + void gpt_params_handle_model_default(gpt_params & params); diff --git a/llm/server.go b/llm/server.go index c63a76a4..384d31ca 100644 --- a/llm/server.go +++ b/llm/server.go @@ -55,6 +55,7 @@ type llmServer struct { totalLayers uint64 gpuCount int loadDuration time.Duration // Record how long it took the model to load + loadProgress float32 sem *semaphore.Weighted } @@ -425,10 +426,11 @@ func (s ServerStatus) ToString() string { } type ServerStatusResp struct { - Status string `json:"status"` - SlotsIdle int `json:"slots_idle"` - SlotsProcessing int `json:"slots_processing"` - Error string `json:"error"` + Status string `json:"status"` + SlotsIdle int `json:"slots_idle"` + SlotsProcessing int `json:"slots_processing"` + Error string `json:"error"` + Progress float32 `json:"progress"` } func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { @@ -476,6 +478,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { case "no slot available": return ServerStatusNoSlotsAvailable, nil case "loading model": + s.loadProgress = status.Progress return ServerStatusLoadingModel, nil default: return ServerStatusError, fmt.Errorf("server error: %+v", status) @@ -516,7 +519,8 @@ func (s *llmServer) Ping(ctx context.Context) error { func (s *llmServer) WaitUntilRunning(ctx context.Context) error { start := time.Now() - expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load + stallDuration := 60 * time.Second + stallTimer := time.Now().Add(stallDuration) // give up if we stall for slog.Info("waiting for llama runner to start responding") var lastStatus ServerStatus = -1 @@ -534,13 +538,13 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { return fmt.Errorf("llama runner process has terminated: %v %s", err, msg) default: } - if time.Now().After(expiresAt) { + if time.Now().After(stallTimer) { // timeout msg := "" if s.status != nil && s.status.LastErrMsg != "" { msg = s.status.LastErrMsg } - return fmt.Errorf("timed out waiting for llama runner to start: %s", msg) + return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg) } if s.cmd.ProcessState != nil { msg := "" @@ -551,6 +555,7 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { } ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() + priorProgress := s.loadProgress status, _ := s.getServerStatus(ctx) if lastStatus != status && status != ServerStatusReady { // Only log on status changes @@ -563,6 +568,11 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { return nil default: lastStatus = status + // Reset the timer as long as we're making forward progress on the load + if priorProgress != s.loadProgress { + slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress)) + stallTimer = time.Now().Add(stallDuration) + } time.Sleep(time.Millisecond * 250) continue } From 714adb8bd1e580c42b0223d1966fe13f542d899d Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 23 May 2024 14:16:26 -0700 Subject: [PATCH 29/31] bump (#4597) --- llm/ext_server/server.cpp | 4 +-- llm/llama.cpp | 2 +- llm/patches/03-load_exception.diff | 23 ++++++++++++---- llm/patches/05-default-pretokenizer.diff | 35 ++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 8 deletions(-) create 
mode 100644 llm/patches/05-default-pretokenizer.diff diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index e342d5f1..e0424a92 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -738,7 +738,7 @@ struct llama_server_context sampler_names.emplace_back(sampler_name); } } - slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false); + slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false); } else { @@ -1096,7 +1096,7 @@ struct llama_server_context std::vector samplers_sequence; for (const auto &sampler_type : slot.sparams.samplers_sequence) { - samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type)); + samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type)); } return json { diff --git a/llm/llama.cpp b/llm/llama.cpp index 614d3b91..74f33adf 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 614d3b914e1c3e02596f869649eb4f1d3b68614d +Subproject commit 74f33adf5f8b20b08fc5a6aa17ce081abe86ef2f diff --git a/llm/patches/03-load_exception.diff b/llm/patches/03-load_exception.diff index 9e838fa9..eb245c2a 100644 --- a/llm/patches/03-load_exception.diff +++ b/llm/patches/03-load_exception.diff @@ -1,8 +1,17 @@ +From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Thu, 23 May 2024 11:18:45 -0700 +Subject: [PATCH] throw exception on load errors + +--- + llama.cpp | 25 ++++++++++++++++--------- + 1 file changed, 16 insertions(+), 9 deletions(-) + diff --git a/llama.cpp b/llama.cpp -index 4225f955..7b762f86 100644 +index 15c66077..8ba90b6a 100644 --- a/llama.cpp +++ b/llama.cpp -@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam +@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam } } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); @@ -11,10 +20,10 @@ index 4225f955..7b762f86 100644 } return 0; -@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file( - }; +@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file( + } + model->rpc_servers.push_back(servers); } - - int status = llama_model_load(path_model, *model, params); - GGML_ASSERT(status <= 0); - if (status < 0) { @@ -22,6 +31,7 @@ index 4225f955..7b762f86 100644 - LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); - } else if (status == -2) { - LLAMA_LOG_INFO("%s: cancelled model load\n", __func__); ++ + try { + int status = llama_model_load(path_model, *model, params); + GGML_ASSERT(status <= 0); @@ -42,3 +52,6 @@ index 4225f955..7b762f86 100644 } return model; +-- +2.45.1 + diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff new file mode 100644 index 00000000..0d0bf05d --- /dev/null +++ b/llm/patches/05-default-pretokenizer.diff @@ -0,0 +1,35 @@ +From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Thu, 23 May 2024 11:33:20 -0700 +Subject: [PATCH] default pretokenizer on unrecognized type + +--- + llama.cpp | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +diff --git a/llama.cpp b/llama.cpp +index 15c66077..af1aede3 100644 +--- a/llama.cpp ++++ b/llama.cpp +@@ -4504,9 +4504,6 @@ static void llm_load_vocab( + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + vocab.type_pre = 
LLAMA_VOCAB_PRE_TYPE_DEFAULT; +- } else if ( +- tokenizer_pre == "default") { +- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "llama3" || + tokenizer_pre == "llama-v3" || +@@ -4553,7 +4550,7 @@ static void llm_load_vocab( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; + } else { +- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); ++ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } + } else { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; +-- +2.45.1 + From 1b2d15609407fde0365218ecf7f9d917839a9ec8 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Thu, 23 May 2024 14:24:07 -0700 Subject: [PATCH 30/31] Tidy up developer guide a little --- README.md | 20 +------------------- docs/development.md | 2 ++ 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 81c0b061..76ade550 100644 --- a/README.md +++ b/README.md @@ -194,25 +194,7 @@ ollama list ## Building -Install `cmake` and `go`: - -``` -brew install cmake go -``` - -Then generate dependencies: - -``` -go generate ./... -``` - -Then build the binary: - -``` -go build . -``` - -More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md) +See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md) ### Running local builds diff --git a/docs/development.md b/docs/development.md index 2f7b9ecf..8c035a51 100644 --- a/docs/development.md +++ b/docs/development.md @@ -6,6 +6,8 @@ Install required tools: - go version 1.22 or higher - gcc version 11.4.0 or higher +### MacOS + ```bash brew install go cmake gcc ``` From afd2b058b4ee36230ab2a06927bdc0ff41b1e7ae Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 23 May 2024 22:46:23 -0700 Subject: [PATCH 31/31] set codesign timeout to longer (#4605) --- .github/workflows/release.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4adab4f8..40f9c41f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -28,6 +28,7 @@ jobs: security unlock-keychain -p password build.keychain security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain + security set-keychain-settings -lut 3600 build.keychain - uses: actions/setup-go@v5 with: go-version-file: go.mod
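The fileType changes in [PATCH 27/31] keep ParseFileType and fileType.String as mirror tables. Below is a minimal sketch of a table-driven round-trip check over the newly added names; it assumes it sits next to llm/filetype.go inside the llm package (fileType is unexported), and the test name is illustrative only.

package llm

import "testing"

// TestNewIQFileTypesRoundTrip checks that every quantization name added in
// this patch parses and then prints back to the same string.
func TestNewIQFileTypesRoundTrip(t *testing.T) {
	for _, name := range []string{
		"IQ1_S", "IQ1_M", "IQ2_S", "IQ2_M",
		"IQ3_XS", "IQ3_S", "IQ4_NL", "IQ4_XS", "BF16",
	} {
		ft, err := ParseFileType(name)
		if err != nil {
			t.Fatalf("ParseFileType(%q): %v", name, err)
		}
		if got := ft.String(); got != name {
			t.Errorf("round trip: got %q, want %q", got, name)
		}
	}
}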
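The new blockSize and typeSize cases in llm/ggml.go together determine how many bytes a quantized tensor occupies: elements are grouped into blocks, and each block has a fixed byte cost. A minimal sketch of that accounting for IQ4_XS (kind 23) follows; the helper names and the 4096x4096 tensor are illustrative only, and the dimensions are assumed to be an exact multiple of the block size.

package main

import "fmt"

// blockSizeFor mirrors the switch in llm/ggml.go: scalar types pack one value
// per block, the classic Q4/Q8 formats and IQ4_NL use 32-element blocks, and
// the K-quants plus the remaining IQ formats use 256-element super-blocks.
func blockSizeFor(kind uint64) uint64 {
	switch kind {
	case 0, 1, 24, 25, 26, 27, 28, 31: // F32, F16, I8, I16, I32, I64, F64, BF16
		return 1
	case 2, 3, 8, 9, 20: // Q4_0, Q4_1, Q8_0, Q8_1, IQ4_NL
		return 32
	default:
		return 256
	}
}

// bytesPerBlockIQ4XS mirrors case 23 in typeSize: a 2-byte scale, 2 bytes of
// high scale bits, half a byte per weight, and 1 extra byte per 64 weights.
func bytesPerBlockIQ4XS(blockSize uint64) uint64 {
	return 2 + 2 + blockSize/2 + blockSize/64
}

func main() {
	const kindIQ4XS = 23
	bs := blockSizeFor(kindIQ4XS)     // 256
	perBlock := bytesPerBlockIQ4XS(bs) // 136 bytes per 256 weights

	// Storage for a hypothetical 4096x4096 weight tensor quantized as IQ4_XS.
	elements := uint64(4096 * 4096)
	total := elements / bs * perBlock
	fmt.Printf("block size %d, %d bytes/block, tensor ~ %d bytes\n", bs, perBlock, total)
}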
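With [PATCH 28/31] applied, the runner's /health endpoint reports a progress value while the model is still loading, which llm/server.go reads to reset its stall timer instead of relying on one long fixed timeout. A minimal sketch of a client polling that endpoint is below; the fixed port is an assumption for illustration, since the real runner is launched on a dynamically chosen port, and the "ok" terminal status is likewise assumed.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// loadStatus mirrors the JSON the patched handler returns while loading:
// {"status": "loading model", "progress": 0.42} with HTTP 503.
type loadStatus struct {
	Status   string  `json:"status"`
	Progress float32 `json:"progress"`
}

func main() {
	for {
		resp, err := http.Get("http://127.0.0.1:8080/health")
		if err != nil {
			time.Sleep(250 * time.Millisecond)
			continue
		}

		var status loadStatus
		err = json.NewDecoder(resp.Body).Decode(&status)
		resp.Body.Close()
		if err != nil {
			time.Sleep(250 * time.Millisecond)
			continue
		}

		fmt.Printf("status=%s progress=%0.2f\n", status.Status, status.Progress)
		if status.Status == "ok" {
			return // model finished loading
		}
		time.Sleep(250 * time.Millisecond)
	}
}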