Compare commits


80 Commits

Author SHA1 Message Date
likelovewant
6f03952a62 Update amd_linux.go
remove old GPU limits that are no longer needed with the new ROCm support
2024-05-24 15:30:41 +08:00
likelovewant
0e5b263a60 Update amd_windows.go
add iGPU support and remove the old limits; without this, gfx1035 reports as not working
2024-05-24 15:26:24 +08:00
likelovewant
4e37e24b04 Merge branch 'ollama:main' into main 2024-05-24 15:19:08 +08:00
Jeffrey Morgan
afd2b058b4 set codesign timeout to longer (#4605) 2024-05-23 22:46:23 -07:00
Daniel Hiltgen
89bf98bcf2 Merge pull request #4598 from dhiltgen/docs
Tidy up developer guide a little
2024-05-23 15:14:29 -07:00
Daniel Hiltgen
1b2d156094 Tidy up developer guide a little 2024-05-23 15:14:05 -07:00
Michael Yang
714adb8bd1 bump (#4597) 2024-05-23 14:16:26 -07:00
Daniel Hiltgen
95b1133d0c Merge pull request #4547 from dhiltgen/load_progress
Wire up load progress
2024-05-23 14:06:02 -07:00
Daniel Hiltgen
b37b496a12 Wire up load progress
This doesn't expose a UX yet, but wires the initial server portion
of progress reporting during load
2024-05-23 13:36:48 -07:00
Bruce MacDonald
d6f692ad1a Add support for IQ1_S, IQ3_S, IQ2_S, IQ4_XS, IQ4_NL (#4322)
Co-authored-by: ManniX-ITA <20623405+mann1x@users.noreply.github.com>
2024-05-23 13:21:49 -07:00
Jeffrey Morgan
38255d2af1 Use flash attention flag for now (#4580)
* put flash attention behind flag for now

* add test

* remove print

* up timeout for scheduler tests
2024-05-22 21:52:09 -07:00
Michael
73630a7e85 add phi 3 medium (#4578) 2024-05-22 12:53:45 -04:00
Ikko Eltociear Ashimine
955c317cab chore: update tokenizer.go (#4571)
PreTokenziers -> PreTokenizers
2024-05-22 00:25:23 -07:00
Josh
9f18b88a06 Merge pull request #4566 from ollama/jyan/shortcuts
add Ctrl + W shortcut
2024-05-21 22:49:36 -07:00
Josh Yan
353f83a9c7 add Ctrl + W shortcut 2024-05-21 16:55:09 -07:00
Patrick Devine
3bade04e10 doc updates for the faq/troubleshooting (#4565) 2024-05-21 15:30:09 -07:00
Michael Yang
a6d0f443eb Merge pull request #4543 from ollama/mxyng/simple-safetensors
simplify safetensors reading
2024-05-21 14:43:55 -07:00
Michael Yang
96236b7968 Merge pull request #4268 from ollama/pdevine/llama3
Convert directly from llama3
2024-05-21 14:43:37 -07:00
Sang Park
4434d7f447 Correct typo in error message (#4535)
Corrects the spelling of "request", which was previously written as "requeset" in the error log message.
2024-05-21 13:39:01 -07:00
Michael Yang
171eb040fc simplify safetensors reading 2024-05-21 11:28:22 -07:00
Michael Yang
3591bbe56f add test 2024-05-21 11:28:22 -07:00
Michael Yang
34d5ef29b3 fix conversion for f16 or f32 inputs 2024-05-21 11:28:22 -07:00
Michael Yang
bbbd9f20f3 cleanup 2024-05-20 16:13:57 -07:00
Michael Yang
547132e820 bpe pretokenizer 2024-05-20 16:13:57 -07:00
Patrick Devine
2d315ba9a9 add missing file 2024-05-20 16:13:57 -07:00
Patrick Devine
d355d2020f add fixes for llama 2024-05-20 16:13:57 -07:00
Patrick Devine
c8cf0d94ed llama3 conversion 2024-05-20 16:13:57 -07:00
Patrick Devine
4730762e5c add safetensors version 2024-05-20 16:13:57 -07:00
Patrick Devine
d88582dffd some changes for llama3 2024-05-20 16:13:57 -07:00
Michael Yang
2f81b3dce2 Merge pull request #4502 from ollama/mxyng/fix-quantize
fix quantize file types
2024-05-20 16:09:27 -07:00
jmorganca
5cab13739e set llama.cpp submodule commit to 614d3b9 2024-05-20 15:28:17 -07:00
Josh Yan
8aadad9c72 updated updateURL 2024-05-20 15:24:32 -07:00
Michael Yang
807d092761 fix quantize file types 2024-05-20 15:22:11 -07:00
Michael Yang
f36f1d6be9 tidy intermediate blobs 2024-05-20 15:15:06 -07:00
alwqx
8800c8a59b chore: fix typo in docs (#4536) 2024-05-20 14:19:03 -07:00
Michael Yang
b4dce13309 Merge pull request #4330 from ollama/mxyng/cache-intermediate-layers
cache and reuse intermediate blobs
2024-05-20 13:54:41 -07:00
Sam
e15307fdf4 feat: add support for flash_attn (#4120)
* feat: enable flash attention if supported

* feat: enable flash attention if supported

* feat: enable flash attention if supported

* feat: add flash_attn support
2024-05-20 13:36:03 -07:00
Michael Yang
3520c0e4d5 cache and reuse intermediate blobs
particularly useful for zipfiles and f16s
2024-05-20 13:25:10 -07:00
Patrick Devine
ccdf0b2a44 Move the parser back + handle utf16 files (#4533) 2024-05-20 11:26:45 -07:00
jmorganca
63a453554d go mod tidy 2024-05-19 23:03:57 -07:00
Patrick Devine
105186aa17 add OLLAMA_NOHISTORY to turn off history in interactive mode (#4508) 2024-05-18 11:51:57 -07:00
likelovewant
fc2f25c1d5 Merge branch 'ollama:main' into main 2024-05-18 13:52:47 +08:00
Daniel Hiltgen
ba04afc9a4 Merge pull request #4483 from dhiltgen/clean_exit
Don't return error on signal exit
2024-05-17 11:41:57 -07:00
Daniel Hiltgen
7e1e0086e7 Merge pull request #4482 from dhiltgen/integration_improvements
Skip max queue test on remote
2024-05-16 16:43:48 -07:00
Daniel Hiltgen
02b31c9dc8 Don't return error on signal exit 2024-05-16 16:25:38 -07:00
Daniel Hiltgen
7f2fbad736 Skip max queue test on remote
This test needs to adjust the queue size down from our default
setting to be reliable, so it must be skipped in remote test
execution mode.
2024-05-16 16:24:18 -07:00
Josh
5bece94509 Merge pull request #4463 from ollama/jyan/line-display
changed line display to be calculated with runewidth
2024-05-16 14:15:08 -07:00
Josh Yan
3d90156e99 removed comment 2024-05-16 14:12:03 -07:00
Rose Heart
5e46c5c435 Updating software for read me (#4467)
* Update README.md

Added chat/moderation bot to list of software.

* Update README.md

Fixed link error.
2024-05-16 13:55:14 -07:00
Jeffrey Morgan
583c1f472c update llama.cpp submodule to 614d3b9 (#4414) 2024-05-16 13:53:09 -07:00
likelovewant
d497e31f4b Merge branch 'ollama:main' into main 2024-05-16 22:24:44 +08:00
Josh Yan
26bfc1c443 go fmt'd cmd.go 2024-05-15 17:26:39 -07:00
Josh Yan
799aa9883c go fmt'd cmd.go 2024-05-15 17:24:17 -07:00
Michael Yang
84ed77cbd8 Merge pull request #4436 from ollama/mxyng/done-part
return on part done
2024-05-15 17:16:24 -07:00
Josh Yan
c9e584fb90 updated double-width display 2024-05-15 16:45:24 -07:00
Josh Yan
17b1e81ca1 fixed width and word count for double spacing 2024-05-15 16:29:33 -07:00
Daniel Hiltgen
7e9a2da097 Merge pull request #4462 from dhiltgen/opt_out_build
Port cuda/rocm skip build vars to linux
2024-05-15 16:27:47 -07:00
Daniel Hiltgen
c48c1d7c46 Port cuda/rocm skip build vars to linux
Windows already implements these; carry them over to Linux.
2024-05-15 15:56:43 -07:00
Patrick Devine
d1692fd3e0 fix the cpu estimatedTotal memory + get the expiry time for loading models (#4461) 2024-05-15 15:43:16 -07:00
Daniel Hiltgen
5fa36a0833 Merge pull request #4459 from dhiltgen/sanitize_env_log
Sanitize the env var debug log
2024-05-15 14:58:55 -07:00
Daniel Hiltgen
853ae490e1 Sanitize the env var debug log
Only dump env vars we care about in the logs
2024-05-15 14:42:57 -07:00
Patrick Devine
f2cf97d6f1 fix typo in modelfile generation (#4439) 2024-05-14 15:34:29 -07:00
Patrick Devine
c344da4c5a fix keepalive for non-interactive mode (#4438) 2024-05-14 15:17:04 -07:00
Michael Yang
0e331c7168 Merge pull request #4328 from ollama/mxyng/mem
count memory up to NumGPU if set by user
2024-05-14 13:47:44 -07:00
Michael Yang
ac145f75ca return on part done 2024-05-14 13:04:30 -07:00
Patrick Devine
a4b8d1f89a re-add system context (#4435) 2024-05-14 11:38:20 -07:00
Ryo Machida
798b107f19 Fixed the API endpoint /api/tags when the model list is empty. (#4424)
* Fixed the API endpoint /api/tags to return {models: []} instead of {models: null} when the model list is empty.

* Update server/routes.go

---------

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2024-05-14 11:18:10 -07:00
Daniel Hiltgen
6a1b471365 Merge pull request #4430 from dhiltgen/gpu_info
Remove VRAM convergence check for windows
2024-05-14 10:59:06 -07:00
Daniel Hiltgen
ec231a7923 Remove VRAM convergence check for windows
The APIs we query are optimistic on free space, and windows pages
VRAM, so we don't have to wait to see reported usage recover on unload
2024-05-14 09:53:46 -07:00
Patrick Devine
7ca71a6b0f don't abort when an invalid model name is used in /save (#4416) 2024-05-13 18:48:28 -07:00
Josh
7607e6e902 Merge pull request #4379 from WolfTheDeveloper/main
Update `LlamaScript` to point to new link from Legacy link.
2024-05-13 18:08:32 -07:00
Patrick Devine
f1548ef62d update the FAQ to be more clear about windows env variables (#4415) 2024-05-13 18:01:13 -07:00
Patrick Devine
6845988807 Ollama ps command for showing currently loaded models (#4327) 2024-05-13 17:17:36 -07:00
Josh
9eed4a90ce Merge pull request #4411 from joshyan1/main
removed inconsistent punctuation
2024-05-13 15:30:45 -07:00
Josh Yan
f8464785a6 removed inconsistencies 2024-05-13 14:50:52 -07:00
Michael Yang
1d359e737e typo 2024-05-13 14:18:34 -07:00
Michael Yang
50b9056e09 count memory up to NumGPU 2024-05-13 14:13:10 -07:00
Josh Yan
91a090a485 removed inconsistent punctuation 2024-05-13 14:08:22 -07:00
睡觉型学渣
9c76b30d72 Correct typos. (#4387)
* Correct typos.

* Correct typos.
2024-05-12 18:21:11 -07:00
Zander Lewis
93f19910c5 Update LlamaScript to point to new link.
Still used Legacy link.
2024-05-12 11:24:21 -04:00
54 changed files with 1377 additions and 785 deletions

View File

@@ -28,6 +28,7 @@ jobs:
 security unlock-keychain -p password build.keychain
 security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
 security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
+security set-keychain-settings -lut 3600 build.keychain
 - uses: actions/setup-go@v5
   with:
     go-version-file: go.mod

View File

@@ -69,15 +69,17 @@ Here are some example models that can be downloaded:
 | ------------------ | ---------- | ----- | ------------------------------ |
 | Llama 3            | 8B         | 4.7GB | `ollama run llama3`            |
 | Llama 3            | 70B        | 40GB  | `ollama run llama3:70b`        |
-| Phi-3              | 3.8B       | 2.3GB | `ollama run phi3`              |
+| Phi 3 Mini         | 3.8B       | 2.3GB | `ollama run phi3`              |
+| Phi 3 Medium       | 14B        | 7.9GB | `ollama run phi3:medium`       |
+| Gemma              | 2B         | 1.4GB | `ollama run gemma:2b`          |
+| Gemma              | 7B         | 4.8GB | `ollama run gemma:7b`          |
 | Mistral            | 7B         | 4.1GB | `ollama run mistral`           |
+| Moondream 2        | 1.4B       | 829MB | `ollama run moondream`         |
 | Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`       |
 | Starling           | 7B         | 4.1GB | `ollama run starling-lm`       |
 | Code Llama         | 7B         | 3.8GB | `ollama run codellama`         |
 | Llama 2 Uncensored | 7B         | 3.8GB | `ollama run llama2-uncensored` |
 | LLaVA              | 7B         | 4.5GB | `ollama run llava`             |
-| Gemma              | 2B         | 1.4GB | `ollama run gemma:2b`          |
-| Gemma              | 7B         | 4.8GB | `ollama run gemma:7b`          |
 | Solar              | 10.7B      | 6.1GB | `ollama run solar`             |

 > Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
@@ -210,25 +212,7 @@ ollama list
 ## Building

-Install `cmake` and `go`:
-
-```
-brew install cmake go
-```
-
-Then generate dependencies:
-
-```
-go generate ./...
-```
-
-Then build the binary:
-
-```
-go build .
-```
-
-More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
+See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)

 ### Running local builds
@@ -377,7 +361,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Testcontainers](https://testcontainers.com/modules/ollama/)
 - [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
 - [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
-- [LlamaScript](https://github.com/WolfTheDeveloper/llamascript)
+- [LlamaScript](https://github.com/Project-Llama/llamascript)
 ### Mobile

 - [Enchanted](https://github.com/AugustDev/enchanted)
@@ -409,6 +393,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) (Telegram bot using Ollama in backend)
 - [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
 - [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
+- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.

 ### Supported backends

 - [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.

View File

@@ -354,6 +354,15 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
     return &lr, nil
 }

+// List running models.
+func (c *Client) ListRunning(ctx context.Context) (*ListResponse, error) {
+    var lr ListResponse
+    if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
+        return nil, err
+    }
+    return &lr, nil
+}
+
 // Copy copies a model - creating a model with another name from an existing
 // model.
 func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
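As a usage note, here is a minimal sketch of calling the new method from Go client code. This example is not part of the diff; it assumes a reachable Ollama server and uses the ModelResponse fields added in the next file.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	// ClientFromEnvironment honors OLLAMA_HOST.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// ListRunning hits the new /api/ps endpoint added in this diff.
	resp, err := client.ListRunning(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	for _, m := range resp.Models {
		fmt.Println(m.Name, m.SizeVRAM)
	}
}
```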

View File

@@ -289,10 +289,12 @@ type ListResponse struct {
 type ModelResponse struct {
     Name       string       `json:"name"`
     Model      string       `json:"model"`
-    ModifiedAt time.Time    `json:"modified_at"`
+    ModifiedAt time.Time    `json:"modified_at,omitempty"`
     Size       int64        `json:"size"`
     Digest     string       `json:"digest"`
     Details    ModelDetails `json:"details,omitempty"`
+    ExpiresAt  time.Time    `json:"expires_at,omitempty"`
+    SizeVRAM   int64        `json:"size_vram,omitempty"`
 }

 type TokenResponse struct {
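With these tags, an /api/ps entry would serialize roughly as below. All values are invented for illustration; modified_at drops out of the running-models listing thanks to omitempty.

```json
{
  "models": [
    {
      "name": "llama3:latest",
      "model": "llama3:latest",
      "size": 5137025024,
      "digest": "sha256:0123456789ab...",
      "expires_at": "2024-05-24T15:35:00-07:00",
      "size_vram": 5137025024
    }
  ]
}
```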

View File

@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"math"
"net" "net"
"net/http" "net/http"
"os" "os"
@@ -24,7 +25,7 @@ import (
"time" "time"
"github.com/containerd/console" "github.com/containerd/console"
"github.com/mattn/go-runewidth"
"github.com/olekukonko/tablewriter" "github.com/olekukonko/tablewriter"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
@@ -34,6 +35,7 @@ import (
"github.com/ollama/ollama/api" "github.com/ollama/ollama/api"
"github.com/ollama/ollama/auth" "github.com/ollama/ollama/auth"
"github.com/ollama/ollama/format" "github.com/ollama/ollama/format"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/progress" "github.com/ollama/ollama/progress"
"github.com/ollama/ollama/server" "github.com/ollama/ollama/server"
"github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/errtypes"
@@ -62,7 +64,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
     }
     defer f.Close()

-    modelfile, err := model.ParseFile(f)
+    modelfile, err := parser.ParseFile(f)
     if err != nil {
         return err
     }
@@ -206,7 +208,7 @@ func tempZipFiles(path string) (string, error) {
         // pytorch files might also be unresolved git lfs references; skip if they are
         // covers pytorch_model-x-of-y.bin, pytorch_model.fp32-x-of-y.bin, pytorch_model.bin
         files = append(files, pt...)
-    } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/octet-stream"); len(pt) > 0 {
+    } else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/zip"); len(pt) > 0 {
         // pytorch files might also be unresolved git lfs references; skip if they are
         // covers consolidated.x.pth, consolidated.pth
         files = append(files, pt...)
@@ -324,6 +326,18 @@ func RunHandler(cmd *cobra.Command, args []string) error {
     }
     opts.Format = format

+    keepAlive, err := cmd.Flags().GetString("keepalive")
+    if err != nil {
+        return err
+    }
+    if keepAlive != "" {
+        d, err := time.ParseDuration(keepAlive)
+        if err != nil {
+            return err
+        }
+        opts.KeepAlive = &api.Duration{Duration: d}
+    }
+
     prompts := args[1:]
     // prepend stdin to the prompt if provided
     if !term.IsTerminal(int(os.Stdin.Fd())) {
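Since the flag value goes through time.ParseDuration, any Go duration string works. A hypothetical invocation (model name illustrative):

```shell
ollama run llama3 --keepalive 10m
```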
@@ -496,6 +510,52 @@ func ListHandler(cmd *cobra.Command, args []string) error {
     return nil
 }

+func ListRunningHandler(cmd *cobra.Command, args []string) error {
+    client, err := api.ClientFromEnvironment()
+    if err != nil {
+        return err
+    }
+
+    models, err := client.ListRunning(cmd.Context())
+    if err != nil {
+        return err
+    }
+
+    var data [][]string
+
+    for _, m := range models.Models {
+        if len(args) == 0 || strings.HasPrefix(m.Name, args[0]) {
+            var procStr string
+            switch {
+            case m.SizeVRAM == 0:
+                procStr = "100% CPU"
+            case m.SizeVRAM == m.Size:
+                procStr = "100% GPU"
+            case m.SizeVRAM > m.Size || m.Size == 0:
+                procStr = "Unknown"
+            default:
+                sizeCPU := m.Size - m.SizeVRAM
+                cpuPercent := math.Round(float64(sizeCPU) / float64(m.Size) * 100)
+                procStr = fmt.Sprintf("%d%%/%d%% CPU/GPU", int(cpuPercent), int(100-cpuPercent))
+            }
+            data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, format.HumanTime(m.ExpiresAt, "Never")})
+        }
+    }
+
+    table := tablewriter.NewWriter(os.Stdout)
+    table.SetHeader([]string{"NAME", "ID", "SIZE", "PROCESSOR", "UNTIL"})
+    table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
+    table.SetAlignment(tablewriter.ALIGN_LEFT)
+    table.SetHeaderLine(false)
+    table.SetBorder(false)
+    table.SetNoWhiteSpace(true)
+    table.SetTablePadding("\t")
+    table.AppendBulk(data)
+    table.Render()
+
+    return nil
+}
+
 func DeleteHandler(cmd *cobra.Command, args []string) error {
     client, err := api.ClientFromEnvironment()
     if err != nil {
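The handler renders output roughly like the sketch below. The ID, size, and expiry values are invented, and format.HumanTime's exact phrasing may differ.

```
$ ollama ps
NAME            ID            SIZE    PROCESSOR   UNTIL
llama3:latest   365c0bd3c000  5.4 GB  100% GPU    4 minutes from now
```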
@@ -672,6 +732,7 @@ type runOptions struct {
     Images     []api.ImageData
     Options    map[string]interface{}
     MultiModal bool
+    KeepAlive  *api.Duration
 }

 type displayResponseState struct {
@@ -684,7 +745,8 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
     if wordWrap && termWidth >= 10 {
         for _, ch := range content {
             if state.lineLength+1 > termWidth-5 {
-                if len(state.wordBuffer) > termWidth-10 {
+                if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
                     fmt.Printf("%s%c", state.wordBuffer, ch)
                     state.wordBuffer = ""
                     state.lineLength = 0
@@ -692,12 +754,18 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
                 }

                 // backtrack the length of the last word and clear to the end of the line
-                fmt.Printf("\x1b[%dD\x1b[K\n", len(state.wordBuffer))
+                fmt.Printf("\x1b[%dD\x1b[K\n", runewidth.StringWidth(state.wordBuffer))
                 fmt.Printf("%s%c", state.wordBuffer, ch)
-                state.lineLength = len(state.wordBuffer) + 1
+                chWidth := runewidth.RuneWidth(ch)
+                state.lineLength = runewidth.StringWidth(state.wordBuffer) + chWidth
             } else {
                 fmt.Print(string(ch))
-                state.lineLength += 1
+                state.lineLength += runewidth.RuneWidth(ch)
+                if runewidth.RuneWidth(ch) >= 2 {
+                    state.wordBuffer = ""
+                    continue
+                }

                 switch ch {
                 case ' ':
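The move from len to runewidth matters because byte length, rune count, and terminal cell width all disagree for double-width (e.g. CJK) text. A minimal standalone illustration, not part of the diff:

```go
package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "日本語"
	fmt.Println(len(s))                    // 9: UTF-8 bytes
	fmt.Println(utf8.RuneCountInString(s)) // 3: runes
	fmt.Println(runewidth.StringWidth(s))  // 6: terminal cells (2 per CJK rune)
}
```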
@@ -766,6 +834,10 @@ func chat(cmd *cobra.Command, opts runOptions) (*api.Message, error) {
         Options:  opts.Options,
     }

+    if opts.KeepAlive != nil {
+        req.KeepAlive = opts.KeepAlive
+    }
+
     if err := client.Chat(cancelCtx, req, fn); err != nil {
         if errors.Is(err, context.Canceled) {
             return nil, nil
@@ -841,14 +913,15 @@ func generate(cmd *cobra.Command, opts runOptions) error {
     }

     request := api.GenerateRequest{
         Model:     opts.Model,
         Prompt:    opts.Prompt,
         Context:   generateContext,
         Images:    opts.Images,
         Format:    opts.Format,
         System:    opts.System,
         Template:  opts.Template,
         Options:   opts.Options,
+        KeepAlive: opts.KeepAlive,
     }

     if err := client.Generate(ctx, &request, fn); err != nil {
@@ -1006,12 +1079,24 @@ func versionHandler(cmd *cobra.Command, _ []string) {
     }
 }

-func appendHostEnvDocs(cmd *cobra.Command) {
-    const hostEnvDocs = `
+type EnvironmentVar struct {
+    Name        string
+    Description string
+}
+
+func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) {
+    if len(envs) == 0 {
+        return
+    }
+
+    envUsage := `
 Environment Variables:
-      OLLAMA_HOST        The host:port or base URL of the Ollama server (e.g. http://localhost:11434)
 `
+    for _, e := range envs {
+        envUsage += fmt.Sprintf(" %-16s %s\n", e.Name, e.Description)
+    }
+
-    cmd.SetUsageTemplate(cmd.UsageTemplate() + hostEnvDocs)
+    cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
 }

 func NewCLI() *cobra.Command {
@@ -1075,6 +1160,7 @@ func NewCLI() *cobra.Command {
         RunE:    RunHandler,
     }

+    runCmd.Flags().String("keepalive", "", "Duration to keep a model loaded (e.g. 5m)")
     runCmd.Flags().Bool("verbose", false, "Show timings for response")
     runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
     runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically")
@@ -1090,9 +1176,9 @@ func NewCLI() *cobra.Command {
 Environment Variables:
       OLLAMA_HOST         The host:port to bind to (default "127.0.0.1:11434")
-      OLLAMA_ORIGINS      A comma separated list of allowed origins.
-      OLLAMA_MODELS       The path to the models directory (default is "~/.ollama/models")
-      OLLAMA_KEEP_ALIVE   The duration that models stay loaded in memory (default is "5m")
+      OLLAMA_ORIGINS      A comma separated list of allowed origins
+      OLLAMA_MODELS       The path to the models directory (default "~/.ollama/models")
+      OLLAMA_KEEP_ALIVE   The duration that models stay loaded in memory (default "5m")
       OLLAMA_DEBUG        Set to 1 to enable additional debug logging
 `)
@@ -1123,6 +1209,14 @@ Environment Variables:
         PreRunE: checkServerHeartbeat,
         RunE:    ListHandler,
     }

+    psCmd := &cobra.Command{
+        Use:     "ps",
+        Short:   "List running models",
+        PreRunE: checkServerHeartbeat,
+        RunE:    ListRunningHandler,
+    }
+
     copyCmd := &cobra.Command{
         Use:   "cp SOURCE DESTINATION",
         Short: "Copy a model",
@@ -1139,6 +1233,10 @@ Environment Variables:
         RunE:    DeleteHandler,
     }

+    ollamaHostEnv := EnvironmentVar{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"}
+    ollamaNoHistoryEnv := EnvironmentVar{"OLLAMA_NOHISTORY", "Disable readline history"}
+    envs := []EnvironmentVar{ollamaHostEnv}
+
     for _, cmd := range []*cobra.Command{
         createCmd,
         showCmd,
@@ -1146,10 +1244,16 @@ Environment Variables:
         pullCmd,
         pushCmd,
         listCmd,
+        psCmd,
         copyCmd,
         deleteCmd,
     } {
-        appendHostEnvDocs(cmd)
+        switch cmd {
+        case runCmd:
+            appendEnvDocs(cmd, []EnvironmentVar{ollamaHostEnv, ollamaNoHistoryEnv})
+        default:
+            appendEnvDocs(cmd, envs)
+        }
     }

     rootCmd.AddCommand(
@@ -1160,6 +1264,7 @@ Environment Variables:
         pullCmd,
         pushCmd,
         listCmd,
+        psCmd,
         copyCmd,
         deleteCmd,
     )

View File

@@ -17,6 +17,7 @@ import (
"github.com/ollama/ollama/api" "github.com/ollama/ollama/api"
"github.com/ollama/ollama/progress" "github.com/ollama/ollama/progress"
"github.com/ollama/ollama/readline" "github.com/ollama/ollama/readline"
"github.com/ollama/ollama/types/errtypes"
) )
type MultilineState int type MultilineState int
@@ -56,6 +57,11 @@ func loadModel(cmd *cobra.Command, opts *runOptions) error {
         Model:    opts.Model,
         Messages: []api.Message{},
     }

+    if opts.KeepAlive != nil {
+        chatReq.KeepAlive = opts.KeepAlive
+    }
+
     err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
         p.StopAndClear()
         if len(opts.Messages) > 0 {
@@ -132,6 +138,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word") fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor") fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor") fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
fmt.Fprintln(os.Stderr, "") fmt.Fprintln(os.Stderr, "")
fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen") fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding") fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
@@ -176,6 +183,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
         return err
     }

+    if os.Getenv("OLLAMA_NOHISTORY") != "" {
+        scanner.HistoryDisable()
+    }
+
     fmt.Print(readline.StartBracketedPaste)
     defer fmt.Printf(readline.EndBracketedPaste)
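Usage is just the environment variable; any non-empty value disables history (model name illustrative):

```shell
OLLAMA_NOHISTORY=1 ollama run llama3
```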
@@ -276,13 +287,20 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
             fn := func(resp api.ProgressResponse) error { return nil }
             err = client.Create(cmd.Context(), req, fn)
             if err != nil {
-                fmt.Println("error: couldn't save model")
+                if strings.Contains(err.Error(), errtypes.InvalidModelNameErrMsg) {
+                    fmt.Printf("error: The model name '%s' is invalid\n", args[1])
+                    continue
+                }
                 return err
             }
             fmt.Printf("Created new model '%s'\n", args[1])
             continue
         case strings.HasPrefix(line, "/clear"):
             opts.Messages = []api.Message{}
+            if opts.System != "" {
+                newMessage := api.Message{Role: "system", Content: opts.System}
+                opts.Messages = append(opts.Messages, newMessage)
+            }
             fmt.Println("Cleared session context")
             continue
         case strings.HasPrefix(line, "/set"):

View File

@@ -18,6 +18,16 @@ import (
"github.com/ollama/ollama/llm" "github.com/ollama/ollama/llm"
) )
const (
_ int32 = iota
tokenTypeNormal
tokenTypeUnknown
tokenTypeControl
tokenTypeUserDefined
tokenTypeUnused
tokenTypeByte
)
type Params struct { type Params struct {
Architectures []string `json:"architectures"` Architectures []string `json:"architectures"`
VocabSize int `json:"vocab_size"` VocabSize int `json:"vocab_size"`
@@ -37,6 +47,8 @@ type Params struct {
     Experts     int `json:"num_local_experts"`
     ExpertsUsed int `json:"num_experts_per_tok"`

+    PreTokenizer string
+
     ByteOrder
 }
@@ -74,10 +86,9 @@ func GetModelFormat(dirname string) (ModelFormat, error) {
     }

     for _, fn := range files {
-        slog.Debug(fmt.Sprintf("file = %s", fn))
         if strings.HasSuffix(fn, ".safetensors") {
             return &SafetensorFormat{}, nil
-        } else if strings.HasSuffix(fn, ".bin") {
+        } else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") {
             slog.Debug("model is torch")
             return &TorchFormat{}, nil
         }
@@ -92,6 +103,7 @@ type Vocab struct {
     Tokens []string
     Scores []float32
     Types  []int32
+    Merges []string
 }

 func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
@@ -170,7 +182,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
         }
         v.Tokens = append(v.Tokens, t.key)
         v.Scores = append(v.Scores, -1000.0)
-        v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
+        v.Types = append(v.Types, tokenTypeUserDefined)
     }
     slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens)))
@@ -180,7 +192,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
         for cnt := 0; cnt < missingTokens; cnt++ {
             v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
             v.Scores = append(v.Scores, -1)
-            v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
+            v.Types = append(v.Types, tokenTypeUserDefined)
         }
     }

103
convert/convert_test.go Normal file
View File

@@ -0,0 +1,103 @@
//go:build slow

package convert

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/ollama/ollama/llm"
)

func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) {
    t.Helper()

    mf, err := GetModelFormat(p)
    if err != nil {
        t.Fatal(err)
    }

    params, err := mf.GetParams(p)
    if err != nil {
        t.Fatal(err)
    }

    arch, err := mf.GetModelArch("", p, params)
    if err != nil {
        t.Fatal(err)
    }

    if err := arch.LoadVocab(); err != nil {
        t.Fatal(err)
    }

    if err := arch.GetTensors(); err != nil {
        t.Fatal(err)
    }

    f, err := os.CreateTemp(t.TempDir(), "f16")
    if err != nil {
        t.Fatal(err)
    }
    defer f.Close()

    if err := arch.WriteGGUF(f); err != nil {
        t.Fatal(err)
    }

    r, err := os.Open(f.Name())
    if err != nil {
        t.Fatal(err)
    }
    defer r.Close()

    m, _, err := llm.DecodeGGML(r)
    if err != nil {
        t.Fatal(err)
    }

    return m.KV(), m.Tensors()
}

func TestConvertFull(t *testing.T) {
    cases := []struct {
        path    string
        arch    string
        tensors int
        layers  int
    }{
        {"Meta-Llama-3-8B-Instruct", "llama", 291, 35},
        {"Mistral-7B-Instruct-v0.2", "llama", 291, 35},
        {"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35},
        {"gemma-2b-it", "gemma", 164, 20},
    }

    for _, tt := range cases {
        t.Run(tt.path, func(t *testing.T) {
            p := filepath.Join("testdata", tt.path)
            if _, err := os.Stat(p); err != nil {
                t.Skipf("%s not found", p)
            }

            kv, tensors := convertFull(t, p)

            if kv.Architecture() != tt.arch {
                t.Fatalf("expected llama, got %s", kv.Architecture())
            }

            if kv.FileType().String() != "F16" {
                t.Fatalf("expected F16, got %s", kv.FileType())
            }

            if len(tensors) != tt.tensors {
                t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors))
            }

            layers := tensors.Layers()
            if len(layers) != tt.layers {
                t.Fatalf("expected %d layers, got %d", tt.layers, len(layers))
            }
        })
    }
}
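Because of the //go:build slow constraint, these tests don't run by default. A plausible invocation, assuming the listed model directories have been placed under convert/testdata:

```shell
go test -tags slow ./convert/
```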

View File

@@ -1,14 +1,11 @@
 package convert

 import (
-    "encoding/binary"
     "fmt"
     "io"
     "log/slog"
-    "os"
     "strings"

-    "github.com/d4l3k/go-bfloat16"
     "github.com/pdevine/tensor"
     "github.com/pdevine/tensor/native"
@@ -19,49 +16,27 @@ type GemmaModel struct {
     ModelData
 }

-func gemmaLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
-    slog.Debug(fmt.Sprintf("converting '%s'", r.t.Name))
-
-    data := make([]byte, r.end-r.start)
-    if err := binary.Read(f, r.bo, data); err != nil {
-        return err
-    }
-
-    tDataF32 := bfloat16.DecodeFloat32(data)
-
-    var err error
-    tDataF32, err = addOnes(tDataF32, int(r.t.Shape[0]))
-    if err != nil {
-        return err
-    }
-
-    if err := binary.Write(w, r.bo, tDataF32); err != nil {
-        return err
-    }
-    return nil
-}
-
 func addOnes(data []float32, vectorSize int) ([]float32, error) {
     n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data))
     ones := tensor.Ones(tensor.Float32, vectorSize)

-    var err error
-    n, err = n.Add(ones)
+    n, err := n.Add(ones)
     if err != nil {
-        return []float32{}, err
+        return nil, err
     }

-    newN, err := native.SelectF32(n, 0)
+    ts, err := native.SelectF32(n, 0)
     if err != nil {
-        return []float32{}, err
+        return nil, err
     }

-    var fullTensor []float32
-    for _, v := range newN {
-        fullTensor = append(fullTensor, v...)
+    var f32s []float32
+    for _, t := range ts {
+        f32s = append(f32s, t...)
     }

-    return fullTensor, nil
+    return f32s, nil
 }
func (m *GemmaModel) GetTensors() error { func (m *GemmaModel) GetTensors() error {
@@ -71,12 +46,10 @@ func (m *GemmaModel) GetTensors() error {
} }
slog.Debug(fmt.Sprintf("Total tensors: %d", len(t))) slog.Debug(fmt.Sprintf("Total tensors: %d", len(t)))
m.Tensors = []llm.Tensor{}
for _, l := range t { for _, l := range t {
if strings.HasSuffix(l.Name, "norm.weight") { if strings.HasSuffix(l.Name, "norm.weight") {
wt := l.WriterTo.(safetensorWriterTo) wt := l.WriterTo.(safetensorWriterTo)
wt.handler = gemmaLayerHandler wt.repacker = m.Repack
l.WriterTo = wt l.WriterTo = wt
} }
m.Tensors = append(m.Tensors, l) m.Tensors = append(m.Tensors, l)
@@ -94,6 +67,10 @@ func (m *GemmaModel) LoadVocab() error {
     return nil
 }

+func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) {
+    return addOnes(data, int(shape[0]))
+}
+
 func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error {
     kv := llm.KV{
         "general.architecture": "gemma",

View File

@@ -1,17 +1,17 @@
 package convert

 import (
-    "encoding/binary"
+    "cmp"
+    "errors"
     "fmt"
     "io"
-    "log/slog"
+    "os"
+    "path/filepath"
     "regexp"
     "strings"

-    "github.com/nlpodyssey/gopickle/pytorch"
     "github.com/pdevine/tensor"
     "github.com/pdevine/tensor/native"
-    "github.com/x448/float16"

     "github.com/ollama/ollama/llm"
 )
@@ -20,81 +20,12 @@ type LlamaModel struct {
     ModelData
 }

-func llamaLayerHandler(w io.Writer, r torchWriterTo) error {
-    slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name))
-
-    data := r.storage.(*pytorch.HalfStorage).Data
-    tData := make([]uint16, len(data))
-    for cnt, v := range data {
-        tData[cnt] = uint16(float16.Fromfloat32(v))
-    }
-
-    var err error
-    var heads uint32
-    if strings.Contains(r.t.Name, "attn_q") {
-        heads = uint32(r.params.AttentionHeads)
-    } else if strings.Contains(r.t.Name, "attn_k") {
-        heads = uint32(r.params.KeyValHeads)
-        if heads == 0 {
-            heads = uint32(r.params.AttentionHeads)
-        }
-    } else {
-        return fmt.Errorf("unknown layer type")
-    }
-
-    slog.Debug(fmt.Sprintf("heads = %d", heads))
-
-    tData, err = llamaRepack(tData, int(heads), r.t.Shape)
-    if err != nil {
-        return err
-    }
-
-    if err = binary.Write(w, r.bo, tData); err != nil {
-        return err
-    }
-    return nil
-}
-
-func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
-    n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
-    origShape := n.Shape().Clone()
-
-    // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
-    if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
-        return nil, err
-    }
-
-    if err := n.T(0, 2, 1, 3); err != nil {
-        return nil, err
-    }
-
-    if err := n.Reshape(origShape...); err != nil {
-        return nil, err
-    }
-
-    if err := n.Transpose(); err != nil {
-        return nil, err
-    }
-
-    newN, err := native.SelectU16(n, 1)
-    if err != nil {
-        return nil, err
-    }
-
-    var fullTensor []uint16
-    for _, v := range newN {
-        fullTensor = append(fullTensor, v...)
-    }
-
-    return fullTensor, nil
-}
-
 func (m *LlamaModel) GetTensors() error {
     t, err := m.Format.GetTensors(m.Path, m.Params)
     if err != nil {
         return err
     }

-    m.Tensors = []llm.Tensor{}
-
     pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
     re, err := regexp.Compile(pattern)
     if err != nil {
@@ -104,10 +35,16 @@ func (m *LlamaModel) GetTensors() error {
     for _, l := range t {
         matches := re.FindAllStringSubmatch(l.Name, -1)
         if len(matches) > 0 {
-            slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name))
-            wt := l.WriterTo.(torchWriterTo)
-            wt.handler = llamaLayerHandler
-            l.WriterTo = wt
+            switch m.Format.(type) {
+            case *TorchFormat:
+                wt := l.WriterTo.(torchWriterTo)
+                wt.repacker = m.Repack
+                l.WriterTo = wt
+            case *SafetensorFormat:
+                wt := l.WriterTo.(safetensorWriterTo)
+                wt.repacker = m.Repack
+                l.WriterTo = wt
+            }
         }
         m.Tensors = append(m.Tensors, l)
     }
@@ -115,19 +52,22 @@ func (m *LlamaModel) GetTensors() error {
     return nil
 }

-func (m *LlamaModel) LoadVocab() error {
-    var v *Vocab
-    var err error
-
-    slog.Debug("loading vocab")
-    v, err = LoadSentencePieceTokens(m.Path, m.Params)
-    if err != nil {
+func (m *LlamaModel) LoadVocab() (err error) {
+    pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json"))
+    if errors.Is(err, os.ErrNotExist) {
+        return nil
+    } else if err != nil {
         return err
     }

-    slog.Debug("vocab loaded")
+    m.Vocab = &Vocab{}
+    for _, t := range ts {
+        m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content)
+        m.Vocab.Types = append(m.Vocab.Types, t.Type())
+    }

-    m.Vocab = v
+    m.Vocab.Merges = merges
+    m.Params.PreTokenizer = pre
     return nil
 }
@@ -140,23 +80,79 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error {
"llama.embedding_length": uint32(m.Params.HiddenSize), "llama.embedding_length": uint32(m.Params.HiddenSize),
"llama.block_count": uint32(m.Params.HiddenLayers), "llama.block_count": uint32(m.Params.HiddenLayers),
"llama.feed_forward_length": uint32(m.Params.IntermediateSize), "llama.feed_forward_length": uint32(m.Params.IntermediateSize),
"llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
"llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads), "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
"llama.attention.head_count": uint32(m.Params.AttentionHeads), "llama.attention.head_count": uint32(m.Params.AttentionHeads),
"llama.attention.head_count_kv": uint32(m.Params.KeyValHeads), "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
"llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS), "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
"general.file_type": uint32(1), "general.file_type": uint32(1),
"tokenizer.ggml.model": "llama", "tokenizer.ggml.model": "gpt2",
"tokenizer.ggml.pre": m.Params.PreTokenizer,
"tokenizer.ggml.tokens": m.Vocab.Tokens, "tokenizer.ggml.tokens": m.Vocab.Tokens,
"tokenizer.ggml.scores": m.Vocab.Scores,
"tokenizer.ggml.token_type": m.Vocab.Types, "tokenizer.ggml.token_type": m.Vocab.Types,
"tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID), "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
"tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID), "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
"tokenizer.ggml.unknown_token_id": uint32(0), "tokenizer.ggml.unknown_token_id": uint32(0),
"tokenizer.ggml.add_bos_token": true, }
"tokenizer.ggml.add_eos_token": false,
if len(m.Vocab.Merges) > 0 {
kv["tokenizer.ggml.merges"] = m.Vocab.Merges
} else {
kv["tokenizer.ggml.scores"] = m.Vocab.Scores
} }
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
} }
func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
return llamaRepack(name, m.Params, data, shape)
}
func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) {
var dims []int
for _, dim := range shape {
if dim != 0 {
dims = append(dims, int(dim))
}
}
var heads int
if strings.HasSuffix(name, "attn_q.weight") {
heads = params.AttentionHeads
} else if strings.HasSuffix(name, "attn_k.weight") {
heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
} else {
return nil, fmt.Errorf("unknown tensor name: %s", name)
}
n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))
if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil {
return nil, err
}
if err := n.T(0, 2, 1, 3); err != nil {
return nil, err
}
if err := n.Reshape(dims...); err != nil {
return nil, err
}
if err := n.Transpose(); err != nil {
return nil, err
}
ts, err := native.SelectF32(n, 1)
if err != nil {
return nil, err
}
var f32s []float32
for _, t := range ts {
f32s = append(f32s, t...)
}
return f32s, nil
}

View File

@@ -1,17 +1,8 @@
 package convert

 import (
-    "encoding/binary"
-    "fmt"
     "io"
-    "os"
     "regexp"
-    "strings"
-
-    "github.com/d4l3k/go-bfloat16"
-    "github.com/pdevine/tensor"
-    "github.com/pdevine/tensor/native"
-    "github.com/x448/float16"

     "github.com/ollama/ollama/llm"
 )
@@ -20,90 +11,12 @@ type MistralModel struct {
     ModelData
 }

-func mistralLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
-    layerSize := r.end - r.start
-
-    var err error
-    tData := make([]uint16, layerSize/2)
-    if err = binary.Read(f, r.bo, tData); err != nil {
-        return err
-    }
-
-    var heads uint32
-    if strings.Contains(r.t.Name, "attn_q") {
-        heads = uint32(r.params.AttentionHeads)
-    } else if strings.Contains(r.t.Name, "attn_k") {
-        heads = uint32(r.params.KeyValHeads)
-        if heads == 0 {
-            heads = uint32(r.params.AttentionHeads)
-        }
-    } else {
-        return fmt.Errorf("unknown layer type")
-    }
-
-    tData, err = repack(tData, int(heads), r.t.Shape)
-    if err != nil {
-        return err
-    }
-
-    var buf []byte
-    for _, n := range tData {
-        buf = r.bo.AppendUint16(buf, n)
-    }
-
-    tempBuf := make([]uint16, len(tData))
-    tDataF32 := bfloat16.DecodeFloat32(buf)
-    for cnt, v := range tDataF32 {
-        tDataF16 := float16.Fromfloat32(v)
-        tempBuf[cnt] = uint16(tDataF16)
-    }
-
-    if err = binary.Write(w, r.bo, tempBuf); err != nil {
-        return err
-    }
-    return nil
-}
-
-func repack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
-    n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
-    origShape := n.Shape().Clone()
-
-    // reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
-    if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
-        return nil, err
-    }
-
-    if err := n.T(0, 2, 1, 3); err != nil {
-        return nil, err
-    }
-
-    if err := n.Reshape(origShape...); err != nil {
-        return nil, err
-    }
-
-    if err := n.Transpose(); err != nil {
-        return nil, err
-    }
-
-    newN, err := native.SelectU16(n, 1)
-    if err != nil {
-        return nil, err
-    }
-
-    var fullTensor []uint16
-    for _, v := range newN {
-        fullTensor = append(fullTensor, v...)
-    }
-
-    return fullTensor, nil
-}
-
 func (m *MistralModel) GetTensors() error {
     t, err := m.Format.GetTensors(m.Path, m.Params)
     if err != nil {
         return err
     }

-    m.Tensors = []llm.Tensor{}
-
     pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
     re, err := regexp.Compile(pattern)
     if err != nil {
@@ -114,7 +27,7 @@ func (m *MistralModel) GetTensors() error {
         matches := re.FindAllStringSubmatch(l.Name, -1)
         if len(matches) > 0 {
             wt := l.WriterTo.(safetensorWriterTo)
-            wt.handler = mistralLayerHandler
+            wt.repacker = m.Repack
             l.WriterTo = wt
         }
         m.Tensors = append(m.Tensors, l)
m.Tensors = append(m.Tensors, l) m.Tensors = append(m.Tensors, l)
@@ -160,3 +73,7 @@ func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error {
return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors) return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
} }
func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
return llamaRepack(name, m.Params, data, shape)
}

View File

@@ -17,8 +17,6 @@ func (m *MixtralModel) GetTensors() error {
         return err
     }

-    m.Tensors = []llm.Tensor{}
-
     pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
     re, err := regexp.Compile(pattern)
     if err != nil {
@@ -29,7 +27,7 @@ func (m *MixtralModel) GetTensors() error {
         matches := re.FindAllStringSubmatch(l.Name, -1)
         if len(matches) > 0 {
             wt := l.WriterTo.(safetensorWriterTo)
-            wt.handler = mistralLayerHandler
+            wt.repacker = m.Repack
             l.WriterTo = wt
         }
         m.Tensors = append(m.Tensors, l)
@@ -83,3 +81,7 @@ func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error {
     return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
 }
+
+func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
+    return llamaRepack(name, m.Params, data, shape)
+}

View File

@@ -6,14 +6,13 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"slices" "slices"
"strings"
"github.com/d4l3k/go-bfloat16" "github.com/d4l3k/go-bfloat16"
"github.com/mitchellh/mapstructure"
"github.com/x448/float16" "github.com/x448/float16"
"github.com/ollama/ollama/llm" "github.com/ollama/ollama/llm"
@@ -26,39 +25,38 @@ type safetensorWriterTo struct {
     bo       ByteOrder

     filename string
+    dtype    string

-    start, end, padding uint64
-    handler             func(w io.Writer, r safetensorWriterTo, f *os.File) error
+    offset, size int64
+    repacker     func(string, []float32, []uint64) ([]float32, error)
 }

-type tensorMetaData struct {
-    Type    string `mapstructure:"dtype"`
-    Shape   []int  `mapstructure:"shape"`
-    Offsets []int  `mapstructure:"data_offsets"`
+type safetensorMetadata struct {
+    Type    string   `json:"dtype"`
+    Shape   []uint64 `json:"shape"`
+    Offsets []int64  `json:"data_offsets"`
 }

 type SafetensorFormat struct{}

 func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
-    slog.Debug("getting tensor data")
     var tensors []llm.Tensor
-    files, err := filepath.Glob(filepath.Join(dirpath, "/model-*.safetensors"))
+    matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors"))
     if err != nil {
         return nil, err
     }

     var offset uint64
-    for _, f := range files {
+    for _, f := range matches {
         var t []llm.Tensor
         var err error
         t, offset, err = m.readTensors(f, offset, params)
         if err != nil {
-            slog.Error(err.Error())
             return nil, err
         }
         tensors = append(tensors, t...)
     }

-    slog.Debug(fmt.Sprintf("all tensors = %d", len(tensors)))

     return tensors, nil
 }
@@ -69,70 +67,57 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
     }
     defer f.Close()

-    var jsonSize uint64
-    if err := binary.Read(f, binary.LittleEndian, &jsonSize); err != nil {
+    var n int64
+    if err := binary.Read(f, binary.LittleEndian, &n); err != nil {
         return nil, 0, err
     }

-    buf := make([]byte, jsonSize)
-    _, err = io.ReadFull(f, buf)
-    if err != nil {
+    b := bytes.NewBuffer(make([]byte, 0, n))
+    if _, err = io.CopyN(b, f, n); err != nil {
         return nil, 0, err
     }

-    d := json.NewDecoder(bytes.NewBuffer(buf))
-    d.UseNumber()
-    var parsed map[string]interface{}
-    if err = d.Decode(&parsed); err != nil {
+    var headers map[string]safetensorMetadata
+    if err := json.NewDecoder(b).Decode(&headers); err != nil {
         return nil, 0, err
     }

     var keys []string
-    for k := range parsed {
-        keys = append(keys, k)
+    for key := range headers {
+        if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") {
+            keys = append(keys, key)
+        }
     }

     slices.Sort(keys)

-    slog.Info("converting layers")
-
     var tensors []llm.Tensor
-    for _, k := range keys {
-        vals := parsed[k].(map[string]interface{})
-        var data tensorMetaData
-        if err = mapstructure.Decode(vals, &data); err != nil {
-            slog.Error("couldn't decode properly")
-            return nil, 0, err
-        }
-
-        var size uint64
+    for _, key := range keys {
+        value := headers[key]
+
         var kind uint32
-        switch len(data.Shape) {
+        switch len(value.Shape) {
         case 0:
-            // metadata
+            // valuedata
             continue
-        case 1:
-            // convert to float32
-            kind = 0
-            size = uint64(data.Shape[0] * 4)
         case 2:
-            // convert to float16
             kind = 1
-            size = uint64(data.Shape[0] * data.Shape[1] * 2)
         }

-        ggufName, err := m.GetLayerName(k)
+        name, err := m.GetLayerName(key)
         if err != nil {
-            slog.Error(err.Error())
             return nil, 0, err
         }

-        shape := []uint64{0, 0, 0, 0}
-        for i := range data.Shape {
-            shape[i] = uint64(data.Shape[i])
-        }
+        shape := make([]uint64, len(value.Shape))
+        copy(shape, value.Shape)
+
+        pad := func(s int64) int64 {
+            return 8 + n + s
+        }

         t := llm.Tensor{
-            Name:   ggufName,
+            Name:   name,
             Kind:   kind,
             Offset: offset,
             Shape:  shape[:],
@@ -143,18 +128,15 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
             params: params,
             bo:     params.ByteOrder,

             filename: fn,
-            start:    uint64(data.Offsets[0]),
-            end:      uint64(data.Offsets[1]),
-            padding:  8 + jsonSize,
+
+            dtype:  value.Type,
+            offset: pad(value.Offsets[0]),
+            size:   pad(value.Offsets[1]) - pad(value.Offsets[0]),
         }

-        offset += size
+        offset += t.Size()
         tensors = append(tensors, t)
     }

-    slog.Debug(fmt.Sprintf("total tensors for file = %d", len(tensors)))
-    slog.Debug(fmt.Sprintf("offset = %d", offset))
     return tensors, offset, nil
 }
@@ -167,9 +149,7 @@ func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) {
     var params Params

-    d := json.NewDecoder(f)
-    err = d.Decode(&params)
-    if err != nil {
+    if err := json.NewDecoder(f).Decode(&params); err != nil {
         return nil, err
     }
@@ -224,55 +204,58 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) {
     }
     defer f.Close()

-    if _, err = f.Seek(int64(r.padding+r.start), 0); err != nil {
+    if _, err = f.Seek(r.offset, io.SeekStart); err != nil {
         return 0, err
     }

-    // use the handler if one is present
-    if r.handler != nil {
-        return 0, r.handler(w, r, f)
-    }
-
-    remaining := r.end - r.start
-
-    bufSize := uint64(10240)
-    var finished bool
-    for {
-        data := make([]byte, min(bufSize, remaining))
-
-        b, err := io.ReadFull(f, data)
-        remaining -= uint64(b)
-
-        if err == io.EOF || remaining <= 0 {
-            finished = true
-        } else if err != nil {
-            return 0, err
-        }
-
-        // convert bfloat16 -> ieee float32
-        tDataF32 := bfloat16.DecodeFloat32(data)
-
-        switch r.t.Kind {
-        case 0:
-            if err := binary.Write(w, r.bo, tDataF32); err != nil {
-                return 0, err
-            }
-        case 1:
-            // convert float32 -> float16
-            tempBuf := make([]uint16, len(data)/2)
-            for cnt, v := range tDataF32 {
-                tDataF16 := float16.Fromfloat32(v)
-                tempBuf[cnt] = uint16(tDataF16)
-            }
-            if err := binary.Write(w, r.bo, tempBuf); err != nil {
-                return 0, err
-            }
-        }
-
-        if finished {
-            break
-        }
-    }
-    return 0, nil
+    var f32s []float32
+    switch r.dtype {
+    case "F32":
+        f32s = make([]float32, r.size/4)
+        if err = binary.Read(f, r.bo, f32s); err != nil {
+            return 0, err
+        }
+    case "F16":
+        u16s := make([]uint16, r.size/2)
+        if err = binary.Read(f, r.bo, u16s); err != nil {
+            return 0, err
+        }
+
+        for _, b := range u16s {
+            f32s = append(f32s, float16.Frombits(b).Float32())
+        }
+    case "BF16":
+        u8s := make([]uint8, r.size)
+        if err = binary.Read(f, r.bo, u8s); err != nil {
+            return 0, err
+        }
+
+        f32s = bfloat16.DecodeFloat32(u8s)
+    default:
+        return 0, fmt.Errorf("unknown data type: %s", r.dtype)
+    }
+
+    if r.repacker != nil {
+        f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
+        if err != nil {
+            return 0, err
+        }
+    }
+
+    switch r.t.Kind {
+    case 0:
+        return 0, binary.Write(w, r.bo, f32s)
+    case 1:
+        f16s := make([]uint16, len(f32s))
+        for i := range f32s {
+            f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
+        }
+        return 0, binary.Write(w, r.bo, f16s)
+    default:
+        return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
+    }
 }

 func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
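The F16 write path above round-trips cleanly for representable values; a small self-contained check against the same x448/float16 package (values illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/x448/float16"
)

func main() {
	f := float32(0.5)
	bits := float16.Fromfloat32(f).Bits()    // 0x3800
	back := float16.Frombits(bits).Float32() // 0.5 again
	fmt.Printf("%#04x %v\n", bits, back)
}
```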
@@ -281,6 +264,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M
return nil, fmt.Errorf("No architecture specified to convert") return nil, fmt.Errorf("No architecture specified to convert")
case 1: case 1:
switch params.Architectures[0] { switch params.Architectures[0] {
case "LlamaForCausalLM":
return &LlamaModel{
ModelData{
Name: name,
Path: dirPath,
Params: params,
Format: m,
},
}, nil
case "MistralForCausalLM": case "MistralForCausalLM":
return &MistralModel{ return &MistralModel{
ModelData{ ModelData{

109
convert/tokenizer.go Normal file
View File

@@ -0,0 +1,109 @@
package convert

import (
	"cmp"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"
	"slices"

	"golang.org/x/exp/maps"
)

type Tokenizer struct {
	Version     string         `json:"version"`
	AddedTokens []Token        `json:"added_tokens"`
	Model       TokenizerModel `json:"model"`

	PreTokenizer struct {
		PreTokenizers []struct {
			Type    string `json:"type"`
			Pattern struct {
				Regex string `json:"Regex"`
			} `json:"pattern"`
		} `json:"pretokenizers"`
	} `json:"pre_tokenizer"`
}

type TokenizerModel struct {
	Type   string         `json:"type"`
	Vocab  map[string]int `json:"vocab"`
	Merges []string       `json:"merges"`
	Tokens []Token
}

type Token struct {
	ID          int    `json:"id"`
	Content     string `json:"content"`
	Special     bool   `json:"special"`
	UserDefined bool
}

func (t *Token) Type() int32 {
	switch {
	case t.Special:
		return tokenTypeControl
	case t.UserDefined:
		return tokenTypeUserDefined
	default:
		return tokenTypeNormal
	}
}

func (t *Tokenizer) maxID() int {
	return max(
		slices.Max(maps.Values(t.Model.Vocab)),
		slices.MaxFunc(t.AddedTokens, func(a, b Token) int {
			return cmp.Compare(a.ID, b.ID)
		}).ID,
	)
}

func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) {
	f, err := os.Open(dirpath)
	if err != nil {
		return "", nil, nil, err
	}
	defer f.Close()

	var t Tokenizer
	if err := json.NewDecoder(f).Decode(&t); err != nil {
		return "", nil, nil, err
	}

	tokens = make([]Token, t.maxID()+1)
	for k, v := range t.Model.Vocab {
		tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false}
	}

	for _, v := range t.AddedTokens {
		v.UserDefined = true
		tokens[v.ID] = v
	}

	sha256sum := sha256.New()
	for _, pt := range t.PreTokenizer.PreTokenizers {
		switch pt.Type {
		case "Split":
			if pt.Pattern.Regex != "" {
				sha256sum.Write([]byte(pt.Pattern.Regex))
			}
		}
	}

	switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest {
	case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f":
		pre = "llama-bpe"
	case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02":
		pre = "deepseek-llm"
	case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e":
		pre = "deepseek-coder"
	default:
		slog.Warn("unknown pretokenizer, using default", "digest", digest)
		pre = "default"
	}

	return pre, tokens, t.Model.Merges, nil
}
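For reference, the pretokenizer digests in the switch above are plain SHA-256 hashes of the `Split` pattern regexes. A small sketch of computing one the same way `parseTokens` does; the pattern string here is a made-up stand-in, not the actual llama-bpe regex:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// Hash a (hypothetical) Split pretokenizer pattern; comparing the hex
	// digest against a known table identifies the pretokenizer family.
	sum := sha256.New()
	sum.Write([]byte(`(?i:'s|'t|'re|'ve|'m|'ll|'d)`)) // assumed example pattern
	fmt.Printf("%x\n", sum.Sum(nil))
}
```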


@@ -24,8 +24,8 @@ type torchWriterTo struct {
 	params   *Params
 	bo       ByteOrder
 	storage  pytorch.StorageInterface
-	handler  func(w io.Writer, r torchWriterTo) error
+	repacker func(string, []float32, []uint64) ([]float32, error)
 }

 type TorchFormat struct{}
@@ -33,14 +33,14 @@ type TorchFormat struct{}
 func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
 	slog.Debug("getting torch tensors")

-	files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin"))
-	if err != nil {
-		slog.Error("didn't find any torch files")
-		return nil, err
+	var files []string
+	if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 {
+		files = append(files, pt...)
+	} else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 {
+		files = append(files, pt...)
 	}

 	var offset uint64
 	var tensors []llm.Tensor
 	for _, fn := range files {
 		m, err := pytorch.Load(fn)
@@ -77,7 +77,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
 		slog.Error(err.Error())
 		return nil, err
 	}
-	slog.Debug(fmt.Sprintf("finding name for '%s' -> '%s'", k.(string), ggufName))
+	slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape))

 	shape := []uint64{0, 0, 0, 0}
 	for i := range tshape {
@@ -120,7 +120,7 @@ func getAltParams(dirpath string) (*Params, error) {
 		AttentionHeads int     `json:"n_heads"`
 		KeyValHeads    int     `json:"n_kv_heads"`
 		HiddenLayers   int     `json:"n_layers"`
-		RopeTheta      int     `json:"rope_theta"`
+		RopeTheta      float64 `json:"rope_theta"`
 		NormEPS        float64 `json:"norm_eps"`
 	}
@@ -133,6 +133,7 @@ func getAltParams(dirpath string) (*Params, error) {
 	}

 	params := &Params{
+		Architectures:  []string{"LlamaForCausalLM"},
 		HiddenSize:     tparams.HiddenSize,
 		AttentionHeads: tparams.AttentionHeads,
 		KeyValHeads:    tparams.KeyValHeads,
@@ -229,37 +230,38 @@ func (m *TorchFormat) GetLayerName(n string) (string, error) {
 }

 func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) {
-	// use the handler if one is present
-	if r.handler != nil {
-		return 0, r.handler(w, r)
+	var f32s []float32
+	switch s := r.storage.(type) {
+	case *pytorch.FloatStorage:
+		f32s = s.Data
+	case *pytorch.HalfStorage:
+		f32s = s.Data
+	case *pytorch.BFloat16Storage:
+		f32s = s.Data
+	default:
+		return 0, fmt.Errorf("unknown data type: %T", s)
 	}

-	switch r.storage.(type) {
-	case *pytorch.FloatStorage:
-		slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name))
-		return 0, nil
-	case *pytorch.HalfStorage:
-		switch r.t.Kind {
-		case 0:
-			data := r.storage.(*pytorch.HalfStorage).Data
-			slog.Debug(fmt.Sprintf("%35s F32 (%d)", r.t.Name, len(data)))
-			if err := binary.Write(w, r.bo, data); err != nil {
-				return 0, err
-			}
-		case 1:
-			data := r.storage.(*pytorch.HalfStorage).Data
-			tData := make([]uint16, len(data))
-			for cnt, v := range data {
-				tData[cnt] = uint16(float16.Fromfloat32(v))
-			}
-			slog.Debug(fmt.Sprintf("%35s F16 (%d)", r.t.Name, len(tData)))
-			if err := binary.Write(w, r.bo, tData); err != nil {
-				return 0, err
-			}
+	if r.repacker != nil {
+		f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
+		if err != nil {
+			return 0, err
 		}
 	}
-	return 0, nil
+
+	switch r.t.Kind {
+	case 0:
+		return 0, binary.Write(w, r.bo, f32s)
+	case 1:
+		f16s := make([]uint16, len(f32s))
+		for i := range f32s {
+			f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
+		}
+		return 0, binary.Write(w, r.bo, f16s)
+	default:
+		return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
+	}
 }

 func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {


@@ -797,9 +797,9 @@ curl http://localhost:11434/api/show -d '{
 ```json
 {
-  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSSISTANT:\"",
-  "parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSSISTANT:",
-  "template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSSISTANT: ",
+  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSISTANT:\"",
+  "parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSISTANT:",
+  "template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: ",
   "details": {
     "format": "gguf",
     "family": "llama",


@@ -6,6 +6,8 @@ Install required tools:
 - go version 1.22 or higher
 - gcc version 11.4.0 or higher

+### MacOS
+
 ```bash
 brew install go cmake gcc
 ```


@@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. Click on the ta
 On Linux, re-run the install script:

-```
+```shell
 curl -fsSL https://ollama.com/install.sh | sh
 ```
@@ -30,7 +30,7 @@ To change this when using `ollama run`, use `/set parameter`:
 When using the API, specify the `num_ctx` parameter:

-```
+```shell
 curl http://localhost:11434/api/generate -d '{
   "model": "llama3",
   "prompt": "Why is the sky blue?",
@@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{
 }'
 ```
## How can I tell if my model was loaded onto the GPU?
Use the `ollama ps` command to see what models are currently loaded into memory.
```shell
ollama ps
NAME         ID            SIZE   PROCESSOR  UNTIL
llama3:70b   bcfb190ca3a7  42 GB  100% GPU   4 minutes from now
```
The `Processor` column will show which memory the model was loaded into:
* `100% GPU` means the model was loaded entirely into the GPU
* `100% CPU` means the model was loaded entirely in system memory
* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory
## How do I configure Ollama server?

Ollama server can be configured with environment variables.
@@ -80,81 +95,19 @@ If Ollama is run as a systemd service, environment variables should be set using
### Setting environment variables on Windows

-On windows, Ollama inherits your user and system environment variables.
+On Windows, Ollama inherits your user and system environment variables.

-1. First Quit Ollama by clicking on it in the task bar
-2. Edit system environment variables from the control panel
-3. Edit or create New variable(s) for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
-4. Click OK/Apply to save
-5. Run `ollama` from a new terminal window
+1. First Quit Ollama by clicking on it in the task bar.
+2. Start the Settings (Windows 11) or Control Panel (Windows 10) application and search for _environment variables_.
+3. Click on _Edit environment variables for your account_.
+4. Edit or create a new variable for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
+5. Click OK/Apply to save.
+6. Start the Ollama application from the Windows Start menu.
## How can I expose Ollama on my network?
Ollama binds 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## How can I use Ollama with a proxy server?
Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:
```
server {
listen 80;
server_name example.com; # Replace with your domain or IP
location / {
proxy_pass http://localhost:11434;
proxy_set_header Host localhost:11434;
}
}
```
## How can I use Ollama with ngrok?
Ollama can be accessed using a range of tools for tunneling tools. For example with Ngrok:
```
ngrok http 11434 --host-header="localhost:11434"
```
## How can I use Ollama with Cloudflare Tunnel?
To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
```
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
```
## How can I allow additional web origins to access Ollama?
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## Where are models stored?
- macOS: `~/.ollama/models`
- Linux: `/usr/share/ollama/.ollama/models`
- Windows: `C:\Users\%username%\.ollama\models`
### How do I set them to a different location?
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## Does Ollama send my prompts and answers back to ollama.com?
No. Ollama runs locally, and conversation data does not leave your machine.
## How can I use Ollama in Visual Studio Code?
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
## How do I use Ollama behind a proxy?
@@ -181,6 +134,69 @@ docker build -t ollama-with-ca .
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
```
## Does Ollama send my prompts and answers back to ollama.com?
No. Ollama runs locally, and conversation data does not leave your machine.
## How can I expose Ollama on my network?
Ollama binds to 127.0.0.1, port 11434, by default. Change the bind address with the `OLLAMA_HOST` environment variable.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## How can I use Ollama with a proxy server?
Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:
```
server {
listen 80;
server_name example.com; # Replace with your domain or IP
location / {
proxy_pass http://localhost:11434;
proxy_set_header Host localhost:11434;
}
}
```
## How can I use Ollama with ngrok?
Ollama can be accessed through a variety of tunneling tools. For example, with ngrok:
```shell
ngrok http 11434 --host-header="localhost:11434"
```
## How can I use Ollama with Cloudflare Tunnel?
To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
```shell
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
```
## How can I allow additional web origins to access Ollama?
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## Where are models stored?
- macOS: `~/.ollama/models`
- Linux: `/usr/share/ollama/.ollama/models`
- Windows: `C:\Users\%username%\.ollama\models`
### How do I set them to a different location?
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
## How can I use Ollama in Visual Studio Code?
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
## How do I use Ollama with GPU acceleration in Docker?

The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
@@ -195,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
properties.

-## How can I pre-load a model to get faster response times?
+## How can I preload a model into Ollama to get faster response times?

If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
@@ -209,6 +225,11 @@ To use the chat completions endpoint, use:
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
```
To preload a model using the CLI, use the command:
```shell
ollama run llama3 ""
```
## How do I keep a model loaded in memory or make it unload immediately?
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory. By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory.
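As a sketch, the same `keep_alive` control can be exercised from Go; this assumes the default local server at `localhost:11434` and uses the documented `-1` (keep loaded indefinitely) and `0` (unload immediately) values:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// keep_alive: -1 keeps the model loaded indefinitely; 0 unloads it
	// immediately after the (empty) request completes.
	body := []byte(`{"model": "llama3", "keep_alive": -1}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```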
@@ -233,8 +254,6 @@ Alternatively, you can change the amount of time all models are loaded into memo
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.

-## How do I manage the maximum number of requests the server can queue
+## How do I manage the maximum number of requests the Ollama server can queue?

-If too many requests are sent to the server, it will respond with a 503 error
-indicating the server is overloaded. You can adjust how many requests may be
-queue by setting `OLLAMA_MAX_QUEUE`
+If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE` (e.g. `OLLAMA_MAX_QUEUE=100`).


@@ -1,104 +1,86 @@
# How to troubleshoot issues # How to troubleshoot issues
Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command: Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
```shell ```shell
cat ~/.ollama/logs/server.log cat ~/.ollama/logs/server.log
``` ```
On **Linux** systems with systemd, the logs can be found with this command: On **Linux** systems with systemd, the logs can be found with this command:
```shell ```shell
journalctl -u ollama journalctl -u ollama
``` ```
When you run Ollama in a **container**, the logs go to stdout/stderr in the container: When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
```shell ```shell
docker logs <container-name> docker logs <container-name>
``` ```
(Use `docker ps` to find the container name) (Use `docker ps` to find the container name)
If manually running `ollama serve` in a terminal, the logs will be on that terminal. If manually running `ollama serve` in a terminal, the logs will be on that terminal.
When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in: When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in:
- `explorer %LOCALAPPDATA%\Ollama` to view logs - `explorer %LOCALAPPDATA%\Ollama` to view logs
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH) - `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored - `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories - `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal
```powershell ```powershell
$env:OLLAMA_DEBUG="1" $env:OLLAMA_DEBUG="1"
& "ollama app.exe" & "ollama app.exe"
``` ```
Join the [Discord](https://discord.gg/ollama) for help interpreting the logs. Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
## LLM libraries ## LLM libraries
Ollama includes multiple LLM libraries compiled for different GPUs and CPU Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU) you can workaround this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library.
vector features. Ollama tries to pick the best one based on the capabilities of
your system. If this autodetection has problems, or you run into other problems In the server log, you will see a message that looks something like this (varies from release to release):
(e.g. crashes in your GPU) you can workaround this by forcing a specific LLM
library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest ```
but most compatible is `cpu`. Rosetta emulation under MacOS will work with the Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
`cpu` library. ```
In the server log, you will see a message that looks something like this (varies **Experimental LLM Library Override**
from release to release):
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card, but want to force the CPU LLM library with AVX2 vector support, use:
```
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5] ```
``` OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
```
**Experimental LLM Library Override**
You can see what features your CPU has with the following.
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass ```
autodetection, so for example, if you have a CUDA card, but want to force the cat /proc/cpuinfo| grep flags | head -1
CPU LLM library with AVX2 vector support, use: ```
``` ## Installing older or pre-release versions on Linux
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
``` If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.
You can see what features your CPU has with the following. ```sh
``` curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
cat /proc/cpuinfo| grep flags | head -1 ```
```
## Linux tmp noexec
## Installing older or pre-release versions on Linux
If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
If you run into problems on Linux and want to install an older version, or you'd
like to try out a pre-release before it's officially released, you can tell the ## Container fails to run on NVIDIA GPU
install script which version to install.
Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
```sh
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
```
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
## Linux tmp noexec - Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
- Try rebooting
If your system is configured with the "noexec" flag where Ollama stores its - Make sure you're running the latest nvidia drivers
temporary executable files, you can specify an alternate location by setting
OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example If none of those resolve the problem, gather additional information and file an issue:
OLLAMA_TMPDIR=/usr/share/ollama/ - Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
## Container fails to run on NVIDIA GPU
Make sure you've set up the conatiner runtime first as described in [docker.md](./docker.md)
Sometimes the container runtime can have difficulties initializing the GPU.
When you check the server logs, this can show up as various error codes, such
as "3" (not initialized), "46" (device unavailable), "100" (no device), "999"
(unknown), or others. The following troubleshooting techniques may help resolve
the problem
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
- Try rebooting
- Make sure you're running the latest nvidia drivers
If none of those resolve the problem, gather additional information and file an issue:
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`


@@ -33,7 +33,7 @@ Here's a quick example showing API access from `powershell`
## Troubleshooting

 While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
-a "view logs" menu item to the app, and increses logging for the GUI app and
+a "view logs" menu item to the app, and increases logging for the GUI app and
 server.

Ollama on Windows stores files in a few different locations. You can view them in


@@ -60,7 +60,9 @@ func humanTime(t time.Time, zeroValue string) string {
 	}

 	delta := time.Since(t)
-	if delta < 0 {
+	if int(delta.Hours())/24/365 < -20 {
+		return "Forever"
+	} else if delta < 0 {
 		return humanDuration(-delta) + " from now"
 	}


@@ -32,4 +32,14 @@ func TestHumanTime(t *testing.T) {
 		v := now.Add(800 * time.Millisecond)
 		assertEqual(t, HumanTime(v, ""), "Less than a second from now")
 	})
+
+	t.Run("time way in the future", func(t *testing.T) {
+		v := now.Add(24 * time.Hour * 365 * 200)
+		assertEqual(t, HumanTime(v, ""), "Forever")
+	})
+
+	t.Run("time way in the future lowercase", func(t *testing.T) {
+		v := now.Add(24 * time.Hour * 365 * 200)
+		assertEqual(t, HumanTimeLower(v, ""), "forever")
+	})
 }

go.mod

@@ -4,12 +4,10 @@ go 1.22.0
 require (
 	github.com/containerd/console v1.0.3
-	github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
 	github.com/emirpasic/gods v1.18.1
 	github.com/gin-gonic/gin v1.10.0
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/uuid v1.1.2
-	github.com/mitchellh/mapstructure v1.5.0
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/spf13/cobra v1.7.0
 	github.com/stretchr/testify v1.9.0
@@ -18,6 +16,8 @@ require (
 )

 require (
+	github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
+	github.com/mattn/go-runewidth v0.0.14
 	github.com/nlpodyssey/gopickle v0.3.0
 	github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
 )
@@ -33,7 +33,6 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/flatbuffers v24.3.25+incompatible // indirect
 	github.com/kr/text v0.2.0 // indirect
-	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect

go.sum

@@ -135,8 +135,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
 github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=


@@ -159,10 +159,10 @@ func AMDGetGPUInfo() []GpuInfo {
 		return []GpuInfo{}
 	}

-	if int(major) < RocmComputeMin {
-		slog.Warn(fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch), "gpu", gpuID)
-		continue
-	}
+	//if int(major) < RocmComputeMin {
+	//	slog.Warn(fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch), "gpu", gpuID)
+	//	continue
+	//}

 	// Look up the memory for the current node
 	totalMemory := uint64(0)


@@ -86,10 +86,10 @@ func AMDGetGPUInfo() []GpuInfo {
slog.Debug("hip device", "id", i, "name", name, "gfx", gfx) slog.Debug("hip device", "id", i, "name", name, "gfx", gfx)
//slog.Info(fmt.Sprintf("[%d] Integrated: %d", i, props.iGPU)) // DOESN'T REPORT CORRECTLY! Always 0 //slog.Info(fmt.Sprintf("[%d] Integrated: %d", i, props.iGPU)) // DOESN'T REPORT CORRECTLY! Always 0
// TODO Why isn't props.iGPU accurate!? // TODO Why isn't props.iGPU accurate!?
if strings.EqualFold(name, iGPUName) { //if strings.EqualFold(name, iGPUName) {
slog.Info("unsupported Radeon iGPU detected skipping", "id", i, "name", name, "gfx", gfx) // slog.Info("unsupported Radeon iGPU detected skipping", "id", i, "name", name, "gfx", gfx)
continue // continue
} //}
if gfxOverride == "" { if gfxOverride == "" {
if !slices.Contains[[]string, string](supported, gfx) { if !slices.Contains[[]string, string](supported, gfx) {
slog.Warn("amdgpu is not supported", "gpu", i, "gpu_type", gfx, "library", libDir, "supported_types", supported) slog.Warn("amdgpu is not supported", "gpu", i, "gpu_type", gfx, "library", libDir, "supported_types", supported)
@@ -108,10 +108,10 @@ func AMDGetGPUInfo() []GpuInfo {
 	}

 	// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
-	if totalMemory < IGPUMemLimit {
-		slog.Info("amdgpu appears to be an iGPU, skipping", "gpu", i, "total", format.HumanBytes2(totalMemory))
-		continue
-	}
+	//if totalMemory < IGPUMemLimit {
+	//	slog.Info("amdgpu appears to be an iGPU, skipping", "gpu", i, "total", format.HumanBytes2(totalMemory))
+	//	continue
+	//}

 	// TODO revisit this once ROCm v6 is available on windows.
 	// v5.7 only reports VRAM used by this process, so it's completely wrong and unusable


@@ -19,6 +19,11 @@ import (
 )

 func TestMaxQueue(t *testing.T) {
+	if os.Getenv("OLLAMA_TEST_EXISTING") != "" {
+		t.Skip("Max Queue test requires spawning a local server so we can adjust the queue size")
+		return
+	}
+
 	// Note: This test can be quite slow when running in CPU mode, so keep the threadCount low unless you're on GPU
 	// Also note that by default Darwin can't sustain > ~128 connections without adjusting limits
 	threadCount := 32
@@ -109,9 +114,9 @@ func TestMaxQueue(t *testing.T) {
slog.Info("generate done, waiting for embeds") slog.Info("generate done, waiting for embeds")
embedwg.Wait() embedwg.Wait()
slog.Info("embeds completed", "success", succesCount, "busy", busyCount, "reset", resetByPeerCount, "canceled", canceledCount)
require.Equal(t, resetByPeerCount, 0, "Connections reset by peer, have you updated your fd and socket limits?") require.Equal(t, resetByPeerCount, 0, "Connections reset by peer, have you updated your fd and socket limits?")
require.True(t, busyCount > 0, "no requests hit busy error but some should have") require.True(t, busyCount > 0, "no requests hit busy error but some should have")
require.True(t, canceledCount == 0, "no requests should have been canceled due to timeout") require.True(t, canceledCount == 0, "no requests should have been canceled due to timeout")
slog.Info("embeds completed", "success", succesCount, "busy", busyCount, "reset", resetByPeerCount, "canceled", canceledCount)
} }


@@ -334,6 +334,7 @@ struct server_metrics {
 struct llama_server_context
 {
     llama_model *model = nullptr;
+    float modelProgress = 0.0;
     llama_context *ctx = nullptr;
     clip_ctx *clp_ctx = nullptr;
@@ -737,7 +738,7 @@ struct llama_server_context
             sampler_names.emplace_back(sampler_name);
         }
     }
-    slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
+    slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
 }
 else
 {
@@ -1095,7 +1096,7 @@ struct llama_server_context
std::vector<std::string> samplers_sequence; std::vector<std::string> samplers_sequence;
for (const auto &sampler_type : slot.sparams.samplers_sequence) for (const auto &sampler_type : slot.sparams.samplers_sequence)
{ {
samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type)); samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
} }
return json { return json {
@@ -2104,6 +2105,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled"); printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel); printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n"); printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
printf(" -spf FNAME, --system-prompt-file FNAME\n"); printf(" -spf FNAME, --system-prompt-file FNAME\n");
printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n"); printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
printf(" -ctk TYPE, --cache-type-k TYPE\n"); printf(" -ctk TYPE, --cache-type-k TYPE\n");
@@ -2501,7 +2503,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
     {
         params.use_mmap = false;
     }
-    else if (arg == "--numa") {
+    else if (arg == "--numa")
+    {
         if (++i >= argc) {
             invalid_param = true;
             break;
@@ -2521,6 +2524,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
     {
         params.cont_batching = true;
     }
+    else if (arg == "-fa" || arg == "--flash-attn")
+    {
+        params.flash_attn = true;
+    }
     else if (arg == "-np" || arg == "--parallel")
     {
         if (++i >= argc)
@@ -2529,7 +2536,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         break;
     }
     params.n_parallel = std::stoi(argv[i]);
-} else if (arg == "-n" || arg == "--n-predict")
+}
+else if (arg == "-n" || arg == "--n-predict")
 {
     if (++i >= argc)
     {
@@ -2537,7 +2545,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         break;
     }
     params.n_predict = std::stoi(argv[i]);
-} else if (arg == "-spf" || arg == "--system-prompt-file")
+}
+else if (arg == "-spf" || arg == "--system-prompt-file")
 {
     if (++i >= argc)
     {
@@ -2771,6 +2780,12 @@ inline void signal_handler(int signal) {
     shutdown_handler(signal);
 }

+static bool update_load_progress(float progress, void *data)
+{
+    ((llama_server_context*)data)->modelProgress = progress;
+    return true;
+}
+
 #if defined(_WIN32)
 char* wchar_to_char(const wchar_t* wstr) {
     if (wstr == nullptr) return nullptr;
@@ -2876,7 +2891,9 @@ int main(int argc, char **argv) {
             break;
         }
         case SERVER_STATE_LOADING_MODEL:
-            res.set_content(R"({"status": "loading model"})", "application/json");
+            char buf[128];
+            snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
+            res.set_content(buf, "application/json");
             res.status = 503; // HTTP Service Unavailable
             break;
         case SERVER_STATE_ERROR:
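A hedged sketch of consuming this from a client: poll the runner's health route until it leaves the loading state. The port and the `/health` path here are assumptions for illustration, not confirmed by this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type health struct {
	Status   string  `json:"status"`
	Progress float32 `json:"progress"`
}

func main() {
	for {
		resp, err := http.Get("http://127.0.0.1:8080/health") // assumed runner address/route
		if err != nil {
			panic(err)
		}
		var h health
		_ = json.NewDecoder(resp.Body).Decode(&h) // best-effort decode of the status JSON
		resp.Body.Close()
		if h.Status != "loading model" {
			fmt.Println("status:", h.Status)
			return
		}
		fmt.Printf("loading: %.0f%%\n", h.Progress*100)
		time.Sleep(time.Second)
	}
}
```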
@@ -3071,6 +3088,9 @@ int main(int argc, char **argv) {
     });

     // load the model
+    params.progress_callback = update_load_progress;
+    params.progress_callback_user_data = (void*)&llama;
+
     if (!llama.load_model(params))
     {
         state.store(SERVER_STATE_ERROR);


@@ -27,8 +27,16 @@ const (
 	fileTypeIQ2_XXS
 	fileTypeIQ2_XS
 	fileTypeQ2_K_S
-	fileTypeQ3_K_XS
+	fileTypeIQ3_XS
 	fileTypeIQ3_XXS
+	fileTypeIQ1_S
+	fileTypeIQ4_NL
+	fileTypeIQ3_S
+	fileTypeIQ2_S
+	fileTypeIQ4_XS
+	fileTypeIQ2_M
+	fileTypeIQ1_M
+	fileTypeBF16

 	fileTypeUnknown
 )
@@ -75,10 +83,26 @@ func ParseFileType(s string) (fileType, error) {
 		return fileTypeIQ2_XS, nil
 	case "Q2_K_S":
 		return fileTypeQ2_K_S, nil
-	case "Q3_K_XS":
-		return fileTypeQ3_K_XS, nil
+	case "IQ3_XS":
+		return fileTypeIQ3_XS, nil
 	case "IQ3_XXS":
 		return fileTypeIQ3_XXS, nil
+	case "IQ1_S":
+		return fileTypeIQ1_S, nil
+	case "IQ4_NL":
+		return fileTypeIQ4_NL, nil
+	case "IQ3_S":
+		return fileTypeIQ3_S, nil
+	case "IQ2_S":
+		return fileTypeIQ2_S, nil
+	case "IQ4_XS":
+		return fileTypeIQ4_XS, nil
+	case "IQ2_M":
+		return fileTypeIQ2_M, nil
+	case "IQ1_M":
+		return fileTypeIQ1_M, nil
+	case "BF16":
+		return fileTypeBF16, nil
 	default:
 		return fileTypeUnknown, fmt.Errorf("unknown fileType: %s", s)
 	}
@@ -126,10 +150,26 @@ func (t fileType) String() string {
return "IQ2_XS" return "IQ2_XS"
case fileTypeQ2_K_S: case fileTypeQ2_K_S:
return "Q2_K_S" return "Q2_K_S"
case fileTypeQ3_K_XS: case fileTypeIQ3_XS:
return "Q3_K_XS" return "IQ3_XS"
case fileTypeIQ3_XXS: case fileTypeIQ3_XXS:
return "IQ3_XXS" return "IQ3_XXS"
case fileTypeIQ1_S:
return "IQ1_S"
case fileTypeIQ4_NL:
return "IQ4_NL"
case fileTypeIQ3_S:
return "IQ3_S"
case fileTypeIQ2_S:
return "IQ2_S"
case fileTypeIQ4_XS:
return "IQ4_XS"
case fileTypeIQ2_M:
return "IQ2_M"
case fileTypeIQ1_M:
return "IQ1_M"
case fileTypeBF16:
return "BF16"
default: default:
return "unknown" return "unknown"
} }
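Since `ParseFileType` and `String` are meant to be inverses over the new IQ names, a small test-style sketch can check the round trip; this is an assumed test for illustration (it presumes these functions live in the `llm` package), not part of the diff:

```go
package llm

import "testing"

// TestFileTypeRoundTrip sketches a round-trip over the newly added IQ names.
func TestFileTypeRoundTrip(t *testing.T) {
	for _, name := range []string{"IQ1_S", "IQ2_S", "IQ3_XS", "IQ4_XS", "BF16"} {
		ft, err := ParseFileType(name)
		if err != nil {
			t.Fatal(err)
		}
		if got := ft.String(); got != name {
			t.Fatalf("round trip: %s != %s", got, name)
		}
	}
}
```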


@@ -165,7 +165,7 @@ if [ -z "${CUDART_LIB_DIR}" ]; then
     CUDART_LIB_DIR="${CUDA_LIB_DIR}"
 fi

-if [ -d "${CUDA_LIB_DIR}" ]; then
+if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then
     echo "CUDA libraries detected - building dynamic CUDA library"
     init_vars
     CUDA_MAJOR=$(ls "${CUDA_LIB_DIR}"/libcudart.so.* | head -1 | cut -f3 -d. || true)
@@ -227,7 +227,7 @@ if [ -z "${CLBlast_DIR}" ]; then
     fi
 fi

-if [ -d "${ROCM_PATH}" ]; then
+if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
     echo "ROCm libraries detected - building dynamic ROCm library"
     if [ -f ${ROCM_PATH}/lib/librocblas.so.*.*.????? ]; then
         ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)


@@ -119,7 +119,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
 		t.Offset = uint64(offset)

-		if _, err := rs.Seek(int64(t.size()), io.SeekCurrent); err != nil {
+		if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil {
 			return err
 		}


@@ -106,7 +106,7 @@ type Layer map[string]*Tensor
 func (l Layer) size() (size uint64) {
 	for _, t := range l {
-		size += t.size()
+		size += t.Size()
 	}

 	return size
} }
func (t Tensor) blockSize() uint64 { func (t Tensor) blockSize() uint64 {
switch { switch t.Kind {
case t.Kind < 2: case 0, 1, 24, 25, 26, 27, 28, 31: // F32, F16, I8, I16, I32, I64, F64, BF16
return 1 return 1
case t.Kind < 10: case 2, 3, 8, 9, 20: // Q4_0, Q4_1, Q8_0, Q8_1, IQ4_NL
return 32 return 32
default: default: // All others
return 256 return 256
} }
} }
@@ -171,7 +171,29 @@ func (t Tensor) typeSize() uint64 {
 	case 17: // IQ2_XS
 		return 2 + 2*blockSize/8 + blockSize/32
 	case 18: // IQ3_XXS
-		return 2 + 3*blockSize/8
+		return 2 + blockSize/4 + blockSize/8
+	case 19: // IQ1_S
+		return 2 + blockSize/8 + blockSize/16
+	case 20: // IQ4_NL
+		return 2 + blockSize/2
+	case 21: // IQ3_S
+		return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4
+	case 22: // IQ2_S
+		return 2 + blockSize/4 + blockSize/16
+	case 23: // IQ4_XS
+		return 2 + 2 + blockSize/2 + blockSize/64
+	case 24: // I8
+		return 1
+	case 25: // I16
+		return 2
+	case 26: // I32
+		return 4
+	case 27: // I64
+		return 8
+	case 28: // F64
+		return 8
+	case 29: // IQ1_M
+		return blockSize/8 + blockSize/16 + blockSize/32
 	default:
 		return 0
 	}
@@ -185,7 +207,7 @@ func (t Tensor) parameters() uint64 {
 	return count
 }

-func (t Tensor) size() uint64 {
+func (t Tensor) Size() uint64 {
 	return t.parameters() * t.typeSize() / t.blockSize()
 }
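To make the `Size()` arithmetic concrete, here is a small worked example under assumed values: a hypothetical 4096×4096 Q4_0 tensor (Kind 2), where the block size is 32 and the type size is 2 + 32/2 = 18 bytes per block:

```go
package main

import "fmt"

func main() {
	// parameters * typeSize / blockSize, as in Tensor.Size() above.
	var parameters, typeSize, blockSize uint64 = 4096 * 4096, 18, 32
	fmt.Println(parameters * typeSize / blockSize) // 9437184 bytes ≈ 9 MiB
}
```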
@@ -288,7 +310,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
 		// mixtral 8x22b
 		ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
 		partialOffload = max(
-			3*ffnGateExpsWeight.size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
+			3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
 			4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
 		)
 	} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {


@@ -62,16 +62,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) {
 	return model, nil
 }

-const (
-	_ uint32 = iota
-	GGUFTokenNormal
-	GGUFTokenUnknown
-	GGUFTokenControl
-	GGUFTokenUserDefined
-	GGUFTokenUnused
-	GGUFTokenByte
-)
-
 const (
 	ggufTypeUint8 uint32 = iota
 	ggufTypeInt8
@@ -251,11 +241,11 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
 	}

 	for _, tensor := range llm.tensors {
-		if _, err := rs.Seek(int64(tensor.size()), io.SeekCurrent); err != nil {
+		if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
 			return err
 		}

-		padding := llm.padding(int64(tensor.size()), int64(alignment))
+		padding := llm.padding(int64(tensor.Size()), int64(alignment))
 		if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
 			return err
 		}
@@ -480,9 +470,11 @@ var ggufKVOrder = map[string][]string{
"gemma.attention.key_length", "gemma.attention.key_length",
"gemma.attention.value_length", "gemma.attention.value_length",
"general.file_type", "general.file_type",
"tokenizer.ggml.pre",
"tokenizer.ggml.model", "tokenizer.ggml.model",
"tokenizer.ggml.tokens", "tokenizer.ggml.tokens",
"tokenizer.ggml.scores", "tokenizer.ggml.scores",
"tokenizer.ggml.merges",
"tokenizer.ggml.token_type", "tokenizer.ggml.token_type",
"tokenizer.ggml.bos_token_id", "tokenizer.ggml.bos_token_id",
"tokenizer.ggml.eos_token_id", "tokenizer.ggml.eos_token_id",


@@ -53,6 +53,12 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 		opts.NumCtx = max(opts.NumCtx, 2048)
 	}

+	layers := ggml.Tensors().Layers()
+
+	// add one layer worth of memory as a buffer
+	if blk0, ok := layers["blk.0"]; ok {
+		memoryMinimum += blk0.size()
+	}
+
 	// fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
 	var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV()
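For a sense of scale, here is the same KV-cache formula evaluated with assumed Llama-3-8B-like dimensions (illustrative values, not taken from this diff): n_ctx 2048, 32 layers, embedding 4096, 32 heads, 8 KV heads:

```go
package main

import "fmt"

func main() {
	// 2 (k and v) * 2 bytes (fp16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
	var numCtx, blocks, embd, heads, headsKV uint64 = 2048, 32, 4096, 32, 8
	kv := 2 * 2 * numCtx * blocks * embd / heads * headsKV
	fmt.Println(kv, "bytes") // 268435456 bytes = 256 MiB
}
```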
@@ -73,13 +79,11 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 		graphPartialOffload = graphFullOffload
 	}

-	layers := ggml.Tensors().Layers()
-
 	// memoryRequiredTotal represents the memory required for full GPU offloading (all layers)
-	memoryRequiredTotal := memoryMinimum + graphFullOffload + layers["blk.0"].size()
+	memoryRequiredTotal := memoryMinimum + graphFullOffload

 	// memoryRequiredPartial represents the memory required for partial GPU offloading (n > 0, n < layers)
-	memoryRequiredPartial := memoryMinimum + graphPartialOffload + layers["blk.0"].size()
+	memoryRequiredPartial := memoryMinimum + graphPartialOffload

 	var memoryLayerOutput uint64
 	if layer, ok := layers["output_norm"]; ok {
@@ -100,15 +104,17 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 	var layerCount int
 	for i := 0; i < int(ggml.KV().BlockCount()); i++ {
-		memoryLayer := layers[fmt.Sprintf("blk.%d", i)].size()
+		if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
+			memoryLayer := blk.size()

-		// KV is proportional to the number of layers
-		memoryLayer += kv / ggml.KV().BlockCount()
+			// KV is proportional to the number of layers
+			memoryLayer += kv / ggml.KV().BlockCount()

-		memoryRequiredTotal += memoryLayer
-		if memoryAvailable > memoryRequiredPartial+memoryLayer {
-			memoryRequiredPartial += memoryLayer
-			layerCount++
+			memoryRequiredTotal += memoryLayer
+			if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredPartial+memoryLayer) {
+				memoryRequiredPartial += memoryLayer
+				layerCount++
+			}
 		}
 	}
@@ -117,7 +123,7 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 		memoryRequiredTotal += memoryLayerOutput
 	}

-	if memoryAvailable > memoryRequiredTotal {
+	if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredTotal) {
 		layerCount = int(ggml.KV().BlockCount()) + 1
 		memoryRequiredPartial = memoryRequiredTotal
 	}
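With `num_gpu` set (so `opts.NumGPU >= 0`), the estimator obeys the requested layer count instead of the free-memory check. A hedged usage sketch via the documented `options.num_gpu` API parameter, assuming the default local server:

```go
package main

import (
	"bytes"
	"net/http"
)

func main() {
	// Force exactly 10 layers onto the GPU for this request.
	body := []byte(`{"model": "llama3", "prompt": "hi", "options": {"num_gpu": 10}}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```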
@@ -128,10 +134,10 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
"offload to gpu", "offload to gpu",
slog.Group( slog.Group(
"layers", "layers",
// actual number of layers offloaded // requested number of layers to offload
"real", opts.NumGPU, "requested", opts.NumGPU,
// estimated number of layers that can be offloaded // estimated number of layers that can be offloaded
"estimate", layerCount, "real", layerCount,
), ),
slog.Group( slog.Group(
"memory", "memory",


@@ -0,0 +1,31 @@
diff --git a/common/common.cpp b/common/common.cpp
index ba1ecf0e..cead57cc 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
+ mparams.progress_callback = params.progress_callback;
+ mparams.progress_callback_user_data = params.progress_callback_user_data;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
diff --git a/common/common.h b/common/common.h
index d80344f2..71e84834 100644
--- a/common/common.h
+++ b/common/common.h
@@ -174,6 +174,13 @@ struct gpt_params {
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector
std::vector<std::string> image; // path to image file(s)
+
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
+ // If the provided progress_callback returns true, model loading continues.
+ // If it returns false, model loading is immediately aborted.
+ llama_progress_callback progress_callback = NULL;
+ // context pointer passed to the progress callback
+ void * progress_callback_user_data;
};
void gpt_params_handle_model_default(gpt_params & params);


@@ -1,8 +1,17 @@
+From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
+From: Michael Yang <mxyng@pm.me>
+Date: Thu, 23 May 2024 11:18:45 -0700
+Subject: [PATCH] throw exception on load errors
+
+---
+ llama.cpp | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
 diff --git a/llama.cpp b/llama.cpp
-index 4225f955..7b762f86 100644
+index 15c66077..8ba90b6a 100644
 --- a/llama.cpp
 +++ b/llama.cpp
-@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
+@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
@@ -11,10 +20,10 @@ index 4225f955..7b762f86 100644
     }

     return 0;
-@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file(
-    };
+@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
+        }
+        model->rpc_servers.push_back(servers);
     }
 -    int status = llama_model_load(path_model, *model, params);
 -    GGML_ASSERT(status <= 0);
 -    if (status < 0) {
@@ -22,6 +31,7 @@ index 4225f955..7b762f86 100644
 -        LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
 -    } else if (status == -2) {
 -        LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
++
 +    try {
 +        int status = llama_model_load(path_model, *model, params);
 +        GGML_ASSERT(status <= 0);
@@ -42,3 +52,6 @@ index 4225f955..7b762f86 100644
     }

     return model;
+--
+2.45.1


@@ -1,24 +0,0 @@
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index e3c9bcd4..b43f892d 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -573,14 +573,16 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
struct ggml_tensor * embeddings = inp;
if (ctx->has_class_embedding) {
embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
+ }
+ ggml_set_name(embeddings, "embeddings");
+ ggml_set_input(embeddings);
+
+ if (ctx->has_class_embedding) {
embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
embeddings = ggml_acc(ctx0, embeddings, inp,
embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
}
- ggml_set_name(embeddings, "embeddings");
- ggml_set_input(embeddings);
-
struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
ggml_set_name(positions, "positions");

View File

@@ -0,0 +1,35 @@
From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:33:20 -0700
Subject: [PATCH] default pretokenizer on unrecognized type
---
llama.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 15c66077..af1aede3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4504,9 +4504,6 @@ static void llm_load_vocab(
LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
LLAMA_LOG_WARN("%s: \n", __func__);
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- } else if (
- tokenizer_pre == "default") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
} else if (
tokenizer_pre == "llama3" ||
tokenizer_pre == "llama-v3" ||
@@ -4553,7 +4550,7 @@ static void llm_load_vocab(
tokenizer_pre == "dbrx") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
} else {
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
}
} else {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
--
2.45.1
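
The behavioral change here: an unrecognized tokenizer.ggml.pre value now degrades to the default BPE pre-tokenizer (with the warning logged above) instead of aborting the load. A small sketch of that lookup-with-fallback shape (illustrative Go, not the C++ itself; only the names that appear in the hunk are real):

package main

import "fmt"

// preTokenizer resolves a pre-tokenizer name, falling back to "default"
// for anything unrecognized rather than returning an error.
func preTokenizer(name string) string {
    switch name {
    case "llama3", "llama-v3", "llama-bpe":
        return "llama3"
    case "dbrx":
        return "dbrx"
    default:
        // previously a hard error; now warn and fall back
        fmt.Printf("warning: unknown pre-tokenizer %q, using default\n", name)
        return "default"
    }
}

func main() {
    fmt.Println(preTokenizer("llama-bpe"))       // llama3
    fmt.Println(preTokenizer("brand-new-model")) // default, with a warning
}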

View File

@@ -38,6 +38,7 @@ type LlamaServer interface {
Detokenize(ctx context.Context, tokens []int) (string, error)
Close() error
EstimatedVRAM() uint64
+ EstimatedTotal() uint64
}
// llmServer is an instance of the llama.cpp server
@@ -54,6 +55,7 @@ type llmServer struct {
totalLayers uint64
gpuCount int
loadDuration time.Duration // Record how long it took the model to load
+ loadProgress float32
sem *semaphore.Weighted
}
@@ -88,6 +90,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
cpuRunner = serverForCpu()
gpuCount = 0
+ _, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
} else {
if gpus[0].Library == "metal" {
memInfo, err := gpu.GetCPUMem()
@@ -198,6 +201,23 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
params = append(params, "--numa")
}
+ flashAttnEnabled := envconfig.FlashAttention
+ // partial offloading does not support flash attention
+ if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
+ flashAttnEnabled = false
+ }
+ // only cuda (compute capability 7+) and metal support flash attention
+ for _, g := range gpus {
+ if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
+ flashAttnEnabled = false
+ }
+ }
+ if flashAttnEnabled {
+ params = append(params, "--flash-attn")
+ }
numParallel := envconfig.NumParallel
// TODO (jmorganca): multimodal models don't support parallel yet
@@ -316,8 +336,22 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
}
slog.Info("starting llama server", "cmd", s.cmd.String())
- // Log at debug as the environment is inherited and might contain sensitive information
- slog.Debug("subprocess", "environment", s.cmd.Env)
+ if envconfig.Debug {
+ filteredEnv := []string{}
+ for _, ev := range s.cmd.Env {
+ if strings.HasPrefix(ev, "CUDA_") ||
+ strings.HasPrefix(ev, "ROCM_") ||
+ strings.HasPrefix(ev, "HIP_") ||
+ strings.HasPrefix(ev, "HSA_") ||
+ strings.HasPrefix(ev, "GGML_") ||
+ strings.HasPrefix(ev, "PATH=") ||
+ strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
+ filteredEnv = append(filteredEnv, ev)
+ }
+ }
+ // Log at debug as the environment is inherited and might contain sensitive information
+ slog.Debug("subprocess", "environment", filteredEnv)
+ }
if err = s.cmd.Start(); err != nil {
// Detect permission denied and augment them essage about noexec
@@ -392,10 +426,11 @@ func (s ServerStatus) ToString() string {
}
type ServerStatusResp struct {
Status string `json:"status"`
SlotsIdle int `json:"slots_idle"`
SlotsProcessing int `json:"slots_processing"`
Error string `json:"error"`
+ Progress float32 `json:"progress"`
}
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
@@ -443,6 +478,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
case "no slot available":
return ServerStatusNoSlotsAvailable, nil
case "loading model":
+ s.loadProgress = status.Progress
return ServerStatusLoadingModel, nil
default:
return ServerStatusError, fmt.Errorf("server error: %+v", status)
@@ -483,7 +519,8 @@ func (s *llmServer) Ping(ctx context.Context) error {
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
start := time.Now()
- expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
+ stallDuration := 60 * time.Second
+ stallTimer := time.Now().Add(stallDuration) // give up if we stall for
slog.Info("waiting for llama runner to start responding")
var lastStatus ServerStatus = -1
@@ -501,13 +538,13 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
default:
}
- if time.Now().After(expiresAt) {
+ if time.Now().After(stallTimer) {
// timeout
msg := ""
if s.status != nil && s.status.LastErrMsg != "" {
msg = s.status.LastErrMsg
}
- return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
+ return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
}
if s.cmd.ProcessState != nil {
msg := ""
@@ -518,6 +555,7 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
}
ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
defer cancel()
+ priorProgress := s.loadProgress
status, _ := s.getServerStatus(ctx)
if lastStatus != status && status != ServerStatusReady {
// Only log on status changes
@@ -530,6 +568,11 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return nil
default:
lastStatus = status
+ // Reset the timer as long as we're making forward progress on the load
+ if priorProgress != s.loadProgress {
+ slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
+ stallTimer = time.Now().Add(stallDuration)
+ }
time.Sleep(time.Millisecond * 250)
continue
}
@@ -955,6 +998,10 @@ func (s *llmServer) EstimatedVRAM() uint64 {
return s.estimatedVRAM
}
+ func (s *llmServer) EstimatedTotal() uint64 {
+ return s.estimatedTotal
+ }
func parseDurationMs(ms float64) time.Duration {
dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
if err != nil {
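
Taken together: the runner's health endpoint now reports a load fraction, and WaitUntilRunning swaps the fixed 10-minute deadline for a 60-second stall timer that resets whenever that fraction moves, so a slow-but-progressing load is no longer killed arbitrarily. A minimal sketch of the same stall-reset pattern from a standalone client (the address, port, and /health path are assumptions for illustration):

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "time"
)

type statusResp struct {
    Status   string  `json:"status"`
    Progress float32 `json:"progress"`
}

func main() {
    stall := 60 * time.Second
    deadline := time.Now().Add(stall)
    var last float32

    for time.Now().Before(deadline) {
        resp, err := http.Get("http://127.0.0.1:8080/health")
        if err == nil {
            var st statusResp
            _ = json.NewDecoder(resp.Body).Decode(&st)
            resp.Body.Close()
            if st.Status == "ok" {
                fmt.Println("model ready")
                return
            }
            // forward progress pushes the stall deadline out again
            if st.Progress != last {
                last = st.Progress
                deadline = time.Now().Add(stall)
            }
        }
        time.Sleep(250 * time.Millisecond)
    }
    fmt.Printf("gave up: load stalled at progress %0.2f\n", last)
}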

View File

@@ -162,7 +162,7 @@ app.on('before-quit', () => {
}
})
-const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
+const updateURL = `https://ollama.com/api/update?os=${process.platform}&arch=${
process.arch
}&version=${app.getVersion()}&id=${id()}`

View File

@@ -1,4 +1,4 @@
-package model
+package parser
import (
"bufio"
@@ -8,6 +8,7 @@ import (
"io"
"strconv"
"strings"
+ "unicode"
)
type File struct {
@@ -68,6 +69,11 @@ func ParseFile(r io.Reader) (*File, error) {
var b bytes.Buffer
var role string
+ var lineCount int
+ var linePos int
+ var utf16 bool
var f File
br := bufio.NewReader(r)
@@ -79,6 +85,17 @@ func ParseFile(r io.Reader) (*File, error) {
return nil, err
}
+ // the utf16 byte order mark will be read as "unreadable" by ReadRune()
+ if isUnreadable(r) && lineCount == 0 && linePos == 0 {
+ utf16 = true
+ continue
+ }
+ // skip the second byte if we're reading utf16
+ if utf16 && r == 0 {
+ continue
+ }
next, r, err := parseRuneForState(r, curr)
if errors.Is(err, io.ErrUnexpectedEOF) {
return nil, fmt.Errorf("%w: %s", err, b.String())
@@ -86,6 +103,13 @@ func ParseFile(r io.Reader) (*File, error) {
return nil, err
}
+ if isNewline(r) {
+ lineCount++
+ linePos = 0
+ } else {
+ linePos++
+ }
// process the state transition, some transitions need to be intercepted and redirected
if next != curr {
switch curr {
@@ -285,6 +309,10 @@ func isNewline(r rune) bool {
return r == '\r' || r == '\n'
}
+ func isUnreadable(r rune) bool {
+ return r == unicode.ReplacementChar
+ }
func isValidMessageRole(role string) bool {
return role == "system" || role == "user" || role == "assistant"
}
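
Why unicode.ReplacementChar works as the UTF-16 signal: the byte order mark (0xFF 0xFE little endian, 0xFE 0xFF big endian) is not valid UTF-8, so bufio.Reader.ReadRune decodes each BOM byte to U+FFFD; after that, every second byte of UTF-16-encoded ASCII is a NUL, which the loop skips. A self-contained check of that behavior:

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "unicode"
)

func main() {
    // "FROM" encoded as UTF-16 LE, preceded by the 0xFF 0xFE byte order mark
    utf16le := []byte{0xff, 0xfe, 'F', 0, 'R', 0, 'O', 0, 'M', 0}
    br := bufio.NewReader(bytes.NewReader(utf16le))

    for {
        r, _, err := br.ReadRune()
        if err != nil {
            break
        }
        switch {
        case r == unicode.ReplacementChar:
            // each BOM byte is invalid UTF-8 and decodes to U+FFFD
            fmt.Println("saw a BOM byte; input is utf16")
        case r == 0:
            // high byte of a UTF-16 LE code unit for ASCII text
            continue
        default:
            fmt.Printf("%c", r)
        }
    }
    fmt.Println() // the surviving runes spell: FROM
}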

View File

@@ -1,11 +1,13 @@
-package model
+package parser
import (
"bytes"
+ "encoding/binary"
"fmt"
"io"
"strings"
"testing"
+ "unicode/utf16"
"github.com/stretchr/testify/assert"
)
@@ -509,3 +511,37 @@ SYSTEM ""
}
}
+ func TestParseFileUTF16ParseFile(t *testing.T) {
+ data := `FROM bob
+ PARAMETER param1 1
+ PARAMETER param2 4096
+ SYSTEM You are a utf16 file.
+ `
+ // simulate a utf16 le file
+ utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...))
+ buf := new(bytes.Buffer)
+ err := binary.Write(buf, binary.LittleEndian, utf16File)
+ assert.NoError(t, err)
+ actual, err := ParseFile(buf)
+ assert.NoError(t, err)
+ expected := []Command{
+ {Name: "model", Args: "bob"},
+ {Name: "param1", Args: "1"},
+ {Name: "param2", Args: "4096"},
+ {Name: "system", Args: "You are a utf16 file."},
+ }
+ assert.Equal(t, expected, actual.Commands)
+ // simulate a utf16 be file
+ buf = new(bytes.Buffer)
+ err = binary.Write(buf, binary.BigEndian, utf16File)
+ assert.NoError(t, err)
+ actual, err = ParseFile(buf)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, actual.Commands)
+ }

View File

@@ -221,7 +221,7 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w
}
defer resp.Body.Close()
- n, err := io.Copy(w, io.TeeReader(resp.Body, part))
+ n, err := io.CopyN(w, io.TeeReader(resp.Body, part), part.Size)
if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.ErrUnexpectedEOF) {
// rollback progress
b.Completed.Add(-n)
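
The io.Copy to io.CopyN switch makes each chunk download copy at most part.Size bytes: io.Copy drains the body for as long as the server keeps sending, while io.CopyN stops exactly at the chunk boundary and reports io.EOF if the body ends short, keeping progress accounting aligned with the part. The semantics in miniature:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // exactly n bytes available: CopyN returns n, nil
    n, err := io.CopyN(io.Discard, strings.NewReader("hello"), 5)
    fmt.Println(n, err) // 5 <nil>

    // source ends early: CopyN returns what it copied plus io.EOF
    n, err = io.CopyN(io.Discard, strings.NewReader("hi"), 5)
    fmt.Println(n, err == io.EOF) // 2 true

    // source longer than n: CopyN stops at the boundary
    n, err = io.CopyN(io.Discard, strings.NewReader("hello world"), 5)
    fmt.Println(n, err) // 5 <nil>
}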

View File

@@ -31,6 +31,8 @@ var (
RunnersDir string
// Set via OLLAMA_TMPDIR in the environment
TmpDir string
+ // Experimental flash attention
+ FlashAttention bool
)
func AsMap() map[string]string {
@@ -45,6 +47,7 @@ func AsMap() map[string]string {
"OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel),
"OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir),
"OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir),
+ "OLLAMA_FLASH_ATTENTION": fmt.Sprintf("%v", FlashAttention),
}
}
@@ -78,6 +81,13 @@ func LoadConfig() {
}
}
+ if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" {
+ d, err := strconv.ParseBool(fa)
+ if err == nil {
+ FlashAttention = d
+ }
+ }
RunnersDir = clean("OLLAMA_RUNNERS_DIR")
if runtime.GOOS == "windows" && RunnersDir == "" {
// On Windows we do not carry the payloads inside the main executable

View File

@@ -17,4 +17,7 @@ func TestConfig(t *testing.T) {
t.Setenv("OLLAMA_DEBUG", "1")
LoadConfig()
require.True(t, Debug)
+ t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
+ LoadConfig()
+ require.True(t, FlashAttention)
}

View File

@@ -27,6 +27,7 @@ import (
"github.com/ollama/ollama/auth"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/llm"
+ "github.com/ollama/ollama/parser"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/types/errtypes"
"github.com/ollama/ollama/types/model"
@@ -61,36 +62,36 @@ func (m *Model) IsEmbedding() bool {
}
func (m *Model) String() string {
- var modelfile model.File
+ var modelfile parser.File
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "model",
Args: m.ModelPath,
})
for _, adapter := range m.AdapterPaths {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "adapter",
Args: adapter,
})
}
for _, projector := range m.ProjectorPaths {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "model",
Args: projector,
})
}
if m.Template != "" {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "template",
Args: m.Template,
})
}
if m.System != "" {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "system",
Args: m.System,
})
@@ -100,13 +101,13 @@ func (m *Model) String() string {
switch v := v.(type) {
case []any:
for _, s := range v {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: k,
Args: fmt.Sprintf("%v", s),
})
}
default:
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: k,
Args: fmt.Sprintf("%v", v),
})
@@ -114,14 +115,14 @@ func (m *Model) String() string {
}
for _, license := range m.License {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "license",
Args: license,
})
}
for _, msg := range m.Messages {
- modelfile.Commands = append(modelfile.Commands, model.Command{
+ modelfile.Commands = append(modelfile.Commands, parser.Command{
Name: "message",
Args: fmt.Sprintf("%s %s", msg.Role, msg.Content),
})
@@ -314,7 +315,7 @@ func realpath(rel, from string) string {
return abspath
}
- func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *model.File, fn func(resp api.ProgressResponse)) (err error) {
+ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
config := ConfigV2{
OS: "linux",
Architecture: "amd64",
@@ -339,7 +340,24 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
return err
}
} else if strings.HasPrefix(c.Args, "@") {
- blobpath, err := GetBlobsPath(strings.TrimPrefix(c.Args, "@"))
+ digest := strings.TrimPrefix(c.Args, "@")
+ if ib, ok := intermediateBlobs[digest]; ok {
+ p, err := GetBlobsPath(ib)
+ if err != nil {
+ return err
+ }
+ if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
+ // pass
+ } else if err != nil {
+ return err
+ } else {
+ fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib)})
+ digest = ib
+ }
+ }
+ blobpath, err := GetBlobsPath(digest)
if err != nil {
return err
}
@@ -350,14 +368,14 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
}
defer blob.Close()
- baseLayers, err = parseFromFile(ctx, blob, fn)
+ baseLayers, err = parseFromFile(ctx, blob, digest, fn)
if err != nil {
return err
}
} else if file, err := os.Open(realpath(modelFileDir, c.Args)); err == nil {
defer file.Close()
- baseLayers, err = parseFromFile(ctx, file, fn)
+ baseLayers, err = parseFromFile(ctx, file, "", fn)
if err != nil {
return err
}
@@ -397,10 +415,17 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
return err
}
- baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType)
+ layers, err := parseFromFile(ctx, temp, "", fn)
if err != nil {
return err
}
+ if len(layers) != 1 {
+ return errors.New("quantization failed")
+ }
+ baseLayer.Layer = layers[0].Layer
+ baseLayer.GGML = layers[0].GGML
}
}

View File

@@ -80,7 +80,7 @@ func NewLayerFromLayer(digest, mediatype, from string) (*Layer, error) {
}, nil
}
- func (l *Layer) Open() (io.ReadCloser, error) {
+ func (l *Layer) Open() (io.ReadSeekCloser, error) {
blob, err := GetBlobsPath(l.Digest)
if err != nil {
return nil, err
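
Returning io.ReadSeekCloser instead of io.ReadCloser lets callers sniff part of a blob and then rewind it, which appears to be what the zip-import path below needs when it reopens the converted layer for decoding. A sketch of the general pattern (assumed shape, not ollama's code):

package main

import (
    "fmt"
    "io"
    "os"
)

// readHeader sniffs the first few bytes, then rewinds so the caller
// can decode the stream from the start.
func readHeader(rs io.ReadSeeker) ([]byte, error) {
    head := make([]byte, 4)
    if _, err := io.ReadFull(rs, head); err != nil {
        return nil, err
    }
    if _, err := rs.Seek(0, io.SeekStart); err != nil {
        return nil, err
    }
    return head, nil
}

func main() {
    f, err := os.CreateTemp("", "blob")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    f.WriteString("GGUFdata...")
    f.Seek(0, io.SeekStart)

    head, err := readHeader(f)
    if err != nil {
        panic(err)
    }
    fmt.Printf("magic: %s\n", head) // magic: GGUF
}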

View File

@@ -17,6 +17,8 @@ import (
"github.com/ollama/ollama/types/model"
)
+ var intermediateBlobs map[string]string = make(map[string]string)
type layerWithGGML struct {
*Layer
*llm.GGML
@@ -76,7 +78,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
return layers, nil
}
- func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
stat, err := file.Stat()
if err != nil {
return nil, err
@@ -165,16 +167,11 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp
}
layer, err := NewLayer(temp, "application/vnd.ollama.image.model")
- if err != nil {
- return nil, fmt.Errorf("aaa: %w", err)
- }
- blobpath, err := GetBlobsPath(layer.Digest)
if err != nil {
return nil, err
}
- bin, err := os.Open(blobpath)
+ bin, err := layer.Open()
if err != nil {
return nil, err
}
@@ -185,16 +182,13 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp
return nil, err
}
- layer, err = NewLayerFromLayer(layer.Digest, layer.MediaType, "")
- if err != nil {
- return nil, err
- }
layers = append(layers, &layerWithGGML{layer, ggml})
+ intermediateBlobs[digest] = layer.Digest
return layers, nil
}
- func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
sr := io.NewSectionReader(file, 0, 512)
contentType, err := detectContentType(sr)
if err != nil {
@@ -205,7 +199,7 @@ func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressRespo
case "gguf", "ggla":
// noop
case "application/zip":
- return parseFromZipFile(ctx, file, fn)
+ return parseFromZipFile(ctx, file, digest, fn)
default:
return nil, fmt.Errorf("unsupported content type: %s", contentType)
}
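
The idea behind intermediateBlobs: converting an uploaded zip (e.g. safetensors) into a GGUF layer is expensive, so the digest of the source blob is mapped to the digest of the converted layer, and later CreateModel or CreateBlob calls with the same source digest can stat the cached blob and reuse it instead of converting again. A stripped-down sketch of that memoization (the helper names here are stand-ins, not the real functions):

package main

import (
    "fmt"
    "os"
)

// converted maps a source digest to the digest of its converted output,
// playing the role of intermediateBlobs.
var converted = map[string]string{}

// blobPath is a stand-in for GetBlobsPath.
func blobPath(digest string) string { return os.TempDir() + "/blobs/" + digest }

// convert is a stand-in for the expensive zip -> GGUF conversion.
func convert(src string) string { return "sha256-converted-" + src }

func layerFor(srcDigest string) string {
    if out, ok := converted[srcDigest]; ok {
        if _, err := os.Stat(blobPath(out)); err == nil {
            fmt.Println("using cached layer", out)
            return out
        }
        // the cached blob was pruned: evict the stale entry and reconvert
        delete(converted, srcDigest)
    }
    out := convert(srcDigest)
    converted[srcDigest] = out
    return out
}

func main() {
    fmt.Println(layerFor("sha256-zip"))
    fmt.Println(layerFor("sha256-zip")) // hits the cache only if the blob still exists
}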

View File

@@ -29,7 +29,9 @@ import (
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/openai"
+ "github.com/ollama/ollama/parser"
"github.com/ollama/ollama/server/envconfig"
+ "github.com/ollama/ollama/types/errtypes"
"github.com/ollama/ollama/types/model"
"github.com/ollama/ollama/version"
)
@@ -517,7 +519,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
name := model.ParseName(cmp.Or(req.Model, req.Name))
if !name.IsValid() {
- c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "invalid model name"})
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg})
return
}
@@ -538,7 +540,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
r = f
}
- modelfile, err := model.ParseFile(r)
+ modelfile, err := parser.ParseFile(r)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -708,7 +710,7 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
}
var sb strings.Builder
- fmt.Fprintln(&sb, "# Modelfile generate by \"ollama show\"")
+ fmt.Fprintln(&sb, "# Modelfile generated by \"ollama show\"")
fmt.Fprintln(&sb, "# To build a new Modelfile based on this, replace FROM with:")
fmt.Fprintf(&sb, "# FROM %s\n\n", model.ShortName)
fmt.Fprint(&sb, model.String())
@@ -724,7 +726,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
return
}
- var models []api.ModelResponse
+ models := []api.ModelResponse{}
if err := filepath.Walk(manifests, func(path string, info os.FileInfo, _ error) error {
if !info.IsDir() {
rel, err := filepath.Rel(manifests, path)
@@ -839,6 +841,25 @@ func (s *Server) HeadBlobHandler(c *gin.Context) {
}
func (s *Server) CreateBlobHandler(c *gin.Context) {
+ if ib, ok := intermediateBlobs[c.Param("digest")]; ok {
+ p, err := GetBlobsPath(ib)
+ if err != nil {
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+ return
+ }
+ if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
+ slog.Info("evicting intermediate blob which no longer exists", "digest", ib)
+ delete(intermediateBlobs, c.Param("digest"))
+ } else if err != nil {
+ c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+ return
+ } else {
+ c.Status(http.StatusOK)
+ return
+ }
+ }
path, err := GetBlobsPath(c.Param("digest"))
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
@@ -979,6 +1000,7 @@ func (s *Server) GenerateRoutes() http.Handler {
r.POST("/api/show", s.ShowModelHandler)
r.POST("/api/blobs/:digest", s.CreateBlobHandler)
r.HEAD("/api/blobs/:digest", s.HeadBlobHandler)
+ r.GET("/api/ps", s.ProcessHandler)
// Compatibility endpoints
r.POST("/v1/chat/completions", openai.Middleware(), s.ChatHandler)
@@ -1084,7 +1106,7 @@ func Serve(ln net.Listener) error {
return err
}
<-ctx.Done()
- return err
+ return nil
}
func waitForStream(c *gin.Context, ch chan interface{}) {
@@ -1137,6 +1159,42 @@ func streamResponse(c *gin.Context, ch chan any) {
})
}
+ func (s *Server) ProcessHandler(c *gin.Context) {
+ models := []api.ModelResponse{}
+ for _, v := range s.sched.loaded {
+ model := v.model
+ modelDetails := api.ModelDetails{
+ Format: model.Config.ModelFormat,
+ Family: model.Config.ModelFamily,
+ Families: model.Config.ModelFamilies,
+ ParameterSize: model.Config.ModelType,
+ QuantizationLevel: model.Config.FileType,
+ }
+ mr := api.ModelResponse{
+ Model: model.ShortName,
+ Name: model.ShortName,
+ Size: int64(v.estimatedTotal),
+ SizeVRAM: int64(v.estimatedVRAM),
+ Digest: model.Digest,
+ Details: modelDetails,
+ ExpiresAt: v.expiresAt,
+ }
+ // The scheduler waits to set expiresAt, so if a model is loading it's
+ // possible that it will be set to the unix epoch. For those cases, just
+ // calculate the time w/ the sessionDuration instead.
+ var epoch time.Time
+ if v.expiresAt == epoch {
+ mr.ExpiresAt = time.Now().Add(v.sessionDuration)
+ }
+ models = append(models, mr)
+ }
+ c.JSON(http.StatusOK, api.ListResponse{Models: models})
+ }
// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
func chatPrompt(ctx context.Context, runner *runnerRef, template string, messages []api.Message, numCtx int) (string, error) {
encode := func(s string) ([]int, error) {
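
The new GET /api/ps route reports what is currently loaded: total size, VRAM-resident size, and when the runner expires (falling back to now + sessionDuration while the model is still loading). A minimal client sketch (the listen address is ollama's default; the JSON field tags are assumptions based on the fields set above):

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

type psModel struct {
    Name      string `json:"name"`
    Size      int64  `json:"size"`
    SizeVRAM  int64  `json:"size_vram"`
    ExpiresAt string `json:"expires_at"`
}

type psResponse struct {
    Models []psModel `json:"models"`
}

func main() {
    resp, err := http.Get("http://127.0.0.1:11434/api/ps")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var ps psResponse
    if err := json.NewDecoder(resp.Body).Decode(&ps); err != nil {
        panic(err)
    }
    for _, m := range ps.Models {
        fmt.Printf("%s: %d bytes total, %d in VRAM, expires %s\n",
            m.Name, m.Size, m.SizeVRAM, m.ExpiresAt)
    }
}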

View File

@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/ollama/ollama/api" "github.com/ollama/ollama/api"
"github.com/ollama/ollama/types/model" "github.com/ollama/ollama/parser"
"github.com/ollama/ollama/version" "github.com/ollama/ollama/version"
) )
@@ -56,7 +56,7 @@ func Test_Routes(t *testing.T) {
fname := createTestFile(t, "ollama-model") fname := createTestFile(t, "ollama-model")
r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname)) r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname))
modelfile, err := model.ParseFile(r) modelfile, err := parser.ParseFile(r)
assert.Nil(t, err) assert.Nil(t, err)
fn := func(resp api.ProgressResponse) { fn := func(resp api.ProgressResponse) {
t.Logf("Status: %s", resp.Status) t.Logf("Status: %s", resp.Status)
@@ -95,6 +95,7 @@ func Test_Routes(t *testing.T) {
err = json.Unmarshal(body, &modelList) err = json.Unmarshal(body, &modelList)
assert.Nil(t, err) assert.Nil(t, err)
assert.NotNil(t, modelList.Models)
assert.Equal(t, 0, len(modelList.Models)) assert.Equal(t, 0, len(modelList.Models))
}, },
}, },

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"log/slog"
"reflect"
+ "runtime"
"sort"
"strings"
"sync"
@@ -177,7 +178,7 @@ func (s *Scheduler) processPending(ctx context.Context) {
}
// Trigger an expiration to unload once it's done
runnerToExpire.refMu.Lock()
- slog.Debug("resetting model to expire immediately to make room", "model", runnerToExpire.model, "refCount", runnerToExpire.refCount)
+ slog.Debug("resetting model to expire immediately to make room", "modelPath", runnerToExpire.modelPath, "refCount", runnerToExpire.refCount)
if runnerToExpire.expireTimer != nil {
runnerToExpire.expireTimer.Stop()
runnerToExpire.expireTimer = nil
@@ -190,13 +191,13 @@ func (s *Scheduler) processPending(ctx context.Context) {
// Wait for the unload to happen
// Note: at this point we're queueing up all incoming requests, even if they were for
// a different model that's loaded and not scheduled to be removed.
- slog.Debug("waiting for pending requests to complete and unload to occur", "model", runnerToExpire.model)
+ slog.Debug("waiting for pending requests to complete and unload to occur", "modelPath", runnerToExpire.modelPath)
select {
case <-ctx.Done():
slog.Debug("shutting down scheduler pending loop")
return
case <-s.unloadedCh:
- slog.Debug("unload completed", "model", runnerToExpire.model)
+ slog.Debug("unload completed", "modelPath", runnerToExpire.modelPath)
continue
}
}
@@ -219,23 +220,23 @@ func (s *Scheduler) processCompleted(ctx context.Context) {
runner := s.loaded[finished.model.ModelPath]
s.loadedMu.Unlock()
if runner == nil {
- slog.Error("finished requeset signal received after model unloaded", "model", finished.model.ModelPath)
+ slog.Error("finished request signal received after model unloaded", "modelPath", finished.model.ModelPath)
continue
}
runner.refMu.Lock()
runner.refCount--
if runner.refCount <= 0 {
if runner.sessionDuration <= 0 {
- slog.Debug("runner with zero duration has gone idle, expiring to unload", "model", runner.model)
+ slog.Debug("runner with zero duration has gone idle, expiring to unload", "modelPath", runner.modelPath)
if runner.expireTimer != nil {
runner.expireTimer.Stop()
runner.expireTimer = nil
}
s.expiredCh <- runner
} else if runner.expireTimer == nil {
- slog.Debug("runner with non-zero duration has gone idle, adding timer", "model", runner.model, "duration", runner.sessionDuration)
+ slog.Debug("runner with non-zero duration has gone idle, adding timer", "modelPath", runner.modelPath, "duration", runner.sessionDuration)
runner.expireTimer = time.AfterFunc(runner.sessionDuration, func() {
- slog.Debug("timer expired, expiring to unload", "model", runner.model)
+ slog.Debug("timer expired, expiring to unload", "modelPath", runner.modelPath)
runner.refMu.Lock()
defer runner.refMu.Unlock()
if runner.expireTimer != nil {
@@ -244,19 +245,21 @@ func (s *Scheduler) processCompleted(ctx context.Context) {
}
s.expiredCh <- runner
})
+ runner.expiresAt = time.Now().Add(runner.sessionDuration)
} else {
- slog.Debug("runner with non-zero duration has gone idle, resetting timer", "model", runner.model, "duration", runner.sessionDuration)
+ slog.Debug("runner with non-zero duration has gone idle, resetting timer", "modelPath", runner.modelPath, "duration", runner.sessionDuration)
runner.expireTimer.Reset(runner.sessionDuration)
+ runner.expiresAt = time.Now().Add(runner.sessionDuration)
}
}
- slog.Debug("after processing request finished event", "model", runner.model, "refCount", runner.refCount)
+ slog.Debug("after processing request finished event", "modelPath", runner.modelPath, "refCount", runner.refCount)
runner.refMu.Unlock()
case runner := <-s.expiredCh:
- slog.Debug("runner expired event received", "model", runner.model)
+ slog.Debug("runner expired event received", "modelPath", runner.modelPath)
runner.refMu.Lock()
if runner.refCount > 0 {
// Shouldn't happen, but safeguard to ensure no leaked runners
- slog.Debug("expired event with positive ref count, retrying", "model", runner.model, "refCount", runner.refCount)
+ slog.Debug("expired event with positive ref count, retrying", "modelPath", runner.modelPath, "refCount", runner.refCount)
go func(runner *runnerRef) {
// We can't unload yet, but want to as soon as the current request completes
// So queue up another expired event
@@ -268,16 +271,16 @@ func (s *Scheduler) processCompleted(ctx context.Context) {
}
s.loadedMu.Lock()
- slog.Debug("got lock to unload", "model", runner.model)
+ slog.Debug("got lock to unload", "modelPath", runner.modelPath)
finished := runner.waitForVRAMRecovery()
runner.unload()
- delete(s.loaded, runner.model)
+ delete(s.loaded, runner.modelPath)
s.loadedMu.Unlock()
- slog.Debug("runner released", "model", runner.model)
+ slog.Debug("runner released", "modelPath", runner.modelPath)
runner.refMu.Unlock()
<-finished
- slog.Debug("sending an unloaded event", "model", runner.model)
+ slog.Debug("sending an unloaded event", "modelPath", runner.modelPath)
s.unloadedCh <- struct{}{}
}
}
@@ -316,18 +319,20 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
req.errCh <- err
return
}
- runner := &runnerRef{}
- runner.model = req.model.ModelPath
- runner.adapters = req.model.AdapterPaths
- runner.projectors = req.model.ProjectorPaths
- runner.llama = llama
- runner.Options = &req.opts
- runner.sessionDuration = req.sessionDuration
- runner.gpus = gpus
- runner.estimatedVRAM = llama.EstimatedVRAM()
- runner.loading = true
- runner.refCount = 1
+ runner := &runnerRef{
+ model: req.model,
+ modelPath: req.model.ModelPath,
+ llama: llama,
+ Options: &req.opts,
+ sessionDuration: req.sessionDuration,
+ gpus: gpus,
+ estimatedVRAM: llama.EstimatedVRAM(),
+ estimatedTotal: llama.EstimatedTotal(),
+ loading: true,
+ refCount: 1,
+ }
runner.refMu.Lock()
s.loadedMu.Lock()
s.loaded[req.model.ModelPath] = runner
slog.Info("loaded runners", "count", len(s.loaded))
@@ -339,7 +344,7 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
slog.Error("error loading llama server", "error", err)
runner.refCount--
req.errCh <- err
- slog.Debug("triggering expiration for failed load", "model", runner.model)
+ slog.Debug("triggering expiration for failed load", "model", runner.modelPath)
s.expiredCh <- runner
return
}
@@ -408,17 +413,18 @@ type runnerRef struct {
refCount uint // prevent unloading if > 0
// unloading bool // set to true when we are trying to unload the runner
llama llm.LlamaServer
loading bool // True only during initial load, then false forever
gpus gpu.GpuInfoList // Recorded at time of provisioning
estimatedVRAM uint64
+ estimatedTotal uint64
sessionDuration time.Duration
expireTimer *time.Timer
+ expiresAt time.Time
- model string
+ model *Model
- adapters []string
+ modelPath string
- projectors []string
*api.Options
}
@@ -431,9 +437,8 @@ func (runner *runnerRef) unload() {
if runner.llama != nil {
runner.llama.Close()
}
+ runner.model = nil
runner.llama = nil
- runner.adapters = nil
- runner.projectors = nil
runner.Options = nil
runner.gpus = nil
}
@@ -462,8 +467,8 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
- if !reflect.DeepEqual(runner.adapters, req.model.AdapterPaths) || // have the adapters changed?
+ if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed?
- !reflect.DeepEqual(runner.projectors, req.model.ProjectorPaths) || // have the projectors changed?
+ !reflect.DeepEqual(runner.model.ProjectorPaths, req.model.ProjectorPaths) || // have the projectors changed?
!reflect.DeepEqual(optsExisting, optsNew) || // have the runner options changed?
runner.llama.Ping(ctx) != nil {
return true
@@ -483,8 +488,8 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
finished := make(chan interface{}, 1)
- // CPU or Metal don't need checking, so no waiting required
+ // CPU or Metal don't need checking, so no waiting required, windows can page VRAM, and the APIs we query tend to be optimistic on free space
- if len(runner.gpus) == 1 && (runner.gpus[0].Library == "cpu" || runner.gpus[0].Library == "metal") {
+ if (len(runner.gpus) == 1 && (runner.gpus[0].Library == "cpu" || runner.gpus[0].Library == "metal")) || runtime.GOOS == "windows" {
finished <- struct{}{}
return finished
}
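
The expiry bookkeeping above follows a common pattern: arm a time.AfterFunc when a runner goes idle, Reset it on renewed activity, and record expiresAt alongside so other code (like the /api/ps handler) can report the deadline. A reduced sketch of that pattern (assumed shape, not the scheduler itself):

package main

import (
    "fmt"
    "sync"
    "time"
)

type idleRunner struct {
    mu        sync.Mutex
    timer     *time.Timer
    expiresAt time.Time
}

// touch arms or resets the expiry timer and records the new deadline.
func (r *idleRunner) touch(d time.Duration, onExpire func()) {
    r.mu.Lock()
    defer r.mu.Unlock()
    if r.timer == nil {
        r.timer = time.AfterFunc(d, onExpire)
    } else {
        r.timer.Reset(d)
    }
    r.expiresAt = time.Now().Add(d)
}

func main() {
    done := make(chan struct{})
    r := &idleRunner{}
    r.touch(100*time.Millisecond, func() {
        fmt.Println("expired, unloading")
        close(done)
    })
    fmt.Println("expires at", r.expiresAt.Format(time.RFC3339))
    <-done
}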

View File

@@ -151,7 +151,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
}
func TestRequests(t *testing.T) {
- ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ ctx, done := context.WithTimeout(context.Background(), time.Second)
defer done()
// Same model, same request
@@ -164,7 +164,8 @@ func TestRequests(t *testing.T) {
// simple reload of same model
scenario2a := newScenario(t, ctx, "ollama-model-1", 20)
- scenario2a.req.model = scenario1a.req.model
+ tmpModel := *scenario1a.req.model
+ scenario2a.req.model = &tmpModel
scenario2a.ggml = scenario1a.ggml
// Multiple loaded models
@@ -496,10 +497,9 @@ func TestNeedsReload(t *testing.T) {
llm := &mockLlm{}
do := api.DefaultOptions()
runner := &runnerRef{
- adapters: []string{"adapter1"},
- projectors: []string{"projector1"},
+ model: &Model{AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}},
Options: &do,
llama: llm,
}
req := &LlmRequest{
model: &Model{
@@ -510,10 +510,10 @@ func TestNeedsReload(t *testing.T) {
}
resp := runner.needsReload(ctx, req)
require.True(t, resp)
- req.model.AdapterPaths = runner.adapters
+ req.model.AdapterPaths = runner.model.AdapterPaths
resp = runner.needsReload(ctx, req)
require.True(t, resp)
- req.model.ProjectorPaths = runner.projectors
+ req.model.ProjectorPaths = runner.model.ProjectorPaths
runner.loading = true
req.opts.NumBatch = 1234
resp = runner.needsReload(ctx, req)
@@ -558,11 +558,11 @@ func TestUnloadAllRunners(t *testing.T) {
func TestUnload(t *testing.T) {
llm1 := &mockLlm{}
r1 := &runnerRef{llama: llm1}
- r2 := &runnerRef{adapters: []string{"A"}}
+ r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}}
r1.unload()
require.True(t, llm1.closeCalled)
r2.unload()
- require.Nil(t, r2.adapters)
+ require.Nil(t, r2.model)
}
type mockLlm struct {
@@ -578,6 +578,7 @@ type mockLlm struct {
closeResp error
closeCalled bool
estimatedVRAM uint64
+ estimatedTotal uint64
}
func (s *mockLlm) Ping(ctx context.Context) error { return s.pingResp }
@@ -598,4 +599,5 @@ func (s *mockLlm) Close() error {
s.closeCalled = true
return s.closeResp
}
func (s *mockLlm) EstimatedVRAM() uint64 { return s.estimatedVRAM }
+ func (s *mockLlm) EstimatedTotal() uint64 { return s.estimatedTotal }

View File

@@ -7,6 +7,7 @@ import (
)
const UnknownOllamaKeyErrMsg = "unknown ollama key"
+ const InvalidModelNameErrMsg = "invalid model name"
// TODO: This should have a structured response from the API
type UnknownOllamaKey struct {