Compare commits


65 Commits

Author SHA1 Message Date
likelovewant
ff50cfb582 Merge branch 'ollama:main' into main 2024-06-14 00:59:47 +08:00
Patrick Devine
c69bc19e46 move OLLAMA_HOST to envconfig (#5009) 2024-06-12 18:48:16 -04:00
Michael Yang
bba5d177aa Merge pull request #5004 from ollama/mxyng/fix-templates
fix: multiple templates when creating from model
2024-06-12 14:39:29 -07:00
Michael Yang
c16f8af911 fix: multiple templates when creating from model
multiple templates may appear in a model if a model is created from
another model that 1) has an autodetected template and 2) defines a
custom template
2024-06-12 13:35:49 -07:00
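A hypothetical sketch of the shape of such a fix (illustrative only; `Layer`, `dedupeTemplates`, and the demo digests stand in for whatever the actual patch touches): when assembling a new model's layers, drop any template layer inherited from the parent as soon as a newer template layer appears, so only the last one survives.

```go
package main

import (
	"fmt"
	"slices"
)

// Layer is a simplified stand-in for a model manifest layer.
type Layer struct {
	MediaType string
	Digest    string
}

const templateType = "application/vnd.ollama.image.template"

// dedupeTemplates keeps only the most recent template layer, so a parent
// model's autodetected template cannot coexist with a custom TEMPLATE.
func dedupeTemplates(layers []Layer) []Layer {
	var out []Layer
	for _, l := range layers {
		if l.MediaType == templateType {
			// drop any template layer seen earlier (e.g. inherited from the parent)
			out = slices.DeleteFunc(out, func(x Layer) bool {
				return x.MediaType == templateType
			})
		}
		out = append(out, l)
	}
	return out
}

func main() {
	layers := []Layer{
		{templateType, "sha256:autodetected"}, // carried over from the parent model
		{templateType, "sha256:custom"},       // defined by the child's Modelfile
	}
	fmt.Println(dedupeTemplates(layers)) // only sha256:custom remains
}
```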
likelovewant
edaec3183a Merge branch 'ollama:main' into main 2024-06-12 15:35:25 +08:00
Michael Yang
217f60c3d9 Merge pull request #4987 from ollama/mxyng/revert-byte-order
Revert "Merge pull request #4938 from ollama/mxyng/fix-byte-order"
2024-06-11 16:04:20 -07:00
Michael Yang
7bdcd1da94 Revert "Merge pull request #4938 from ollama/mxyng/fix-byte-order"
This reverts commit f5f245cc15, reversing
changes made to 94d37fdcae.

this change broke gguf v2 which is incorrectly detected as big endian
2024-06-11 15:56:17 -07:00
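A minimal Go sketch of the failure mode behind this revert, assuming byte order was sniffed from the GGUF version field (this is not the reverted code): a detector that only accepts the newest version as valid little-endian lets little-endian v2 files fall through and be misclassified as big-endian.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// detectByteOrder is illustrative only: it treats version == 3 as the only
// valid little-endian reading, so a v2 file never matches.
func detectByteOrder(version [4]byte) binary.ByteOrder {
	if binary.LittleEndian.Uint32(version[:]) == 3 {
		return binary.LittleEndian
	}
	// little-endian v2 headers (02 00 00 00) end up here incorrectly
	return binary.BigEndian
}

func main() {
	v2 := [4]byte{0x02, 0x00, 0x00, 0x00} // GGUF v2, little-endian
	fmt.Println(detectByteOrder(v2))      // prints BigEndian: the bug described above
}
```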
Jeffrey Morgan
ead259d877 llm: fix seed value not being applied to requests (#4986) 2024-06-11 14:24:41 -07:00
James Montgomery
2ff45d571d Add Ollama-hpp to Community Libraries in README. (#4983) 2024-06-11 11:15:05 -07:00
Michael Yang
0f3cf1d42e Merge pull request #4715 from ollama/mxyng/utf16-parser
proper utf16 support
2024-06-10 11:41:29 -07:00
Michael Yang
5bc029c529 Merge pull request #4921 from ollama/mxyng/import-md
update import.md
2024-06-10 11:41:09 -07:00
Michael Yang
e9a9c6a8e8 Merge pull request #4965 from ollama/mxyng/skip-layer-remove
fix: skip removing layers that no longer exist
2024-06-10 11:40:03 -07:00
Michael Yang
515f497e6d fix: skip removing layers that no longer exist 2024-06-10 11:32:19 -07:00
Michael Yang
b27268aaef add test 2024-06-10 11:32:15 -07:00
Michael Yang
f5f245cc15 Merge pull request #4938 from ollama/mxyng/fix-byte-order
fix parsing big endian gguf
2024-06-10 09:38:12 -07:00
Jim Scardelis
94d37fdcae fix: examples/langchain-python-rag-privategpt/requirements.txt (#3382) 2024-06-09 10:58:09 -07:00
Craig Hughes
b84aea1685 Critical fix from llama.cpp JSON grammar to forbid un-escaped escape characters inside strings, which breaks parsing. (#3782) 2024-06-09 10:57:09 -07:00
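An illustrative Go snippet (for demonstration only; the actual fix lives in llama.cpp's GBNF JSON grammar) showing why un-escaped control characters break parsing: strict JSON parsers reject them outright, so a grammar that lets the model emit them produces unparseable output.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A raw newline inside a JSON string is invalid; it must be escaped as \n.
	bad := []byte("{\"text\": \"line1\nline2\"}")
	good := []byte("{\"text\": \"line1\\nline2\"}")

	var v map[string]string
	fmt.Println(json.Unmarshal(bad, &v))  // error: invalid character '\n' in string literal
	fmt.Println(json.Unmarshal(good, &v)) // <nil>
}
```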
Napuh
896495de7b Add instructions to easily install specific versions on faq.md (#4084)
* Added instructions to easily install specific versions on faq.md

* Small typo

* Moved instructions on how to install specific version to linux.md

* Update docs/linux.md

* Update docs/linux.md

---------

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2024-06-09 10:49:03 -07:00
dcasota
5528dd9d11 Error handling load_single_document() in ingest.py (#4852)
load_single_document() handles
- corrupt files
- empty (zero byte) files
- unsupported file extensions
2024-06-09 10:41:07 -07:00
Jeffrey Morgan
943172cbf4 Update api.md 2024-06-08 23:04:32 -07:00
likelovewant
1b5848cbf2 remove gfx906 has conflicts with gfx906:xnack- 2024-06-09 11:46:22 +08:00
likelovewant
76026b4a35 Merge branch 'ollama:main' into main 2024-06-09 10:10:23 +08:00
Nischal Jain
85169e8d6f Added headless-ollama (#4612) 2024-06-08 18:51:16 -07:00
Jeffrey Morgan
34f142797a llm: always add bos token to prompt (#4941)
* fix embedding by adding fixes from llama.cpp upstream

* remove assert

---------

Co-authored-by: Jesper Ek <deadbeef84@gmail.com>
2024-06-08 18:47:10 -07:00
Erhan
46a7f1e74a Update README.md with LangChainRust (#4854) 2024-06-08 17:29:36 -07:00
Michael Yang
620d5c569e fix parsing big endian gguf 2024-06-08 12:35:26 -07:00
Michael Yang
b9ce7bf75e update import.md 2024-06-07 16:45:15 -07:00
Daniel Hiltgen
cddc63381c Merge pull request #4909 from dhiltgen/oneapi_disable
Add ability to skip oneapi generate
2024-06-07 14:07:15 -07:00
Michael Yang
385a32ecb5 Merge pull request #4910 from ollama/mxyng/detect-chat-template
fix create model when template detection errors
2024-06-07 11:07:39 -07:00
Michael Yang
030e765e76 fix create model when template detection errors 2024-06-07 10:51:35 -07:00
Daniel Hiltgen
ab8c929e20 Add ability to skip oneapi generate
This follows the same pattern for cuda and rocm to allow
disabling the build even when we detect the dependent libraries
2024-06-07 08:32:49 -07:00
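A sketch of that opt-out pattern (assumptions: the real change lives in the generate scripts, and the `OLLAMA_SKIP_ONEAPI_GENERATE` name is inferred from the existing cuda/rocm counterparts rather than confirmed here):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// shouldGenerate reports whether the generate step for a backend should run;
// setting OLLAMA_SKIP_<BACKEND>_GENERATE opts out even when the backend's
// dependent libraries are detected on the build machine.
func shouldGenerate(backend string) bool {
	return os.Getenv("OLLAMA_SKIP_"+strings.ToUpper(backend)+"_GENERATE") == ""
}

func main() {
	for _, b := range []string{"cuda", "rocm", "oneapi"} {
		fmt.Printf("generate %s: %v\n", b, shouldGenerate(b))
	}
}
```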
likelovewant
27e7397b11 Update gen_windows.ps1 2024-06-07 17:35:15 +08:00
likelovewant
a6390a8992 Merge branch 'ollama:main' into main 2024-06-07 17:25:53 +08:00
Jeffrey Morgan
ce0dc33cb8 llm: patch to fix qwen 2 temporarily on nvidia (#4897) 2024-06-06 23:14:33 -07:00
Michael Yang
78f81fc0e5 Merge pull request #4800 from ollama/mxyng/detect-chat-template
detect chat template from KV
2024-06-06 16:17:18 -07:00
Michael Yang
9b6c2e6eb6 detect chat template from KV 2024-06-06 16:03:47 -07:00
royjhan
1a29e9a879 API app/browser access (#4879)
* API app/browser access

* Add tauri (resolves #2291, #4791, #3799, #4388)
2024-06-06 15:19:03 -07:00
royjhan
4bf1da4944 Separate ListResponse and ModelResponse for api/tags vs api/ps (#4842)
* Remove false time fields

* Struct Separation for List and Process

* Remove Marshaler
2024-06-06 10:11:45 -07:00
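A small usage sketch of the resulting client surface (types exactly as they appear in the api/client.go and api/types.go diffs further down; error handling kept minimal):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	tags, err := client.List(ctx) // *api.ListResponse from /api/tags
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range tags.Models { // api.ListModelResponse: ModifiedAt, no VRAM fields
		fmt.Println(m.Name, m.Size, m.ModifiedAt)
	}

	ps, err := client.ListRunning(ctx) // *api.ProcessResponse from /api/ps
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range ps.Models { // api.ProcessModelResponse: ExpiresAt and SizeVRAM
		fmt.Println(m.Name, m.SizeVRAM, m.ExpiresAt)
	}
}
```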
Blake Mizerany
de5beb06b3 server: skip blob verification for already verified blobs 2024-06-05 16:39:11 -07:00
Sam
98e65929dc docs(tools): add gollama (#4829) 2024-06-05 14:13:39 -07:00
Michael Yang
66ab48772f proper utf16 support 2024-06-05 13:11:50 -07:00
Michael Yang
22fcf8f7de Merge pull request #3737 from ollama/mxyng/modelname-4
update create handler to use model.Name
2024-06-05 12:05:05 -07:00
royjhan
28c7813ac4 API PS Documentation (#4822)
* API PS Documentation
2024-06-05 11:06:53 -07:00
Kartikeya Mishra
1d8616d30f docs: update to add LLocal.in to web & desktop integrations (#4719) 2024-06-04 14:43:59 -07:00
Michael Yang
d61ef8b954 update create handler to use model.Name 2024-06-04 13:28:25 -07:00
Michael Yang
89d9900152 Merge pull request #4570 from ollama/mxyng/slices
lint some of the things
2024-06-04 13:27:05 -07:00
Michael
4a048715b6 local wording was confusing people
local wording was confusing people -- Ollama runs on cloud providers
2024-06-04 13:25:25 -07:00
Michael Yang
6297f85606 gofmt, goimports 2024-06-04 13:20:24 -07:00
Michael Yang
ed56428dd7 warn on intrange, usestdlibvars 2024-06-04 11:52:48 -07:00
Michael Yang
ad40b92b6a disable intrange 2024-06-04 11:35:30 -07:00
Michael Yang
8ce4032e72 more lint 2024-06-04 11:13:30 -07:00
Michael Yang
42660466f8 no usestdlibvars 2024-06-04 11:13:30 -07:00
Michael Yang
e919f6811f lint windows 2024-06-04 11:13:30 -07:00
Michael Yang
bf7edb0d5d lint linux 2024-06-04 11:13:30 -07:00
Michael Yang
f38353d6b9 stdin.fd 2024-06-04 11:13:30 -07:00
Michael Yang
201d853fdf nolintlint 2024-06-04 11:13:30 -07:00
Michael Yang
e40145a39d lint 2024-06-04 11:13:30 -07:00
Michael Yang
c895a7d13f some gocritic 2024-06-04 11:13:30 -07:00
Michael Yang
dad7a987ae nosprintfhostport 2024-06-04 11:13:30 -07:00
Michael Yang
8ffb51749f nolintlint 2024-06-04 11:13:30 -07:00
Michael Yang
55f6eba049 gofmt 2024-06-04 11:13:30 -07:00
Michael Yang
04f3c12bb7 replace x/exp/slices with slices 2024-06-04 11:13:30 -07:00
Shubham
60323e0805 add embed model command and fix question invoke (#4766)
* add embed model command and fix question invoke

* Update docs/tutorials/langchainpy.md

Co-authored-by: Kim Hallberg <hallberg.kim@gmail.com>

* Update docs/tutorials/langchainpy.md

---------

Co-authored-by: Kim Hallberg <hallberg.kim@gmail.com>
Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2024-06-03 22:20:48 -07:00
likelovewant
71ae05239e Update README.md 2024-06-03 15:54:48 +08:00
likelovewant
a4a435bf8f Update amd_windows.go 2024-06-03 14:55:48 +08:00
98 changed files with 1639 additions and 698 deletions

View File

@@ -269,9 +269,9 @@ jobs:
       mkdir -p llm/build/darwin/$ARCH/stub/bin
       touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
     if: ${{ startsWith(matrix.os, 'macos-') }}
-  - uses: golangci/golangci-lint-action@v4
+  - uses: golangci/golangci-lint-action@v6
     with:
-      args: --timeout 8m0s -v
+      args: --timeout 8m0s -v ${{ startsWith(matrix.os, 'windows-') && '' || '--disable gofmt --disable goimports' }}
   test:
     strategy:
       matrix:

View File

@@ -9,9 +9,26 @@ linters:
   - contextcheck
   - exportloopref
   - gocheckcompilerdirectives
-  # FIXME: for some reason this errors on windows
+  # conditionally enable this on linux/macos
   # - gofmt
   # - goimports
+  - intrange
   - misspell
   - nilerr
+  - nolintlint
+  - nosprintfhostport
+  - testifylint
+  - unconvert
   - unused
+  - wastedassign
+  - whitespace
+  - usestdlibvars
+severity:
+  default-severity: error
+  rules:
+    - linters:
+        - gofmt
+        - goimports
+        - intrange
+        - usestdlibvars
+      severity: info

View File

@@ -6,7 +6,7 @@
 [![Discord](https://dcbadge.vercel.app/api/server/ollama?style=flat&compact=true)](https://discord.gg/ollama)

-Get up and running with large language models locally.
+Get up and running with large language models.

 ### macOS
@@ -30,7 +30,7 @@ Example extra list add on this repo.
 ```
 Please follow the [wiki](https://github.com/likelovewant/ollama-for-amd/wiki) guide to build or use the pre-release version.

-Note: `gfx803, gfx1010` reported not working by the wiki method ,expected a future support
+Note: `gfx803` reported partialy working by the wiki method ,expected a future support
@@ -303,6 +303,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
 - [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
 - [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
+- [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)

 ### Terminal
@@ -325,6 +326,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [ShellOracle](https://github.com/djcopley/ShellOracle)
 - [tlm](https://github.com/yusufcanb/tlm)
 - [podman-ollama](https://github.com/ericcurtin/podman-ollama)
+- [gollama](https://github.com/sammcj/gollama)

 ### Database
@@ -342,11 +344,13 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
 - [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
 - [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
+- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
 - [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
 - [LiteLLM](https://github.com/BerriAI/litellm)
 - [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
 - [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
 - [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
+- [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp)
 - [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
 - [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
 - [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
@@ -364,6 +368,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
 - [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
 - [LlamaScript](https://github.com/Project-Llama/llamascript)

 ### Mobile

 - [Enchanted](https://github.com/AugustDev/enchanted)
@@ -396,7 +401,9 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
 - [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
 - [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
+- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install ollama client & models on any OS for apps that depends on ollama server)

 ### Supported backends

 - [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.

View File

@@ -23,11 +23,9 @@ import (
 	"net"
 	"net/http"
 	"net/url"
-	"os"
 	"runtime"
-	"strconv"
-	"strings"

+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/version"
 )
@@ -65,10 +63,7 @@ func checkError(resp *http.Response, body []byte) error {
 // If the variable is not specified, a default ollama host and port will be
 // used.
 func ClientFromEnvironment() (*Client, error) {
-	ollamaHost, err := GetOllamaHost()
-	if err != nil {
-		return nil, err
-	}
+	ollamaHost := envconfig.Host

 	return &Client{
 		base: &url.URL{
@@ -79,52 +74,6 @@ func ClientFromEnvironment() (*Client, error) {
 	}, nil
 }

-type OllamaHost struct {
-	Scheme string
-	Host   string
-	Port   string
-}
-
-func GetOllamaHost() (OllamaHost, error) {
-	defaultPort := "11434"
-
-	hostVar := os.Getenv("OLLAMA_HOST")
-	hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
-
-	scheme, hostport, ok := strings.Cut(hostVar, "://")
-	switch {
-	case !ok:
-		scheme, hostport = "http", hostVar
-	case scheme == "http":
-		defaultPort = "80"
-	case scheme == "https":
-		defaultPort = "443"
-	}
-
-	// trim trailing slashes
-	hostport = strings.TrimRight(hostport, "/")
-
-	host, port, err := net.SplitHostPort(hostport)
-	if err != nil {
-		host, port = "127.0.0.1", defaultPort
-		if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
-			host = ip.String()
-		} else if hostport != "" {
-			host = hostport
-		}
-	}
-
-	if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
-		return OllamaHost{}, ErrInvalidHostPort
-	}
-
-	return OllamaHost{
-		Scheme: scheme,
-		Host:   host,
-		Port:   port,
-	}, nil
-}
-
 func NewClient(base *url.URL, http *http.Client) *Client {
 	return &Client{
 		base: base,
@@ -355,8 +304,8 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
 }

 // List running models.
-func (c *Client) ListRunning(ctx context.Context) (*ListResponse, error) {
-	var lr ListResponse
+func (c *Client) ListRunning(ctx context.Context) (*ProcessResponse, error) {
+	var lr ProcessResponse
 	if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
 		return nil, err
 	}

View File

@@ -1,11 +1,9 @@
 package api

 import (
-	"fmt"
-	"net"
 	"testing"

-	"github.com/stretchr/testify/assert"
+	"github.com/ollama/ollama/envconfig"
 )

 func TestClientFromEnvironment(t *testing.T) {
@@ -35,6 +33,7 @@ func TestClientFromEnvironment(t *testing.T) {
 	for k, v := range testCases {
 		t.Run(k, func(t *testing.T) {
 			t.Setenv("OLLAMA_HOST", v.value)
+			envconfig.LoadConfig()

 			client, err := ClientFromEnvironment()
 			if err != v.err {
@@ -46,40 +45,4 @@ func TestClientFromEnvironment(t *testing.T) {
 			}
 		})
 	}
-
-	hostTestCases := map[string]*testCase{
-		"empty":               {value: "", expect: "127.0.0.1:11434"},
-		"only address":        {value: "1.2.3.4", expect: "1.2.3.4:11434"},
-		"only port":           {value: ":1234", expect: ":1234"},
-		"address and port":    {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
-		"hostname":            {value: "example.com", expect: "example.com:11434"},
-		"hostname and port":   {value: "example.com:1234", expect: "example.com:1234"},
-		"zero port":           {value: ":0", expect: ":0"},
-		"too large port":      {value: ":66000", err: ErrInvalidHostPort},
-		"too small port":      {value: ":-1", err: ErrInvalidHostPort},
-		"ipv6 localhost":      {value: "[::1]", expect: "[::1]:11434"},
-		"ipv6 world open":     {value: "[::]", expect: "[::]:11434"},
-		"ipv6 no brackets":    {value: "::1", expect: "[::1]:11434"},
-		"ipv6 + port":         {value: "[::1]:1337", expect: "[::1]:1337"},
-		"extra space":         {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
-		"extra quotes":        {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
-		"extra space+quotes":  {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
-		"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
-	}
-
-	for k, v := range hostTestCases {
-		t.Run(k, func(t *testing.T) {
-			t.Setenv("OLLAMA_HOST", v.value)
-
-			oh, err := GetOllamaHost()
-			if err != v.err {
-				t.Fatalf("expected %s, got %s", v.err, err)
-			}
-
-			if err == nil {
-				host := net.JoinHostPort(oh.Host, oh.Port)
-				assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
-			}
-		})
-	}
 }

View File

@@ -2,7 +2,6 @@ package api

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"log/slog"
 	"math"
@@ -282,19 +281,33 @@ type PushRequest struct {
 // ListResponse is the response from [Client.List].
 type ListResponse struct {
-	Models []ModelResponse `json:"models"`
+	Models []ListModelResponse `json:"models"`
 }

-// ModelResponse is a single model description in [ListResponse].
-type ModelResponse struct {
+// ProcessResponse is the response from [Client.Process].
+type ProcessResponse struct {
+	Models []ProcessModelResponse `json:"models"`
+}
+
+// ListModelResponse is a single model description in [ListResponse].
+type ListModelResponse struct {
 	Name       string       `json:"name"`
 	Model      string       `json:"model"`
-	ModifiedAt time.Time    `json:"modified_at,omitempty"`
+	ModifiedAt time.Time    `json:"modified_at"`
 	Size       int64        `json:"size"`
 	Digest     string       `json:"digest"`
 	Details    ModelDetails `json:"details,omitempty"`
-	ExpiresAt  time.Time    `json:"expires_at,omitempty"`
-	SizeVRAM   int64        `json:"size_vram,omitempty"`
+}
+
+// ProcessModelResponse is a single model description in [ProcessResponse].
+type ProcessModelResponse struct {
+	Name      string       `json:"name"`
+	Model     string       `json:"model"`
+	Size      int64        `json:"size"`
+	Digest    string       `json:"digest"`
+	Details   ModelDetails `json:"details,omitempty"`
+	ExpiresAt time.Time    `json:"expires_at"`
+	SizeVRAM  int64        `json:"size_vram"`
 }

 type TokenResponse struct {
@@ -306,7 +319,7 @@ type GenerateResponse struct {
 	// Model is the model name that generated the response.
 	Model string `json:"model"`

-	//CreatedAt is the timestamp of the response.
+	// CreatedAt is the timestamp of the response.
 	CreatedAt time.Time `json:"created_at"`

 	// Response is the textual response itself.
@@ -363,8 +376,6 @@ func (m *Metrics) Summary() {
 	}
 }

-var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
-
 func (opts *Options) FromMap(m map[string]interface{}) error {
 	valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
 	typeOpts := reflect.TypeOf(opts).Elem()   // types of the fields in the options struct

View File

@@ -72,13 +72,13 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
 		},
 		{
 			"positive duration",
-			time.Duration(42 * time.Second),
-			time.Duration(42 * time.Second),
+			42 * time.Second,
+			42 * time.Second,
 		},
 		{
 			"another positive duration",
-			time.Duration(42 * time.Minute),
-			time.Duration(42 * time.Minute),
+			42 * time.Minute,
+			42 * time.Minute,
 		},
 		{
 			"zero duration",

View File

@@ -69,7 +69,6 @@ func init() {
 				slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
 			}
 		}
-
 	} else if runtime.GOOS == "darwin" {
 		// TODO
 		AppName += ".app"

View File

@@ -15,7 +15,7 @@ import (
 )

 func getCLIFullPath(command string) string {
-	cmdPath := ""
+	var cmdPath string
 	appExe, err := os.Executable()
 	if err == nil {
 		cmdPath = filepath.Join(filepath.Dir(appExe), command)
@@ -65,7 +65,6 @@ func start(ctx context.Context, command string) (*exec.Cmd, error) {
 	if err != nil {
 		if !errors.Is(err, os.ErrNotExist) {
 			return nil, fmt.Errorf("stat ollama server log dir %s: %v", logDir, err)
 		}
-
 		if err := os.MkdirAll(logDir, 0o755); err != nil {

View File

@@ -24,7 +24,8 @@ func terminate(cmd *exec.Cmd) error {
 	if err != nil {
 		return err
 	}
-	defer dll.Release() // nolint: errcheck
+	//nolint:errcheck
+	defer dll.Release()

 	pid := cmd.Process.Pid
@@ -73,7 +74,8 @@ func isProcessExited(pid int) (bool, error) {
 	if err != nil {
 		return false, fmt.Errorf("failed to open process: %v", err)
 	}
-	defer windows.CloseHandle(hProcess) // nolint: errcheck
+	//nolint:errcheck
+	defer windows.CloseHandle(hProcess)

 	var exitCode uint32
 	err = windows.GetExitCodeProcess(hProcess, &exitCode)

View File

@@ -78,7 +78,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
 	}
 	defer resp.Body.Close()

-	if resp.StatusCode == 204 {
+	if resp.StatusCode == http.StatusNoContent {
 		slog.Debug("check update response 204 (current version is up to date)")
 		return false, updateResp
 	}
@@ -87,7 +87,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
 		slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
 	}

-	if resp.StatusCode != 200 {
+	if resp.StatusCode != http.StatusOK {
 		slog.Info(fmt.Sprintf("check update error %d - %.96s", resp.StatusCode, string(body)))
 		return false, updateResp
 	}
@@ -114,7 +114,7 @@ func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
 	if err != nil {
 		return fmt.Errorf("error checking update: %w", err)
 	}
-	if resp.StatusCode != 200 {
+	if resp.StatusCode != http.StatusOK {
 		return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
 	}
 	resp.Body.Close()

View File

@@ -29,7 +29,6 @@ func GetID() string {
 		initStore()
 	}
 	return store.ID
-
 }

 func GetFirstTimeRun() bool {

View File

@@ -47,7 +47,6 @@ func nativeLoop() {
 		default:
 			pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
 			pDispatchMessage.Call(uintptr(unsafe.Pointer(m)))  //nolint:errcheck
-
 		}
 	}
 }
@@ -160,8 +159,8 @@ func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam ui
 		lResult, _, _ = pDefWindowProc.Call(
 			uintptr(hWnd),
 			uintptr(message),
-			uintptr(wParam),
-			uintptr(lParam),
+			wParam,
+			lParam,
 		)
 	}
 	return

View File

@@ -186,7 +186,7 @@ func (t *winTray) initInstance() error {
 	t.muNID.Lock()
 	defer t.muNID.Unlock()
 	t.nid = &notifyIconData{
-		Wnd:             windows.Handle(t.window),
+		Wnd:             t.window,
 		ID:              100,
 		Flags:           NIF_MESSAGE,
 		CallbackMessage: t.wmSystrayMessage,
@@ -197,7 +197,6 @@ func (t *winTray) initInstance() error {
 }

 func (t *winTray) createMenu() error {
-
 	menuHandle, _, err := pCreatePopupMenu.Call()
 	if menuHandle == 0 {
 		return err
@@ -246,7 +245,7 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
 	mi := menuItemInfo{
 		Mask:     MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
 		Type:     MFT_STRING,
-		ID:       uint32(menuItemId),
+		ID:       menuItemId,
 		TypeData: titlePtr,
 		Cch:      uint32(len(title)),
 	}
@@ -302,11 +301,10 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
 }

 func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {
-
 	mi := menuItemInfo{
 		Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
 		Type: MFT_SEPARATOR,
-		ID:   uint32(menuItemId),
+		ID:   menuItemId,
 	}

 	mi.Size = uint32(unsafe.Sizeof(mi))
@@ -426,7 +424,6 @@ func iconBytesToFilePath(iconBytes []byte) (string, error) {
 // Loads an image from file and shows it in tray.
 // Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
 func (t *winTray) setIcon(src string) error {
-
 	h, err := t.loadIconFrom(src)
 	if err != nil {
 		return err
@@ -444,7 +441,6 @@ func (t *winTray) setIcon(src string) error {
 // Loads an image from file to be shown in tray or menu item.
 // LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
 func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {
-
 	// Save and reuse handles of loaded images
 	t.muLoadedImages.RLock()
 	h, ok := t.loadedImages[src]

View File

@@ -20,6 +20,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"slices"
 	"strings"
 	"syscall"
 	"time"
@@ -29,7 +30,6 @@ import (
 	"github.com/olekukonko/tablewriter"
 	"github.com/spf13/cobra"
 	"golang.org/x/crypto/ssh"
-	"golang.org/x/exp/slices"
 	"golang.org/x/term"

 	"github.com/ollama/ollama/api"
@@ -746,7 +746,6 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
 	if wordWrap && termWidth >= 10 {
 		for _, ch := range content {
 			if state.lineLength+1 > termWidth-5 {
-
 				if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
 					fmt.Printf("%s%c", state.wordBuffer, ch)
 					state.wordBuffer = ""
@@ -961,17 +960,11 @@ func generate(cmd *cobra.Command, opts runOptions) error {
 }

 func RunServer(cmd *cobra.Command, _ []string) error {
-	// retrieve the OLLAMA_HOST environment variable
-	ollamaHost, err := api.GetOllamaHost()
-	if err != nil {
-		return err
-	}
-
 	if err := initializeKeypair(); err != nil {
 		return err
 	}

-	ln, err := net.Listen("tcp", net.JoinHostPort(ollamaHost.Host, ollamaHost.Port))
+	ln, err := net.Listen("tcp", net.JoinHostPort(envconfig.Host.Host, envconfig.Host.Port))
 	if err != nil {
 		return err
 	}
@@ -1030,24 +1023,6 @@ func initializeKeypair() error {
 	return nil
 }

-//nolint:unused
-func waitForServer(ctx context.Context, client *api.Client) error {
-	// wait for the server to start
-	timeout := time.After(5 * time.Second)
-	tick := time.Tick(500 * time.Millisecond)
-	for {
-		select {
-		case <-timeout:
-			return errors.New("timed out waiting for server to start")
-		case <-tick:
-			if err := client.Heartbeat(ctx); err == nil {
-				return nil // server has started
-			}
-		}
-	}
-}
-
 func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
 	client, err := api.ClientFromEnvironment()
 	if err != nil {

View File

@@ -8,11 +8,11 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+	"slices"
 	"sort"
 	"strings"

 	"github.com/spf13/cobra"
-	"golang.org/x/exp/slices"

 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/envconfig"

View File

@@ -6,6 +6,7 @@ import (
 	"text/template"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"github.com/ollama/ollama/api"
 )
@@ -85,11 +86,11 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
 `
 	tmpl, err := template.New("").Parse(expectedModelfile)
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	var buf bytes.Buffer
 	err = tmpl.Execute(&buf, opts)
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	assert.Equal(t, buf.String(), mf)

 	opts.ParentModel = "horseshark"
@@ -107,10 +108,10 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
 `
 	tmpl, err = template.New("").Parse(expectedModelfile)
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	var parentBuf bytes.Buffer
 	err = tmpl.Execute(&parentBuf, opts)
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	assert.Equal(t, parentBuf.String(), mf)
 }

cmd/start.go (new file, +27)

View File

@@ -0,0 +1,27 @@
+//go:build darwin || windows
+
+package cmd
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/ollama/ollama/api"
+)
+
+func waitForServer(ctx context.Context, client *api.Client) error {
+	// wait for the server to start
+	timeout := time.After(5 * time.Second)
+	tick := time.Tick(500 * time.Millisecond)
+	for {
+		select {
+		case <-timeout:
+			return errors.New("timed out waiting for server to start")
+		case <-tick:
+			if err := client.Heartbeat(ctx); err == nil {
+				return nil // server has started
+			}
+		}
+	}
+}

View File

@@ -189,7 +189,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
 	if params.VocabSize > len(v.Tokens) {
 		missingTokens := params.VocabSize - len(v.Tokens)
 		slog.Warn(fmt.Sprintf("vocab is missing %d tokens", missingTokens))
-		for cnt := 0; cnt < missingTokens; cnt++ {
+		for cnt := range missingTokens {
 			v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
 			v.Scores = append(v.Scores, -1)
 			v.Types = append(v.Types, tokenTypeUserDefined)

View File

@@ -35,7 +35,6 @@ func addOnes(data []float32, vectorSize int) ([]float32, error) {
 		f32s = append(f32s, t...)
 	}
-
 	return f32s, nil
 }

View File

@@ -119,11 +119,12 @@ func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([
 	}

 	var heads int
-	if strings.HasSuffix(name, "attn_q.weight") {
+	switch {
+	case strings.HasSuffix(name, "attn_q.weight"):
 		heads = params.AttentionHeads
-	} else if strings.HasSuffix(name, "attn_k.weight") {
+	case strings.HasSuffix(name, "attn_k.weight"):
 		heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
-	} else {
+	default:
 		return nil, fmt.Errorf("unknown tensor name: %s", name)
 	}

View File

@@ -120,7 +120,7 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
 		Name:   name,
 		Kind:   kind,
 		Offset: offset,
-		Shape:  shape[:],
+		Shape:  shape,
 	}

 	t.WriterTo = safetensorWriterTo{

View File

@@ -85,11 +85,8 @@ func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, e
 	sha256sum := sha256.New()
 	for _, pt := range t.PreTokenizer.PreTokenizers {
-		switch pt.Type {
-		case "Split":
-			if pt.Pattern.Regex != "" {
-				sha256sum.Write([]byte(pt.Pattern.Regex))
-			}
+		if pt.Type == "Split" && pt.Pattern.Regex != "" {
+			sha256sum.Write([]byte(pt.Pattern.Regex))
 		}
 	}

View File

@@ -88,7 +88,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
 		Name:   ggufName,
 		Kind:   kind,
 		Offset: offset, // calculate the offset
-		Shape:  shape[:],
+		Shape:  shape,
 	}

 	tensor.WriterTo = torchWriterTo{
@@ -104,7 +104,6 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
 	}
-
 	return tensors, nil
 }

 func getAltParams(dirpath string) (*Params, error) {

View File

@@ -12,6 +12,7 @@
 - [Pull a Model](#pull-a-model)
 - [Push a Model](#push-a-model)
 - [Generate Embeddings](#generate-embeddings)
+- [List Running Models](#list-running-models)

 ## Conventions
@@ -249,7 +250,7 @@ curl http://localhost:11434/api/generate -d '{
 #### Request (Reproducible outputs)

-For reproducible outputs, set `temperature` to 0 and `seed` to a number:
+For reproducible outputs, set `seed` to a number:

 ##### Request
@@ -258,8 +259,7 @@ curl http://localhost:11434/api/generate -d '{
   "model": "mistral",
   "prompt": "Why is the sky blue?",
   "options": {
-    "seed": 123,
-    "temperature": 0
+    "seed": 123
   }
 }'
 ```
@@ -1035,3 +1035,47 @@ curl http://localhost:11434/api/embeddings -d '{
   ]
 }
 ```
+
+## List Running Models
+
+```shell
+GET /api/ps
+```
+
+List models that are currently loaded into memory.
+
+#### Examples
+
+### Request
+
+```shell
+curl http://localhost:11434/api/ps
+```
+
+#### Response
+
+A single JSON object will be returned.
+
+```json
+{
+  "models": [
+    {
+      "name": "mistral:latest",
+      "model": "mistral:latest",
+      "size": 5137025024,
+      "digest": "2ae6f6dd7a3dd734790bbbf58b8909a606e0e7e97e94b7604e0aa7ae4490e6d8",
+      "details": {
+        "parent_model": "",
+        "format": "gguf",
+        "family": "llama",
+        "families": [
+          "llama"
+        ],
+        "parameter_size": "7.2B",
+        "quantization_level": "Q4_0"
+      },
+      "expires_at": "2024-06-04T14:38:31.83753-07:00",
+      "size_vram": 5137025024
+    }
+  ]
+}
+```

View File

@@ -1,170 +1,99 @@
-# Import a model
-
-This guide walks through importing a GGUF, PyTorch or Safetensors model.
-
-## Importing (GGUF)
-
-### Step 1: Write a `Modelfile`
-
-Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
-
-```
-FROM ./mistral-7b-v0.1.Q4_0.gguf
-```
-
-(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
-
-```
-FROM ./mistral-7b-v0.1.Q4_0.gguf
-TEMPLATE "[INST] {{ .Prompt }} [/INST]"
-```
-
-### Step 2: Create the Ollama model
-
-Finally, create a model from your `Modelfile`:
-
-```
-ollama create example -f Modelfile
-```
-
-### Step 3: Run your model
-
-Next, test the model with `ollama run`:
-
-```
-ollama run example "What is your favourite condiment?"
-```
-
-## Importing (PyTorch & Safetensors)
-
-> Importing from PyTorch and Safetensors is a longer process than importing from GGUF. Improvements that make it easier are a work in progress.
-
-### Setup
-
-First, clone the `ollama/ollama` repo:
-
-```
-git clone git@github.com:ollama/ollama.git ollama
-cd ollama
-```
-
-and then fetch its `llama.cpp` submodule:
-
-```shell
-git submodule init
-git submodule update llm/llama.cpp
-```
-
-Next, install the Python dependencies:
-
-```
-python3 -m venv llm/llama.cpp/.venv
-source llm/llama.cpp/.venv/bin/activate
-pip install -r llm/llama.cpp/requirements.txt
-```
-
-Then build the `quantize` tool:
-
-```
-make -C llm/llama.cpp quantize
-```
-
-### Clone the HuggingFace repository (optional)
-
-If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
-
-Install [Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), verify it's installed, and then clone the model's repository:
-
-```
-git lfs install
-git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 model
-```
-
-### Convert the model
-
-> Note: some model architectures require using specific convert scripts. For example, Qwen models require running `convert-hf-to-gguf.py` instead of `convert.py`
-
-```
-python llm/llama.cpp/convert.py ./model --outtype f16 --outfile converted.bin
-```
-
-### Quantize the model
-
-```
-llm/llama.cpp/quantize converted.bin quantized.bin q4_0
-```
-
-### Step 3: Write a `Modelfile`
-
-Next, create a `Modelfile` for your model:
-
-```
-FROM quantized.bin
-TEMPLATE "[INST] {{ .Prompt }} [/INST]"
-```
-
-### Step 4: Create the Ollama model
-
-Finally, create a model from your `Modelfile`:
-
-```
-ollama create example -f Modelfile
-```
-
-### Step 5: Run your model
-
-Next, test the model with `ollama run`:
-
-```
-ollama run example "What is your favourite condiment?"
-```
-
-## Publishing your model (optional early alpha)
-
-Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
-
-1. Create [an account](https://ollama.com/signup)
-2. Copy your Ollama public key:
-   - macOS: `cat ~/.ollama/id_ed25519.pub | pbcopy`
-   - Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub`
-   - Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub`
-3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
-
-Next, copy your model to your username's namespace:
-
-```
-ollama cp example <your username>/example
-```
-
-> Note: model names may only contain lowercase letters, digits, and the characters `.`, `-`, and `_`.
-
-Then push the model:
-
-```
-ollama push <your username>/example
-```
-
-After publishing, your model will be available at `https://ollama.com/<your username>/example`.
-
-## Quantization reference
-
-The quantization options are as follow (from highest highest to lowest levels of quantization). Note: some architectures such as Falcon do not support K quants.
-
-- `q2_K`
-- `q3_K`
-- `q3_K_S`
-- `q3_K_M`
-- `q3_K_L`
-- `q4_0` (recommended)
-- `q4_1`
-- `q4_K`
-- `q4_K_S`
-- `q4_K_M`
-- `q5_0`
-- `q5_1`
-- `q5_K`
-- `q5_K_S`
-- `q5_K_M`
-- `q6_K`
-- `q8_0`
-- `f16`
+# Import
+
+GGUF models and select Safetensors models can be imported directly into Ollama.
+
+## Import GGUF
+
+A binary GGUF file can be imported directly into Ollama through a Modelfile.
+
+```dockerfile
+FROM /path/to/file.gguf
+```
+
+## Import Safetensors
+
+If the model being imported is one of these architectures, it can be imported directly into Ollama through a Modelfile:
+
+- LlamaForCausalLM
+- MistralForCausalLM
+- GemmaForCausalLM
+
+```dockerfile
+FROM /path/to/safetensors/directory
+```
+
+For architectures not directly convertable by Ollama, see llama.cpp's [guide](https://github.com/ggerganov/llama.cpp/blob/master/README.md#prepare-and-quantize) on conversion. After conversion, see [Import GGUF](#import-gguf).
+
+## Automatic Quantization
+
+> [!NOTE]
+> Automatic quantization requires v0.1.35 or higher.
+
+Ollama is capable of quantizing FP16 or FP32 models to any of the supported quantizations with the `-q/--quantize` flag in `ollama create`.
+
+```dockerfile
+FROM /path/to/my/gemma/f16/model
+```
+
+```shell
+$ ollama create -q Q4_K_M mymodel
+transferring model data
+quantizing F16 model to Q4_K_M
+creating new layer sha256:735e246cc1abfd06e9cdcf95504d6789a6cd1ad7577108a70d9902fef503c1bd
+creating new layer sha256:0853f0ad24e5865173bbf9ffcc7b0f5d56b66fd690ab1009867e45e7d2c4db0f
+writing manifest
+success
+```
+
+### Supported Quantizations
+
+<details>
+<summary>Legacy Quantization</summary>
+
+- `Q4_0`
+- `Q4_1`
+- `Q5_0`
+- `Q5_1`
+- `Q8_0`
+
+</details>
+
+<details>
+<summary>K-means Quantization</summary>
+
+- `Q3_K_S`
+- `Q3_K_M`
+- `Q3_K_L`
+- `Q4_K_S`
+- `Q4_K_M`
+- `Q5_K_S`
+- `Q5_K_M`
+- `Q6_K`
+
+</details>
+
+> [!NOTE]
+> Activation-aware Weight Quantization (i.e. IQ) are not currently supported for automatic quantization however you can still import the quantized model into Ollama, see [Import GGUF](#import-gguf).
+
+## Template Detection
+
+> [!NOTE]
+> Template detection requires v0.1.42 or higher.
+
+Ollama uses model metadata, specifically `tokenizer.chat_template`, to automatically create a template appropriate for the model you're importing.
+
+```dockerfile
+FROM /path/to/my/gemma/model
+```
+
+```shell
+$ ollama create mymodel
+transferring model data
+using autodetected template gemma-instruct
+creating new layer sha256:baa2a0edc27d19cc6b7537578a9a7ba1a4e3214dc185ed5ae43692b319af7b84
+creating new layer sha256:ba66c3309914dbef07e5149a648fd1877f030d337a4f240d444ea335008943cb
+writing manifest
+success
+```
+
+Defining a template in the Modelfile will disable this feature which may be useful if you want to use a different template than the autodetected one.

View File

@@ -100,6 +100,16 @@ sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
 sudo chmod +x /usr/bin/ollama
 ```

+## Installing specific versions
+
+Use `OLLAMA_VERSION` environment variable with the install script to install a specific version of Ollama, including pre-releases. You can find the version numbers in the [releases page](https://github.com/ollama/ollama/releases).
+
+For example:
+
+```
+curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.1.32 sh
+```
+
 ## Viewing logs

 To view logs of Ollama running as a startup service, run:

View File

@@ -45,7 +45,7 @@ all_splits = text_splitter.split_documents(data)
 ```

 It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install chromadb`
+We also need to pull embedding model: `ollama pull nomic-embed-text`

 ```python
 from langchain.embeddings import OllamaEmbeddings
 from langchain.vectorstores import Chroma
@@ -68,7 +68,8 @@ The next thing is to send the question and the relevant parts of the docs to the

 ```python
 from langchain.chains import RetrievalQA
 qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
-qachain.invoke({"query": question})
+res = qachain.invoke({"query": question})
+print(res['result'])
 ```

 The answer received from this chain was:
View File

@@ -1,8 +1,10 @@
package envconfig package envconfig
import ( import (
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net"
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
@@ -10,6 +12,18 @@ import (
"strings" "strings"
) )
type OllamaHost struct {
Scheme string
Host string
Port string
}
func (o OllamaHost) String() string {
return fmt.Sprintf("%s://%s:%s", o.Scheme, o.Host, o.Port)
}
var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
var ( var (
// Set via OLLAMA_ORIGINS in the environment // Set via OLLAMA_ORIGINS in the environment
AllowOrigins []string AllowOrigins []string
@@ -33,6 +47,8 @@ var (
NoPrune bool NoPrune bool
// Set via OLLAMA_NUM_PARALLEL in the environment // Set via OLLAMA_NUM_PARALLEL in the environment
NumParallel int NumParallel int
// Set via OLLAMA_HOST in the environment
Host *OllamaHost
// Set via OLLAMA_RUNNERS_DIR in the environment // Set via OLLAMA_RUNNERS_DIR in the environment
RunnersDir string RunnersDir string
// Set via OLLAMA_TMPDIR in the environment // Set via OLLAMA_TMPDIR in the environment
@@ -49,7 +65,7 @@ func AsMap() map[string]EnvVar {
return map[string]EnvVar{ return map[string]EnvVar{
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"}, "OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"}, "OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
"OLLAMA_HOST": {"OLLAMA_HOST", "", "IP Address for the ollama server (default 127.0.0.1:11434)"}, "OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"}, "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"}, "OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"}, "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
@@ -126,7 +142,7 @@ func LoadConfig() {
var paths []string var paths []string
for _, root := range []string{filepath.Dir(appExe), cwd} { for _, root := range []string{filepath.Dir(appExe), cwd} {
paths = append(paths, paths = append(paths,
filepath.Join(root), root,
filepath.Join(root, "windows-"+runtime.GOARCH), filepath.Join(root, "windows-"+runtime.GOARCH),
filepath.Join(root, "dist", "windows-"+runtime.GOARCH), filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
) )
@@ -184,11 +200,17 @@ func LoadConfig() {
AllowOrigins = append(AllowOrigins, AllowOrigins = append(AllowOrigins,
fmt.Sprintf("http://%s", allowOrigin), fmt.Sprintf("http://%s", allowOrigin),
fmt.Sprintf("https://%s", allowOrigin), fmt.Sprintf("https://%s", allowOrigin),
fmt.Sprintf("http://%s:*", allowOrigin), fmt.Sprintf("http://%s", net.JoinHostPort(allowOrigin, "*")),
fmt.Sprintf("https://%s:*", allowOrigin), fmt.Sprintf("https://%s", net.JoinHostPort(allowOrigin, "*")),
) )
} }
AllowOrigins = append(AllowOrigins,
"app://*",
"file://*",
"tauri://*",
)
maxRunners := clean("OLLAMA_MAX_LOADED_MODELS") maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
if maxRunners != "" { if maxRunners != "" {
m, err := strconv.Atoi(maxRunners) m, err := strconv.Atoi(maxRunners)
@@ -209,4 +231,54 @@ func LoadConfig() {
} }
KeepAlive = clean("OLLAMA_KEEP_ALIVE") KeepAlive = clean("OLLAMA_KEEP_ALIVE")
var err error
Host, err = getOllamaHost()
if err != nil {
slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
}
}
func getOllamaHost() (*OllamaHost, error) {
defaultPort := "11434"
hostVar := os.Getenv("OLLAMA_HOST")
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
scheme, hostport, ok := strings.Cut(hostVar, "://")
switch {
case !ok:
scheme, hostport = "http", hostVar
case scheme == "http":
defaultPort = "80"
case scheme == "https":
defaultPort = "443"
}
// trim trailing slashes
hostport = strings.TrimRight(hostport, "/")
host, port, err := net.SplitHostPort(hostport)
if err != nil {
host, port = "127.0.0.1", defaultPort
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
host = ip.String()
} else if hostport != "" {
host = hostport
}
}
if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
return &OllamaHost{
Scheme: scheme,
Host: host,
Port: defaultPort,
}, ErrInvalidHostPort
}
return &OllamaHost{
Scheme: scheme,
Host: host,
Port: port,
}, nil
} }

View File

@@ -1,8 +1,11 @@
 package envconfig

 import (
+	"fmt"
+	"net"
 	"testing"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -21,3 +24,48 @@ func TestConfig(t *testing.T) {
 	LoadConfig()
 	require.True(t, FlashAttention)
 }
+
+func TestClientFromEnvironment(t *testing.T) {
+	type testCase struct {
+		value  string
+		expect string
+		err    error
+	}
+
+	hostTestCases := map[string]*testCase{
+		"empty":               {value: "", expect: "127.0.0.1:11434"},
+		"only address":        {value: "1.2.3.4", expect: "1.2.3.4:11434"},
+		"only port":           {value: ":1234", expect: ":1234"},
+		"address and port":    {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
+		"hostname":            {value: "example.com", expect: "example.com:11434"},
+		"hostname and port":   {value: "example.com:1234", expect: "example.com:1234"},
+		"zero port":           {value: ":0", expect: ":0"},
+		"too large port":      {value: ":66000", err: ErrInvalidHostPort},
+		"too small port":      {value: ":-1", err: ErrInvalidHostPort},
+		"ipv6 localhost":      {value: "[::1]", expect: "[::1]:11434"},
+		"ipv6 world open":     {value: "[::]", expect: "[::]:11434"},
+		"ipv6 no brackets":    {value: "::1", expect: "[::1]:11434"},
+		"ipv6 + port":         {value: "[::1]:1337", expect: "[::1]:1337"},
+		"extra space":         {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
+		"extra quotes":        {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
+		"extra space+quotes":  {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
+		"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
+	}
+
+	for k, v := range hostTestCases {
+		t.Run(k, func(t *testing.T) {
+			t.Setenv("OLLAMA_HOST", v.value)
+			LoadConfig()
+
+			oh, err := getOllamaHost()
+			if err != v.err {
+				t.Fatalf("expected %s, got %s", v.err, err)
+			}
+
+			if err == nil {
+				host := net.JoinHostPort(oh.Host, oh.Port)
+				assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
+			}
+		})
+	}
+}

View File

@@ -77,13 +77,21 @@ LOADER_MAPPING = {

 def load_single_document(file_path: str) -> List[Document]:
-    ext = "." + file_path.rsplit(".", 1)[-1]
-    if ext in LOADER_MAPPING:
-        loader_class, loader_args = LOADER_MAPPING[ext]
-        loader = loader_class(file_path, **loader_args)
-        return loader.load()
-
-    raise ValueError(f"Unsupported file extension '{ext}'")
+    if os.path.getsize(file_path) != 0:
+        filename, ext = os.path.splitext(file_path)
+        if ext in LOADER_MAPPING:
+            loader_class, loader_args = LOADER_MAPPING[ext]
+            try:
+                loader = loader_class(file_path, **loader_args)
+                if loader:
+                    return loader.load()
+            except:
+                print(f"Corrupted file {file_path}. Ignoring it.")
+        else:
+            print(f"Unsupported file {file_path}. Ignoring it.")
+    else:
+        print(f"Empty file {file_path}. Ignoring it.")

 def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
     """
@@ -100,7 +108,8 @@ def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Docum
     results = []
     with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
         for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
-            results.extend(docs)
+            if docs:
+                results.extend(docs)
             pbar.update()
     return results

@@ -11,4 +11,5 @@ tabulate==0.9.0
 pandoc==2.3
 pypandoc==1.11
 tqdm==4.66.1
 sentence_transformers==2.2.2
+numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability


@@ -5,7 +5,6 @@ import (
 )

 func TestHumanNumber(t *testing.T) {
-
 	type testCase struct {
 		input    uint64
 		expected string
go.mod

@@ -16,6 +16,7 @@ require (
 )

 require (
+	github.com/agnivade/levenshtein v1.1.1
 	github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
 	github.com/mattn/go-runewidth v0.0.14
 	github.com/nlpodyssey/gopickle v0.3.0

go.sum

@@ -4,10 +4,14 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
 gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
 github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
 github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
 github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
@@ -36,6 +40,8 @@ github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1/go.mod h1:uw2gLc
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=


@@ -7,7 +7,7 @@ import (
 	"os"
 	"path/filepath"
 	"slices"
-	"strings"
+	// "strings"

 	"github.com/ollama/ollama/format"
 )
@@ -65,7 +65,7 @@ func AMDGetGPUInfo() []GpuInfo {
 	slog.Debug("detected hip devices", "count", count)
 	// TODO how to determine the underlying device ID when visible devices is causing this to subset?
-	for i := 0; i < count; i++ {
+	for i := range count {
 		err = hl.HipSetDevice(i)
 		if err != nil {
 			slog.Warn("set device", "id", i, "error", err)
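
This `for i := range count` rewrite (repeated across several files below) relies on Go 1.22's range-over-integer form, which iterates 0 through count-1. A quick sketch:

package main

import "fmt"

func main() {
	count := 3
	// Equivalent to: for i := 0; i < count; i++
	for i := range count {
		fmt.Println("device", i)
	}
}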


@@ -80,7 +80,7 @@ func cleanupTmpDirs() {
 		if err == nil {
 			pid, err := strconv.Atoi(string(raw))
 			if err == nil {
-				if proc, err := os.FindProcess(int(pid)); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
+				if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
 					// Another running ollama, ignore this tmpdir
 					continue
 				}


@@ -18,5 +18,4 @@ func cudaGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
 		ids = append(ids, info.ID)
 	}
-
 	return "CUDA_VISIBLE_DEVICES", strings.Join(ids, ",")
 }


@@ -187,7 +187,7 @@ func GetGPUInfo() GpuInfoList {
 	resp := []GpuInfo{}

 	// NVIDIA first
-	for i := 0; i < gpuHandles.deviceCount; i++ {
+	for i := range gpuHandles.deviceCount {
 		// TODO once we support CPU compilation variants of GPU libraries refine this...
 		if cpuVariant == "" && runtime.GOARCH == "amd64" {
 			continue
@@ -221,8 +221,8 @@ func GetGPUInfo() GpuInfoList {
 			gpuInfo.MinimumMemory = cudaMinimumMemory
 			gpuInfo.DependencyPath = depPath
 			gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
-			gpuInfo.DriverMajor = int(driverMajor)
-			gpuInfo.DriverMinor = int(driverMinor)
+			gpuInfo.DriverMajor = driverMajor
+			gpuInfo.DriverMinor = driverMinor

 			// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
 			resp = append(resp, gpuInfo)


@@ -5,11 +5,12 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestBasicGetGPUInfo(t *testing.T) {
 	info := GetGPUInfo()
-	assert.Greater(t, len(info), 0)
+	assert.NotEmpty(t, len(info))
 	assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
 	if info[0].Library != "cpu" {
 		assert.Greater(t, info[0].TotalMemory, uint64(0))
@@ -19,7 +20,7 @@ func TestBasicGetGPUInfo(t *testing.T) {
 func TestCPUMemInfo(t *testing.T) {
 	info, err := GetCPUMem()
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	switch runtime.GOOS {
 	case "darwin":
 		t.Skip("CPU memory not populated on darwin")


@@ -359,7 +359,6 @@ struct llama_server_context
     // slots / clients
     std::vector<server_slot> slots;
-    json default_generation_settings_for_props;

     llama_server_queue    queue_tasks;
     llama_server_response queue_results;
@@ -483,9 +482,6 @@ struct llama_server_context
             slots.push_back(slot);
         }

-        default_generation_settings_for_props = get_formated_generation(slots.front());
-        default_generation_settings_for_props["seed"] = -1;
-
         batch = llama_batch_init(n_ctx, 0, params.n_parallel);
     }
@@ -584,7 +580,7 @@ struct llama_server_context
         slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
         slot->sparams.penalize_nl  = json_value(data, "penalize_nl", default_sparams.penalize_nl);
         slot->params.n_keep        = json_value(data, "n_keep", slot->params.n_keep);
-        slot->params.seed          = json_value(data, "seed", default_params.seed);
+        slot->sparams.seed         = json_value(data, "seed", default_params.seed);
         slot->sparams.grammar      = json_value(data, "grammar", default_sparams.grammar);
         slot->sparams.n_probs      = json_value(data, "n_probs", default_sparams.n_probs);
         slot->sparams.min_keep     = json_value(data, "min_keep", default_sparams.min_keep);
@@ -811,7 +807,6 @@ struct llama_server_context
             llama_sampling_free(slot->ctx_sampling);
         }
         slot->ctx_sampling = llama_sampling_init(slot->sparams);
-        llama_set_rng_seed(ctx, slot->params.seed);
         slot->command = LOAD_PROMPT;

         all_slots_are_idle = false;
@@ -835,7 +830,7 @@ struct llama_server_context
         system_tokens.clear();

         if (!system_prompt.empty()) {
-            system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
+            system_tokens = ::llama_tokenize(ctx, system_prompt, true);

             llama_batch_clear(batch);
@@ -1656,7 +1651,7 @@ struct llama_server_context
                 slot.t_start_process_prompt = ggml_time_us();
                 slot.t_start_genereration = 0;

-                prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token);  // add BOS if there isn't system prompt
+                prompt_tokens = tokenize(slot.prompt, system_prompt.empty());  // add BOS if there isn't system prompt

                 slot.n_prompt_tokens = prompt_tokens.size();


@@ -220,7 +220,7 @@ if [ -z "${ONEAPI_ROOT}" ]; then
     ONEAPI_ROOT=/opt/intel/oneapi
 fi

-if [ -d "${ONEAPI_ROOT}" ]; then
+if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
     echo "OneAPI libraries detected - building dynamic OneAPI library"
     init_vars
     source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI


@@ -12,6 +12,7 @@ function amdGPUs {
         "gfx900"
         "gfx902"
         "gfx904"
+        "gfx90c"
         "gfx906:xnack-"
         "gfx908:xnack-"
         "gfx90a:xnack+"
@@ -301,7 +302,7 @@ function build_cuda() {
 }

 function build_oneapi() {
-  if ((-not "${env:OLLAMA_SKIP_CUDA_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
+  if ((-not "${env:OLLAMA_SKIP_ONEAPI_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
     # Get oneAPI version
     $script:ONEAPI_VERSION = icpx --version
     $script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value


@@ -81,6 +81,11 @@ func (kv KV) ContextLength() uint64 {
 	return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture()))
 }

+func (kv KV) ChatTemplate() string {
+	s, _ := kv["tokenizer.chat_template"].(string)
+	return s
+}
+
 type Tensors []*Tensor

 func (ts Tensors) Layers() map[string]Layer {
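
ChatTemplate leans on the comma-ok type assertion: when the key is missing, or holds a non-string, s falls back to the zero value "" instead of panicking. A self-contained sketch of that behavior:

package main

import "fmt"

func main() {
	kv := map[string]any{"tokenizer.chat_template": "{{ messages }}"}

	s, _ := kv["tokenizer.chat_template"].(string)
	fmt.Printf("%q\n", s) // "{{ messages }}"

	// Absent keys (or non-string values) degrade to "" rather than panicking.
	missing, _ := kv["no.such.key"].(string)
	fmt.Printf("%q\n", missing) // ""
}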


@@ -592,8 +592,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 			return err
 		}

-		dims := 0
-		for cnt := 0; cnt < len(tensor.Shape); cnt++ {
+		var dims int
+		for cnt := range len(tensor.Shape) {
 			if tensor.Shape[cnt] > 0 {
 				dims++
 			}
@@ -603,8 +603,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 			return err
 		}

-		for i := 0; i < dims; i++ {
-			if err := binary.Write(ws, llm.ByteOrder, uint64(tensor.Shape[dims-1-i])); err != nil {
+		for i := range dims {
+			if err := binary.Write(ws, llm.ByteOrder, tensor.Shape[dims-1-i]); err != nil {
 				return err
 			}
 		}
@@ -618,22 +618,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 		}
 	}

-	offset, err := ws.Seek(0, io.SeekCurrent)
-	if err != nil {
-		return err
-	}
-
 	var alignment int64 = 32
-	padding := llm.padding(offset, alignment)
-	if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
-		return err
-	}
-
 	for _, tensor := range tensors {
-		if _, err := tensor.WriteTo(ws); err != nil {
-			return err
-		}
-
 		offset, err := ws.Seek(0, io.SeekCurrent)
 		if err != nil {
 			return err
@@ -643,6 +629,10 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 		if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
 			return err
 		}
+
+		if _, err := tensor.WriteTo(ws); err != nil {
+			return err
+		}
 	}

 	return nil
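
The reordered loop pads the writer to a 32-byte boundary before each tensor is written, instead of padding once up front and writing the tensor first. The usual alignment arithmetic behind this (a sketch; the llm package's own padding helper may differ in signature):

package main

import "fmt"

// padding reports how many zero bytes advance offset to the next
// multiple of align.
func padding(offset, align int64) int64 {
	return (align - offset%align) % align
}

func main() {
	fmt.Println(padding(100, 32)) // 28 — next boundary is 128
	fmt.Println(padding(128, 32)) // 0 — already aligned
}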


@@ -5,9 +5,9 @@ import (
 	"log/slog"

 	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/gpu"
-	"github.com/ollama/ollama/envconfig"
 )

 // This algorithm looks for a complete fit to determine if we need to unload other models
@@ -103,7 +103,7 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 	}

 	var layerCount int
-	for i := 0; i < int(ggml.KV().BlockCount()); i++ {
+	for i := range int(ggml.KV().BlockCount()) {
 		if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
 			memoryLayer := blk.size()
llm/patches/06-qwen2.diff (new file)

@@ -0,0 +1,13 @@
+diff --git a/llama.cpp b/llama.cpp
+index 40d2ec2c..f34eb79a 100644
+--- a/llama.cpp
++++ b/llama.cpp
+@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
+         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
+         cb(kq, "kq", il);
+
+-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
++        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
+             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
+             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
+             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);


@@ -10,9 +10,9 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"slices"
 	"strings"

-	"golang.org/x/exp/slices"
 	"golang.org/x/sync/errgroup"

 	"github.com/ollama/ollama/gpu"


@@ -85,7 +85,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 	var systemMemory uint64
 	gpuCount := len(gpus)
 	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
-
 		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
 		cpuRunner = serverForCpu()
@@ -104,21 +103,22 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		var layers int
 		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

-		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
+		switch {
+		case gpus[0].Library == "metal" && estimatedVRAM > systemMemory:
 			// disable partial offloading when model is greater than total system memory as this
 			// can lead to locking up the system
 			opts.NumGPU = 0
-		} else if gpus[0].Library != "metal" && layers == 0 {
+		case gpus[0].Library != "metal" && layers == 0:
 			// Don't bother loading into the GPU if no layers can fit
 			cpuRunner = serverForCpu()
 			gpuCount = 0
-		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
+		case opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu":
 			opts.NumGPU = layers
 		}
 	}

 	// Loop through potential servers
-	finalErr := fmt.Errorf("no suitable llama servers found")
+	finalErr := errors.New("no suitable llama servers found")

 	if len(adapters) > 1 {
 		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
@@ -232,7 +232,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

-	for i := 0; i < len(servers); i++ {
+	for i := range len(servers) {
 		dir := availableServers[servers[i]]
 		if dir == "" {
 			// Shouldn't happen
@@ -284,7 +284,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		server := filepath.Join(dir, "ollama_llama_server")
 		if runtime.GOOS == "windows" {
-			server = server + ".exe"
+			server += ".exe"
 		}

 		// Detect tmp cleaners wiping out the file
@@ -315,7 +315,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		s.cmd.Stdout = os.Stdout
 		s.cmd.Stderr = s.status

-		visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
+		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
 		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

 		// Update or add the path and visible devices variable with our adjusted version
@@ -459,7 +459,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
 	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		if errors.Is(err, context.DeadlineExceeded) {
-			return ServerStatusNotResponding, fmt.Errorf("server not responding")
+			return ServerStatusNotResponding, errors.New("server not responding")
 		}
 		return ServerStatusError, fmt.Errorf("health resp: %w", err)
 	}
@@ -606,7 +606,7 @@ array  ::=
 string ::=
   "\"" (
-    [^"\\] |
+    [^"\\\x7F\x00-\x1F] |
     "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
   )* "\"" ws
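
The tightened character class in the grammar's string rule excludes DEL (\x7F) and the C0 controls (\x00-\x1F) from unescaped string content, so a raw newline inside a generated string can no longer break JSON parsing. A sketch of the same predicate:

package main

import "fmt"

// validStringChar reports whether r may appear unescaped in a JSON string
// under the rule above: anything but the quote, backslash, DEL, and the
// C0 control range.
func validStringChar(r rune) bool {
	return r != '"' && r != '\\' && r != 0x7F && r >= 0x20
}

func main() {
	fmt.Println(validStringChar('a'))  // true
	fmt.Println(validStringChar('\n')) // false — must be escaped as \n
}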


@@ -245,7 +245,6 @@ func (w *writer) writeResponse(data []byte) (int, error) {
 		d, err := json.Marshal(toChunk(w.id, chatResponse))
 		if err != nil {
 			return 0, err
 		}
-
 		w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")


@@ -3,12 +3,15 @@ package parser

 import (
 	"bufio"
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"strconv"
 	"strings"
-	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
 )

 type File struct {
@@ -69,33 +72,31 @@ func ParseFile(r io.Reader) (*File, error) {
 	var b bytes.Buffer
 	var role string

-	var lineCount int
-	var linePos int
-
-	var utf16 bool
-
 	var f File

 	br := bufio.NewReader(r)
-	for {
-		r, _, err := br.ReadRune()
-		if errors.Is(err, io.EOF) {
-			break
-		} else if err != nil {
+
+	var sc scannerDecoder = utf8ScannerDecoder{}
+	if bom, err := br.Peek(2); err != nil {
+		slog.Warn("error reading byte-order mark", "error", err)
+	} else if bytes.Equal(bom, []byte{0xFE, 0xFF}) {
+		sc = utf16ScannerDecoder{binary.LittleEndian}
+		//nolint:errcheck
+		br.Discard(2)
+	} else if bytes.Equal(bom, []byte{0xFF, 0xFE}) {
+		sc = utf16ScannerDecoder{binary.BigEndian}
+		//nolint:errcheck
+		br.Discard(2)
+	}
+
+	scanner := bufio.NewScanner(br)
+	scanner.Split(sc.ScanBytes)
+	for scanner.Scan() {
+		r, err := sc.DecodeRune(scanner.Bytes())
+		if err != nil {
 			return nil, err
 		}

-		// the utf16 byte order mark will be read as "unreadable" by ReadRune()
-		if isUnreadable(r) && lineCount == 0 && linePos == 0 {
-			utf16 = true
-			continue
-		}
-
-		// skip the second byte if we're reading utf16
-		if utf16 && r == 0 {
-			continue
-		}
-
 		next, r, err := parseRuneForState(r, curr)
 		if errors.Is(err, io.ErrUnexpectedEOF) {
 			return nil, fmt.Errorf("%w: %s", err, b.String())
@@ -103,13 +104,6 @@ func ParseFile(r io.Reader) (*File, error) {
 			return nil, err
 		}

-		if isNewline(r) {
-			lineCount++
-			linePos = 0
-		} else {
-			linePos++
-		}
-
 		// process the state transition, some transitions need to be intercepted and redirected
 		if next != curr {
 			switch curr {
@@ -309,10 +303,6 @@ func isNewline(r rune) bool {
 	return r == '\r' || r == '\n'
 }

-func isUnreadable(r rune) bool {
-	return r == unicode.ReplacementChar
-}
-
 func isValidMessageRole(role string) bool {
 	return role == "system" || role == "user" || role == "assistant"
 }
@@ -325,3 +315,39 @@ func isValidCommand(cmd string) bool {
 		return false
 	}
 }
+
+type scannerDecoder interface {
+	ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error)
+	DecodeRune([]byte) (rune, error)
+}
+
+type utf8ScannerDecoder struct{}
+
+func (utf8ScannerDecoder) ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	return scanBytesN(data, 1, atEOF)
+}
+
+func (utf8ScannerDecoder) DecodeRune(data []byte) (rune, error) {
+	r, _ := utf8.DecodeRune(data)
+	return r, nil
+}
+
+type utf16ScannerDecoder struct {
+	binary.ByteOrder
+}
+
+func (utf16ScannerDecoder) ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	return scanBytesN(data, 2, atEOF)
+}
+
+func (e utf16ScannerDecoder) DecodeRune(data []byte) (rune, error) {
+	return utf16.Decode([]uint16{e.ByteOrder.Uint16(data)})[0], nil
+}
+
+func scanBytesN(data []byte, n int, atEOF bool) (int, []byte, error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	return n, data[:n], nil
+}
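
The Peek/Discard pattern above inspects the first two bytes without consuming them unless they turn out to be a byte-order mark. A self-contained sketch of that shape (endianness mapping left aside):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

func sniffBOM(br *bufio.Reader) string {
	bom, err := br.Peek(2)
	if err != nil {
		return "unknown"
	}
	if bytes.Equal(bom, []byte{0xFF, 0xFE}) || bytes.Equal(bom, []byte{0xFE, 0xFF}) {
		br.Discard(2) // consume the mark so the scanner never sees it
		return "utf-16"
	}
	return "no BOM (assume utf-8)"
}

func main() {
	br := bufio.NewReader(strings.NewReader("\xff\xfeF\x00R\x00O\x00M\x00"))
	fmt.Println(sniffBOM(br)) // utf-16
}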


@@ -10,6 +10,7 @@ import (
 	"unicode/utf16"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestParseFileFile(t *testing.T) {
@@ -25,7 +26,7 @@ TEMPLATE template1
 	reader := strings.NewReader(input)

 	modelfile, err := ParseFile(reader)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	expectedCommands := []Command{
 		{Name: "model", Args: "model1"},
@@ -88,7 +89,7 @@ func TestParseFileFrom(t *testing.T) {
 	for _, c := range cases {
 		t.Run("", func(t *testing.T) {
 			modelfile, err := ParseFile(strings.NewReader(c.input))
-			assert.ErrorIs(t, err, c.err)
+			require.ErrorIs(t, err, c.err)
 			if modelfile != nil {
 				assert.Equal(t, c.expected, modelfile.Commands)
 			}
@@ -105,7 +106,7 @@ PARAMETER param1
 	reader := strings.NewReader(input)

 	_, err := ParseFile(reader)
-	assert.ErrorIs(t, err, io.ErrUnexpectedEOF)
+	require.ErrorIs(t, err, io.ErrUnexpectedEOF)
 }

 func TestParseFileBadCommand(t *testing.T) {
@@ -114,8 +115,7 @@ FROM foo
 BADCOMMAND param1 value1
 `
 	_, err := ParseFile(strings.NewReader(input))
-	assert.ErrorIs(t, err, errInvalidCommand)
-
+	require.ErrorIs(t, err, errInvalidCommand)
 }

 func TestParseFileMessages(t *testing.T) {
@@ -201,7 +201,7 @@ MESSAGE system`,
 	for _, c := range cases {
 		t.Run("", func(t *testing.T) {
 			modelfile, err := ParseFile(strings.NewReader(c.input))
-			assert.ErrorIs(t, err, c.err)
+			require.ErrorIs(t, err, c.err)
 			if modelfile != nil {
 				assert.Equal(t, c.expected, modelfile.Commands)
 			}
@@ -355,7 +355,7 @@ TEMPLATE """
 	for _, c := range cases {
 		t.Run("", func(t *testing.T) {
 			modelfile, err := ParseFile(strings.NewReader(c.multiline))
-			assert.ErrorIs(t, err, c.err)
+			require.ErrorIs(t, err, c.err)
 			if modelfile != nil {
 				assert.Equal(t, c.expected, modelfile.Commands)
 			}
@@ -413,7 +413,7 @@ func TestParseFileParameters(t *testing.T) {
 			fmt.Fprintln(&b, "FROM foo")
 			fmt.Fprintln(&b, "PARAMETER", k)
 			modelfile, err := ParseFile(&b)
-			assert.NoError(t, err)
+			require.NoError(t, err)

 			assert.Equal(t, []Command{
 				{Name: "model", Args: "foo"},
@@ -442,7 +442,7 @@ FROM foo
 	for _, c := range cases {
 		t.Run("", func(t *testing.T) {
 			modelfile, err := ParseFile(strings.NewReader(c.input))
-			assert.NoError(t, err)
+			require.NoError(t, err)
 			assert.Equal(t, c.expected, modelfile.Commands)
 		})
 	}
@@ -501,15 +501,14 @@ SYSTEM ""
 	for _, c := range cases {
 		t.Run("", func(t *testing.T) {
 			modelfile, err := ParseFile(strings.NewReader(c))
-			assert.NoError(t, err)
+			require.NoError(t, err)

 			modelfile2, err := ParseFile(strings.NewReader(modelfile.String()))
-			assert.NoError(t, err)
+			require.NoError(t, err)

 			assert.Equal(t, modelfile, modelfile2)
 		})
 	}
-
 }

 func TestParseFileUTF16ParseFile(t *testing.T) {
@@ -522,10 +521,10 @@ SYSTEM You are a utf16 file.
 	utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...))
 	buf := new(bytes.Buffer)
 	err := binary.Write(buf, binary.LittleEndian, utf16File)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	actual, err := ParseFile(buf)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	expected := []Command{
 		{Name: "model", Args: "bob"},
@@ -539,9 +538,9 @@ SYSTEM You are a utf16 file.
 	// simulate a utf16 be file
 	buf = new(bytes.Buffer)
 	err = binary.Write(buf, binary.BigEndian, utf16File)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	actual, err = ParseFile(buf)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Equal(t, expected, actual.Commands)
 }


@@ -59,7 +59,7 @@ func (p *Progress) StopAndClear() bool {
 	stopped := p.stop()
 	if stopped {
 		// clear all progress lines
-		for i := 0; i < p.pos; i++ {
+		for i := range p.pos {
 			if i > 0 {
 				fmt.Fprint(p.w, "\033[A")
 			}
@@ -85,7 +85,7 @@ func (p *Progress) render() {
 	defer fmt.Fprint(p.w, "\033[?25h")

 	// clear already rendered progress lines
-	for i := 0; i < p.pos; i++ {
+	for i := range p.pos {
 		if i > 0 {
 			fmt.Fprint(p.w, "\033[A")
 		}


@@ -52,7 +52,6 @@ func (b *Buffer) GetLineSpacing(line int) bool {
 	}

 	return hasSpace.(bool)
 }
-
 func (b *Buffer) MoveLeft() {
@@ -117,15 +116,12 @@ func (b *Buffer) MoveRight() {
 		if b.DisplayPos%b.LineWidth == 0 {
 			fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))
-
 		} else if (b.DisplayPos-rLength)%b.LineWidth == b.LineWidth-1 && hasSpace {
 			fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())+rLength))
 			b.DisplayPos += 1
-
 		} else if b.LineHasSpace.Size() > 0 && b.DisplayPos%b.LineWidth == b.LineWidth-1 && hasSpace {
 			fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))
 			b.DisplayPos += 1
-
 		} else {
 			fmt.Print(cursorRightN(rLength))
 		}
@@ -154,7 +150,7 @@ func (b *Buffer) MoveToStart() {
 	if b.Pos > 0 {
 		currLine := b.DisplayPos / b.LineWidth
 		if currLine > 0 {
-			for cnt := 0; cnt < currLine; cnt++ {
+			for range currLine {
 				fmt.Print(CursorUp)
 			}
 		}
@@ -169,7 +165,7 @@ func (b *Buffer) MoveToEnd() {
 		currLine := b.DisplayPos / b.LineWidth
 		totalLines := b.DisplaySize() / b.LineWidth
 		if currLine < totalLines {
-			for cnt := 0; cnt < totalLines-currLine; cnt++ {
+			for range totalLines - currLine {
 				fmt.Print(CursorDown)
 			}
 			remainder := b.DisplaySize() % b.LineWidth
@@ -185,7 +181,7 @@ func (b *Buffer) MoveToEnd() {

 func (b *Buffer) DisplaySize() int {
 	sum := 0
-	for i := 0; i < b.Buf.Size(); i++ {
+	for i := range b.Buf.Size() {
 		if e, ok := b.Buf.Get(i); ok {
 			if r, ok := e.(rune); ok {
 				sum += runewidth.RuneWidth(r)
@@ -197,7 +193,6 @@ func (b *Buffer) DisplaySize() int {
 }

 func (b *Buffer) Add(r rune) {
-
 	if b.Pos == b.Buf.Size() {
 		b.AddChar(r, false)
 	} else {
@@ -210,7 +205,6 @@ func (b *Buffer) AddChar(r rune, insert bool) {
 	b.DisplayPos += rLength

 	if b.Pos > 0 {
-
 		if b.DisplayPos%b.LineWidth == 0 {
 			fmt.Printf("%c", r)
 			fmt.Printf("\n%s", b.Prompt.AltPrompt)
@@ -235,7 +229,6 @@ func (b *Buffer) AddChar(r rune, insert bool) {
 		} else {
 			b.LineHasSpace.Add(true)
 		}
-
 	} else {
 		fmt.Printf("%c", r)
 	}
@@ -356,7 +349,6 @@ func (b *Buffer) drawRemaining() {

 func (b *Buffer) Remove() {
 	if b.Buf.Size() > 0 && b.Pos > 0 {
-
 		if e, ok := b.Buf.Get(b.Pos - 1); ok {
 			if r, ok := e.(rune); ok {
 				rLength := runewidth.RuneWidth(r)
@@ -382,7 +374,6 @@ func (b *Buffer) Remove() {
 				} else {
 					fmt.Print(" " + CursorLeft)
 				}
-
 			} else if (b.DisplayPos-rLength)%b.LineWidth == 0 && hasSpace {
 				fmt.Printf(CursorBOL + ClearToEOL)
 				fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))
@@ -391,10 +382,9 @@ func (b *Buffer) Remove() {
 					b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
 				}
-
 				b.DisplayPos -= 1
 			} else {
 				fmt.Print(cursorLeftN(rLength))
-				for i := 0; i < rLength; i++ {
+				for range rLength {
 					fmt.Print(" ")
 				}
 				fmt.Print(cursorLeftN(rLength))
@@ -451,7 +441,7 @@ func (b *Buffer) DeleteBefore() {

 func (b *Buffer) DeleteRemaining() {
 	if b.DisplaySize() > 0 && b.Pos < b.DisplaySize() {
 		charsToDel := b.Buf.Size() - b.Pos
-		for cnt := 0; cnt < charsToDel; cnt++ {
+		for range charsToDel {
 			b.Delete()
 		}
 	}
@@ -495,7 +485,7 @@ func (b *Buffer) ClearScreen() {
 		if currPos > 0 {
 			targetLine := currPos / b.LineWidth
 			if targetLine > 0 {
-				for cnt := 0; cnt < targetLine; cnt++ {
+				for range targetLine {
 					fmt.Print(CursorDown)
 				}
 			}
@@ -525,7 +515,7 @@ func (b *Buffer) Replace(r []rune) {

 	fmt.Printf(CursorBOL + ClearToEOL)

-	for i := 0; i < lineNums; i++ {
+	for range lineNums {
 		fmt.Print(CursorUp + CursorBOL + ClearToEOL)
 	}


@@ -91,7 +91,7 @@ func (h *History) Add(l []rune) {
 func (h *History) Compact() {
 	s := h.Buf.Size()
 	if s > h.Limit {
-		for cnt := 0; cnt < s-h.Limit; cnt++ {
+		for range s - h.Limit {
 			h.Buf.Remove(0)
 		}
 	}
@@ -139,7 +139,7 @@ func (h *History) Save() error {
 	defer f.Close()

 	buf := bufio.NewWriter(f)
-	for cnt := 0; cnt < h.Size(); cnt++ {
+	for cnt := range h.Size() {
 		v, _ := h.Buf.Get(cnt)
 		line, _ := v.([]rune)
 		if _, err := buf.WriteString(string(line) + "\n"); err != nil {


@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"syscall"
 )

 type Prompt struct {
@@ -63,7 +62,7 @@ func New(prompt Prompt) (*Instance, error) {
 func (i *Instance) Readline() (string, error) {
 	if !i.Terminal.rawmode {
-		fd := int(syscall.Stdin)
+		fd := os.Stdin.Fd()
 		termios, err := SetRawMode(fd)
 		if err != nil {
 			return "", err
@@ -80,8 +79,8 @@ func (i *Instance) Readline() (string, error) {
 	fmt.Print(prompt)

 	defer func() {
-		fd := int(syscall.Stdin)
-		// nolint: errcheck
+		fd := os.Stdin.Fd()
+		//nolint:errcheck
 		UnsetRawMode(fd, i.Terminal.termios)
 		i.Terminal.rawmode = false
 	}()
@@ -136,7 +135,7 @@ func (i *Instance) Readline() (string, error) {
 			buf.MoveRight()
 		case CharBracketedPaste:
 			var code string
-			for cnt := 0; cnt < 3; cnt++ {
+			for range 3 {
 				r, err = i.Terminal.Read()
 				if err != nil {
 					return "", io.EOF
@@ -198,7 +197,7 @@ func (i *Instance) Readline() (string, error) {
 			buf.Remove()
 		case CharTab:
 			// todo: convert back to real tabs
-			for cnt := 0; cnt < 8; cnt++ {
+			for range 8 {
 				buf.Add(' ')
 			}
 		case CharDelete:
@@ -216,7 +215,7 @@ func (i *Instance) Readline() (string, error) {
 		case CharCtrlW:
 			buf.DeleteWord()
 		case CharCtrlZ:
-			fd := int(syscall.Stdin)
+			fd := os.Stdin.Fd()
 			return handleCharCtrlZ(fd, i.Terminal.termios)
 		case CharEnter, CharCtrlJ:
 			output := buf.String()
@@ -248,7 +247,7 @@ func (i *Instance) HistoryDisable() {
 }

 func NewTerminal() (*Terminal, error) {
-	fd := int(syscall.Stdin)
+	fd := os.Stdin.Fd()
 	termios, err := SetRawMode(fd)
 	if err != nil {
 		return nil, err
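
The fd plumbing here and in the term files below switches from int(syscall.Stdin) to os.Stdin.Fd(), which yields a uintptr — a representation that suits both the Unix ioctl path and windows.Handle without per-call casts. A minimal illustration (the probe is a placeholder for the platform-specific getTermios/GetConsoleMode calls):

package main

import (
	"fmt"
	"os"
)

// isTTY stands in for the real platform probes above; hypothetical here.
func isTTY(fd uintptr) bool {
	_ = fd
	return true
}

func main() {
	fd := os.Stdin.Fd() // uintptr, not int
	fmt.Println("stdin fd:", fd, "tty:", isTTY(fd))
}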


@@ -6,7 +6,7 @@ import (
 	"syscall"
 )

-func handleCharCtrlZ(fd int, termios any) (string, error) {
+func handleCharCtrlZ(fd uintptr, termios any) (string, error) {
 	t := termios.(*Termios)
 	if err := UnsetRawMode(fd, t); err != nil {
 		return "", err


@@ -1,6 +1,6 @@
 package readline

-func handleCharCtrlZ(fd int, state any) (string, error) {
+func handleCharCtrlZ(fd uintptr, state any) (string, error) {
 	// not supported
 	return "", nil
 }


@@ -8,7 +8,7 @@ import (

 type Termios syscall.Termios

-func SetRawMode(fd int) (*Termios, error) {
+func SetRawMode(fd uintptr) (*Termios, error) {
 	termios, err := getTermios(fd)
 	if err != nil {
 		return nil, err
@@ -25,13 +25,13 @@ func SetRawMode(fd int) (*Termios, error) {
 	return termios, setTermios(fd, &newTermios)
 }

-func UnsetRawMode(fd int, termios any) error {
+func UnsetRawMode(fd uintptr, termios any) error {
 	t := termios.(*Termios)
 	return setTermios(fd, t)
 }

 // IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
+func IsTerminal(fd uintptr) bool {
 	_, err := getTermios(fd)
 	return err == nil
 }


@@ -7,17 +7,17 @@ import (
 	"unsafe"
 )

-func getTermios(fd int) (*Termios, error) {
+func getTermios(fd uintptr) (*Termios, error) {
 	termios := new(Termios)
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
 	if err != 0 {
 		return nil, err
 	}
 	return termios, nil
 }

-func setTermios(fd int, termios *Termios) error {
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+func setTermios(fd uintptr, termios *Termios) error {
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
 	if err != 0 {
 		return err
 	}


@@ -10,17 +10,17 @@ import (
 const tcgets = 0x5401
 const tcsets = 0x5402

-func getTermios(fd int) (*Termios, error) {
+func getTermios(fd uintptr) (*Termios, error) {
 	termios := new(Termios)
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), tcgets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcgets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
 	if err != 0 {
 		return nil, err
 	}
 	return termios, nil
 }

-func setTermios(fd int, termios *Termios) error {
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), tcsets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+func setTermios(fd uintptr, termios *Termios) error {
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcsets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
 	if err != 0 {
 		return err
 	}


@@ -9,13 +9,13 @@ type State struct {
 }

 // IsTerminal checks if the given file descriptor is associated with a terminal
-func IsTerminal(fd int) bool {
+func IsTerminal(fd uintptr) bool {
 	var st uint32
 	err := windows.GetConsoleMode(windows.Handle(fd), &st)
 	return err == nil
 }

-func SetRawMode(fd int) (*State, error) {
+func SetRawMode(fd uintptr) (*State, error) {
 	var st uint32
 	if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
 		return nil, err
@@ -32,7 +32,7 @@ func SetRawMode(fd int) (*State, error) {
 	return &State{st}, nil
 }

-func UnsetRawMode(fd int, state any) error {
+func UnsetRawMode(fd uintptr, state any) error {
 	s := state.(*State)
 	return windows.SetConsoleMode(windows.Handle(fd), s.mode)
 }


@@ -340,17 +340,17 @@ type downloadOpts struct {
 }

 // downloadBlob downloads a blob from the registry and stores it in the blobs directory
-func downloadBlob(ctx context.Context, opts downloadOpts) error {
+func downloadBlob(ctx context.Context, opts downloadOpts) (cacheHit bool, _ error) {
 	fp, err := GetBlobsPath(opts.digest)
 	if err != nil {
-		return err
+		return false, err
 	}

 	fi, err := os.Stat(fp)
 	switch {
 	case errors.Is(err, os.ErrNotExist):
 	case err != nil:
-		return err
+		return false, err
 	default:
 		opts.fn(api.ProgressResponse{
 			Status:    fmt.Sprintf("pulling %s", opts.digest[7:19]),
@@ -359,7 +359,7 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
 			Completed: fi.Size(),
 		})

-		return nil
+		return true, nil
 	}

 	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
@@ -369,12 +369,12 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
 		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
 		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
 			blobDownloadManager.Delete(opts.digest)
-			return err
+			return false, err
 		}

-		// nolint: contextcheck
+		//nolint:contextcheck
 		go download.Run(context.Background(), requestURL, opts.regOpts)
 	}

-	return download.Wait(ctx, opts.fn)
+	return false, download.Wait(ctx, opts.fn)
 }
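
downloadBlob now reports whether the blob was already on disk, so the caller (PullModel, below) can skip re-hashing cache hits. A runnable sketch of that caller-side bookkeeping, with hypothetical fetch/verify stand-ins for downloadBlob and verifyBlob:

package main

import (
	"context"
	"fmt"
)

func fetch(_ context.Context, digest string) (cacheHit bool, _ error) {
	return digest == "sha256:aaa", nil // pretend this one was already on disk
}

func verify(digest string) error {
	fmt.Println("verifying", digest)
	return nil
}

func pull(ctx context.Context, digests []string) error {
	skipVerify := make(map[string]bool)
	for _, d := range digests {
		hit, err := fetch(ctx, d)
		if err != nil {
			return err
		}
		skipVerify[d] = hit
	}
	for _, d := range digests {
		if skipVerify[d] {
			continue // hashed when it was first downloaded; skip re-reading it
		}
		if err := verify(d); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = pull(context.Background(), []string{"sha256:aaa", "sha256:bbb"})
}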


@@ -18,17 +18,16 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"slices"
 	"strconv"
 	"strings"

-	"golang.org/x/exp/slices"
-
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/auth"
+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/llm"
 	"github.com/ollama/ollama/parser"
-	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/types/errtypes"
 	"github.com/ollama/ollama/types/model"
 	"github.com/ollama/ollama/version"
@@ -315,7 +314,7 @@ func realpath(rel, from string) string {
 	return abspath
 }

-func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
+func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
 	config := ConfigV2{
 		OS:           "linux",
 		Architecture: "amd64",
@@ -333,7 +332,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 		switch c.Name {
 		case "model", "adapter":
-			var baseLayers []*layerWithGGML
+			var baseLayers []*layerGGML
 			if name := model.ParseName(c.Args); name.IsValid() {
 				baseLayers, err = parseFromModel(ctx, name, fn)
 				if err != nil {
@@ -440,19 +439,27 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 				layers = append(layers, baseLayer.Layer)
 			}
 		case "license", "template", "system":
+			if c.Name != "license" {
+				// replace
+				layers = slices.DeleteFunc(layers, func(layer *Layer) bool {
+					if layer.MediaType != mediatype {
+						return false
+					}
+
+					if err := layer.Remove(); err != nil {
+						return false
+					}
+
+					return true
+				})
+			}
+
 			blob := strings.NewReader(c.Args)
 			layer, err := NewLayer(blob, mediatype)
 			if err != nil {
 				return err
 			}

-			if c.Name != "license" {
-				// replace
-				layers = slices.DeleteFunc(layers, func(layer *Layer) bool {
-					return layer.MediaType == mediatype
-				})
-			}
-
 			layers = append(layers, layer)
 		case "message":
 			role, content, ok := strings.Cut(c.Args, ": ")
@@ -571,26 +578,15 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 		}
 	}

-	unref := make(map[string]struct{})
-	if manifest, _, err := GetManifest(ParseModelPath(name)); err == nil {
-		for _, layer := range manifest.Layers {
-			if !slices.Contains(digests, layer.Digest) {
-				unref[layer.Digest] = struct{}{}
-			}
-		}
-
-		if manifest.Config.Digest != layer.Digest {
-			unref[manifest.Config.Digest] = struct{}{}
-		}
-	}
+	old, _ := ParseNamedManifest(name)

 	fn(api.ProgressResponse{Status: "writing manifest"})
 	if err := WriteManifest(name, layer, layers); err != nil {
 		return err
 	}

-	if !envconfig.NoPrune {
-		if err := deleteUnusedLayers(nil, unref); err != nil {
+	if !envconfig.NoPrune && old != nil {
+		if err := old.RemoveLayers(); err != nil {
 			return err
 		}
 	}
@@ -662,7 +658,7 @@ func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]struct{})
 		// save (i.e. delete from the deleteMap) any files used in other manifests
 		manifest, _, err := GetManifest(fmp)
 		if err != nil {
-			// nolint: nilerr
+			//nolint:nilerr
 			return nil
 		}
@@ -857,23 +853,27 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
 	layers = append(layers, manifest.Layers...)
 	layers = append(layers, manifest.Config)

+	skipVerify := make(map[string]bool)
 	for _, layer := range layers {
-		if err := downloadBlob(
-			ctx,
-			downloadOpts{
-				mp:      mp,
-				digest:  layer.Digest,
-				regOpts: regOpts,
-				fn:      fn,
-			}); err != nil {
+		cacheHit, err := downloadBlob(ctx, downloadOpts{
+			mp:      mp,
+			digest:  layer.Digest,
+			regOpts: regOpts,
+			fn:      fn,
+		})
+		if err != nil {
 			return err
 		}
+		skipVerify[layer.Digest] = cacheHit
 		delete(deleteMap, layer.Digest)
 	}
 	delete(deleteMap, manifest.Config.Digest)

 	fn(api.ProgressResponse{Status: "verifying sha256 digest"})
 	for _, layer := range layers {
+		if skipVerify[layer.Digest] {
+			continue
+		}
 		if err := verifyBlob(layer.Digest); err != nil {
 			if errors.Is(err, errDigestMismatch) {
 				// something went wrong, delete the blob
@@ -988,7 +988,7 @@ func getTokenSubject(token string) string {
 func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *registryOptions) (*http.Response, error) {
 	anonymous := true // access will default to anonymous if no user is found associated with the public key
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
 		if err != nil {
 			if !errors.Is(err, context.Canceled) {
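
The license/template/system hunk above moves the layer replacement before NewLayer and deletes the old layer's file inside the slices.DeleteFunc predicate. The new code adds the on-disk removal as a side effect; this sketch shows just the keep/drop semantics of slices.DeleteFunc itself:

package main

import (
	"fmt"
	"slices"
)

type layer struct{ mediaType string }

func main() {
	layers := []layer{{"template"}, {"model"}, {"system"}}

	// DeleteFunc keeps elements for which the predicate returns false,
	// so returning true for a matching media type drops the old layer.
	layers = slices.DeleteFunc(layers, func(l layer) bool {
		return l.mediaType == "template"
	})
	fmt.Println(layers) // [{model} {system}]
}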


@@ -1,9 +1,9 @@
 package server

 import (
-	"bytes"
 	"crypto/sha256"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"log/slog"
@@ -34,12 +34,6 @@ func (m *Manifest) Remove() error {
 		return err
 	}

-	for _, layer := range append(m.Layers, m.Config) {
-		if err := layer.Remove(); err != nil {
-			return err
-		}
-	}
-
 	manifests, err := GetManifestPath()
 	if err != nil {
 		return err
@@ -48,6 +42,18 @@ func (m *Manifest) Remove() error {
 	return PruneDirectory(manifests)
 }

+func (m *Manifest) RemoveLayers() error {
+	for _, layer := range append(m.Layers, m.Config) {
+		if err := layer.Remove(); errors.Is(err, os.ErrNotExist) {
+			slog.Debug("layer does not exist", "digest", layer.Digest)
+		} else if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func ParseNamedManifest(n model.Name) (*Manifest, error) {
 	if !n.IsFullyQualified() {
 		return nil, model.Unqualified(n)
@@ -85,30 +91,31 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {
 	}, nil
 }

-func WriteManifest(name string, config *Layer, layers []*Layer) error {
-	manifest := ManifestV2{
+func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {
+	manifests, err := GetManifestPath()
+	if err != nil {
+		return err
+	}
+
+	p := filepath.Join(manifests, name.Filepath())
+	if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
+		return err
+	}
+
+	f, err := os.Create(p)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	m := ManifestV2{
 		SchemaVersion: 2,
 		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
 		Config:        config,
 		Layers:        layers,
 	}

-	var b bytes.Buffer
-	if err := json.NewEncoder(&b).Encode(manifest); err != nil {
-		return err
-	}
-
-	modelpath := ParseModelPath(name)
-	manifestPath, err := modelpath.GetManifestPath()
-	if err != nil {
-		return err
-	}
-
-	if err := os.MkdirAll(filepath.Dir(manifestPath), 0o755); err != nil {
-		return err
-	}
-
-	return os.WriteFile(manifestPath, b.Bytes(), 0o644)
+	return json.NewEncoder(f).Encode(m)
 }

 func Manifests() (map[model.Name]*Manifest, error) {
func Manifests() (map[model.Name]*Manifest, error) { func Manifests() (map[model.Name]*Manifest, error) {
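
The rewritten WriteManifest streams JSON straight into the created file instead of staging the bytes in a buffer and calling os.WriteFile. A minimal sketch of that shape (the path here is illustrative):

package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

type manifest struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType"`
}

// writeJSON mirrors the new WriteManifest flow: ensure the parent
// directory, create the file, and encode directly into it.
func writeJSON(path string, v any) error {
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return err
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(v)
}

func main() {
	_ = writeJSON("/tmp/manifests/example.json", manifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
	})
}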


@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"os"
 	"path/filepath"
@@ -14,27 +15,26 @@ import (
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/convert"
 	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/templates"
 	"github.com/ollama/ollama/types/model"
 )

 var intermediateBlobs map[string]string = make(map[string]string)

-type layerWithGGML struct {
+type layerGGML struct {
 	*Layer
 	*llm.GGML
 }

-func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
-	modelpath := ParseModelPath(name.String())
-	manifest, _, err := GetManifest(modelpath)
+func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
+	m, err := ParseNamedManifest(name)
 	switch {
 	case errors.Is(err, os.ErrNotExist):
 		if err := PullModel(ctx, name.String(), &registryOptions{}, fn); err != nil {
 			return nil, err
 		}

-		modelpath = ParseModelPath(name.String())
-		manifest, _, err = GetManifest(modelpath)
+		m, err = ParseNamedManifest(name)
 		if err != nil {
 			return nil, err
 		}
@@ -42,8 +42,8 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
 		return nil, err
 	}

-	for _, layer := range manifest.Layers {
-		layer, err := NewLayerFromLayer(layer.Digest, layer.MediaType, modelpath.GetShortTagname())
+	for _, layer := range m.Layers {
+		layer, err := NewLayerFromLayer(layer.Digest, layer.MediaType, name.DisplayShortest())
 		if err != nil {
 			return nil, err
 		}
@@ -68,17 +68,16 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
 			return nil, err
 		}

-			layers = append(layers, &layerWithGGML{layer, ggml})
+			layers = append(layers, &layerGGML{layer, ggml})
 		default:
-			layers = append(layers, &layerWithGGML{layer, nil})
+			layers = append(layers, &layerGGML{layer, nil})
 		}
 	}

 	return layers, nil
 }

-func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
 	stat, err := file.Stat()
 	if err != nil {
 		return nil, err
@@ -182,13 +181,13 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
 		return nil, err
 	}

-	layers = append(layers, &layerWithGGML{layer, ggml})
+	layers = append(layers, &layerGGML{layer, ggml})

 	intermediateBlobs[digest] = layer.Digest
-	return layers, nil
+	return detectChatTemplate(layers)
 }

-func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
 	sr := io.NewSectionReader(file, 0, 512)
 	contentType, err := detectContentType(sr)
 	if err != nil {
@@ -230,10 +229,30 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
 			return nil, err
 		}

-		layers = append(layers, &layerWithGGML{layer, ggml})
+		layers = append(layers, &layerGGML{layer, ggml})
 		offset = n
 	}

+	return detectChatTemplate(layers)
+}
+
+func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) {
+	for _, layer := range layers {
+		if s := layer.GGML.KV().ChatTemplate(); s != "" {
+			if t, err := templates.NamedTemplate(s); err != nil {
+				slog.Debug("template detection", "error", err)
+			} else {
+				tmpl, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template")
+				if err != nil {
+					return nil, err
+				}
+
+				tmpl.status = fmt.Sprintf("using autodetected template %s", t.Name)
+				layers = append(layers, &layerGGML{tmpl, nil})
+			}
+		}
+	}
+
 	return layers, nil
 }
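detectChatTemplate reads the tokenizer.chat_template string that GGUF files embed and, when it matches a known template, appends an autodetected template layer so models imported without a TEMPLATE line still get a usable prompt format. A rough standalone sketch of the lookup idea; the checksum-keyed map here is purely for illustration and is an assumption, not how the templates package actually indexes templates/index.json:

package main

import (
	"crypto/sha256"
	"fmt"
)

// A stand-in for templates.NamedTemplate, keyed by a digest of the raw
// Jinja source; the real package consults templates/index.json.
var known = map[[32]byte]string{
	sha256.Sum256([]byte("{% for message in messages %}...{% endfor %}")): "chatml",
}

func namedTemplate(jinja string) (string, error) {
	if name, ok := known[sha256.Sum256([]byte(jinja))]; ok {
		return name, nil
	}
	return "", fmt.Errorf("no matching template")
}

func main() {
	if name, err := namedTemplate("{% for message in messages %}...{% endfor %}"); err == nil {
		fmt.Println("using autodetected template", name)
	}
}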


@@ -6,12 +6,13 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestGetBlobsPath(t *testing.T) {
 	// GetBlobsPath expects an actual directory to exist
 	dir, err := os.MkdirTemp("", "ollama-test")
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	defer os.RemoveAll(dir)

 	tests := []struct {
@@ -63,7 +64,7 @@ func TestGetBlobsPath(t *testing.T) {
 			got, err := GetBlobsPath(tc.digest)

-			assert.ErrorIs(t, tc.err, err, tc.name)
+			require.ErrorIs(t, tc.err, err, tc.name)
 			assert.Equal(t, tc.expected, got, tc.name)
 		})
 	}
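The assert-to-require migration here changes failure semantics, not just names: assert.Nil records the failure and lets the test keep running, while require.NoError aborts the test immediately, which is what you want for setup steps whose failure makes everything after them meaningless. For illustration:

package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestSetup(t *testing.T) {
	dir, err := os.MkdirTemp("", "example")
	// require halts the test here if the temp dir could not be created,
	// so nothing below runs against a missing directory...
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	// ...while assert merely records a failure and continues, which is
	// fine for independent checks like this one.
	assert.DirExists(t, dir)
}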


@@ -16,6 +16,7 @@ import (
 	"os"
 	"os/signal"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"syscall"
@@ -23,7 +24,6 @@ import (
 	"github.com/gin-contrib/cors"
 	"github.com/gin-gonic/gin"
-	"golang.org/x/exp/slices"

 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/envconfig"
@@ -77,7 +77,6 @@ func isSupportedImageType(image []byte) bool {
 }

 func (s *Server) GenerateHandler(c *gin.Context) {
 	checkpointStart := time.Now()
-
 	var req api.GenerateRequest
 	err := c.ShouldBindJSON(&req)
@@ -524,8 +523,8 @@ func checkNameExists(name model.Name) error {
 }

 func (s *Server) CreateModelHandler(c *gin.Context) {
-	var req api.CreateRequest
-	if err := c.ShouldBindJSON(&req); errors.Is(err, io.EOF) {
+	var r api.CreateRequest
+	if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) {
 		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"})
 		return
 	} else if err != nil {
@@ -533,7 +532,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
 		return
 	}

-	name := model.ParseName(cmp.Or(req.Model, req.Name))
+	name := model.ParseName(cmp.Or(r.Model, r.Name))
 	if !name.IsValid() {
 		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg})
 		return
@@ -544,24 +543,24 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
 		return
 	}

-	if req.Path == "" && req.Modelfile == "" {
+	if r.Path == "" && r.Modelfile == "" {
 		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "path or modelfile are required"})
 		return
 	}

-	var r io.Reader = strings.NewReader(req.Modelfile)
-	if req.Path != "" && req.Modelfile == "" {
-		f, err := os.Open(req.Path)
+	var sr io.Reader = strings.NewReader(r.Modelfile)
+	if r.Path != "" && r.Modelfile == "" {
+		f, err := os.Open(r.Path)
 		if err != nil {
 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("error reading modelfile: %s", err)})
 			return
 		}
 		defer f.Close()
-		r = f
+		sr = f
 	}

-	modelfile, err := parser.ParseFile(r)
+	f, err := parser.ParseFile(sr)
 	if err != nil {
 		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
 		return
@@ -577,17 +576,13 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
 		ctx, cancel := context.WithCancel(c.Request.Context())
 		defer cancel()

-		quantization := req.Quantization
-		if req.Quantize != "" {
-			quantization = req.Quantize
-		}
-
-		if err := CreateModel(ctx, name.String(), filepath.Dir(req.Path), strings.ToUpper(quantization), modelfile, fn); err != nil {
+		quantization := cmp.Or(r.Quantize, r.Quantization)
+		if err := CreateModel(ctx, name, filepath.Dir(r.Path), strings.ToUpper(quantization), f, fn); err != nil {
 			ch <- gin.H{"error": err.Error()}
 		}
 	}()

-	if req.Stream != nil && !*req.Stream {
+	if r.Stream != nil && !*r.Stream {
 		waitForStream(c, ch)
 		return
 	}
@@ -621,6 +616,11 @@ func (s *Server) DeleteModelHandler(c *gin.Context) {
 		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
 		return
 	}
+
+	if err := m.RemoveLayers(); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
 }

 func (s *Server) ShowModelHandler(c *gin.Context) {
@@ -730,7 +730,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
 		return
 	}

-	models := []api.ModelResponse{}
+	models := []api.ListModelResponse{}
 	for n, m := range ms {
 		f, err := m.Config.Open()
 		if err != nil {
@@ -746,7 +746,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
 		}

 		// tag should never be masked
-		models = append(models, api.ModelResponse{
+		models = append(models, api.ListModelResponse{
 			Model: n.DisplayShortest(),
 			Name:  n.DisplayShortest(),
 			Size:  m.Size(),
@@ -762,7 +762,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
 		})
 	}

-	slices.SortStableFunc(models, func(i, j api.ModelResponse) int {
+	slices.SortStableFunc(models, func(i, j api.ListModelResponse) int {
 		// most recently modified first
 		return cmp.Compare(j.ModifiedAt.Unix(), i.ModifiedAt.Unix())
 	})
@@ -942,7 +942,7 @@ func allowedHostsMiddleware(addr net.Addr) gin.HandlerFunc {
 		}

 		if allowedHost(host) {
-			if c.Request.Method == "OPTIONS" {
+			if c.Request.Method == http.MethodOptions {
 				c.AbortWithStatus(http.StatusNoContent)
 				return
 			}
@@ -960,6 +960,10 @@ func (s *Server) GenerateRoutes() http.Handler {
 	config.AllowWildcard = true
 	config.AllowBrowserExtensions = true
 	config.AllowHeaders = []string{"Authorization", "Content-Type", "User-Agent", "Accept", "X-Requested-With"}
+	openAIProperties := []string{"lang", "package-version", "os", "arch", "runtime", "runtime-version", "async"}
+	for _, prop := range openAIProperties {
+		config.AllowHeaders = append(config.AllowHeaders, "x-stainless-"+prop)
+	}
 	config.AllowOrigins = envconfig.AllowOrigins

 	r := gin.Default()
@@ -1139,7 +1143,7 @@ func streamResponse(c *gin.Context, ch chan any) {
 }

 func (s *Server) ProcessHandler(c *gin.Context) {
-	models := []api.ModelResponse{}
+	models := []api.ProcessModelResponse{}

 	for _, v := range s.sched.loaded {
 		model := v.model
@@ -1151,7 +1155,7 @@ func (s *Server) ProcessHandler(c *gin.Context) {
 			QuantizationLevel: model.Config.FileType,
 		}

-		mr := api.ModelResponse{
+		mr := api.ProcessModelResponse{
 			Model: model.ShortName,
 			Name:  model.ShortName,
 			Size:  int64(v.estimatedTotal),
@@ -1171,7 +1175,7 @@ func (s *Server) ProcessHandler(c *gin.Context) {
 		models = append(models, mr)
 	}

-	c.JSON(http.StatusOK, api.ListResponse{Models: models})
+	c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
 }

 // ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
@@ -1306,7 +1310,6 @@ func (s *Server) ChatHandler(c *gin.Context) {
 		defer close(ch)

 		fn := func(r llm.CompletionResponse) {
-
 			resp := api.ChatResponse{
 				Model:     req.Model,
 				CreatedAt: time.Now().UTC(),
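The Quantize/Quantization fallback above collapses into cmp.Or (Go 1.22 standard library), which returns its first argument that is not the zero value, so the deprecated field keeps working without an explicit if. A minimal demonstration:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	// cmp.Or returns the first non-zero value, so the new field wins
	// when set and the deprecated one serves as a fallback.
	fmt.Println(cmp.Or("", "q4_0"))     // "q4_0"
	fmt.Println(cmp.Or("q8_0", "q4_0")) // "q8_0"
	fmt.Println(cmp.Or("", ""))         // ""
}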


@@ -15,11 +15,12 @@ import (
 	"github.com/gin-gonic/gin"

 	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/llm"
 )

 var stream bool = false

-func createBinFile(t *testing.T) string {
+func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) string {
 	t.Helper()

 	f, err := os.CreateTemp(t.TempDir(), "")
@@ -28,19 +29,7 @@ func createBinFile(t *testing.T) string {
 	}
 	defer f.Close()

-	if err := binary.Write(f, binary.LittleEndian, []byte("GGUF")); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := binary.Write(f, binary.LittleEndian, uint32(3)); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := binary.Write(f, binary.LittleEndian, uint64(0)); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := binary.Write(f, binary.LittleEndian, uint64(0)); err != nil {
+	if err := llm.NewGGUFV3(binary.LittleEndian).Encode(f, kv, ti); err != nil {
 		t.Fatal(err)
 	}
@@ -101,7 +90,7 @@ func TestCreateFromBin(t *testing.T) {
 	var s Server
 	w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
 		Name:      "test",
-		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 		Stream:    &stream,
 	})
@@ -126,7 +115,7 @@ func TestCreateFromModel(t *testing.T) {
 	w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
 		Name:      "test",
-		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 		Stream:    &stream,
 	})
@@ -158,3 +147,414 @@ func TestCreateFromModel(t *testing.T) {
 		filepath.Join(p, "blobs", "sha256-ca239d7bd8ea90e4a5d2e6bf88f8d74a47b14336e73eb4e18bed4dd325018116"),
 	})
 }
func TestCreateRemovesLayers(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-b507b9c2f6ca642bffcd06665ea7c91f235fd32daeefdf875a0f938db05fb315"),
filepath.Join(p, "blobs", "sha256-bc80b03733773e0728011b2f4adf34c458b400e1aad48cb28d61170f3a2ad2d6"),
})
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-8f2c2167d789c6b2302dff965160fa5029f6a24096d262c1cbb469f21a045382"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
})
}
func TestCreateUnsetsSystem(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM Say hi!", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-8585df945d1069bc78b79bd10bb73ba07fbc29b0f5479a31a601c0d12731416e"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-f29e82a8284dbdf5910b1555580ff60b04238b8da9d5e51159ada67a4d0d5851"),
})
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM \"\"", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-67d4b8d106af2a5b100a46e9bdc038c71eef2a35c9abac784092654212f97cf5"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
})
bts, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
if err != nil {
t.Fatal(err)
}
if string(bts) != "" {
t.Fatalf("expected empty string, actual %s", string(bts))
}
}
func TestCreateMergeParameters(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nPARAMETER temperature 1\nPARAMETER top_k 10\nPARAMETER stop USER:\nPARAMETER stop ASSISTANT:", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
})
// in order to merge parameters, the second model must be created FROM the first
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7",
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-4cd9d4ba6b734d9b4cbd1e5caa60374c00722e993fce5e1e2d15a33698f71187"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"),
})
actual, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"))
if err != nil {
t.Fatal(err)
}
expect, err := json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"USER:", "ASSISTANT:"}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
t.Errorf("expected %s, actual %s", string(expect), string(actual))
}
// slices are replaced
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7\nPARAMETER stop <|endoftext|>",
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"),
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-257aa726584f24970a4f240765e75a7169bfbe7f4966c1f04513d6b6c860583a"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
})
actual, err = os.ReadFile(filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"))
if err != nil {
t.Fatal(err)
}
expect, err = json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"<|endoftext|>"}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
t.Errorf("expected %s, actual %s", string(expect), string(actual))
}
}
func TestCreateReplacesMessages(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nMESSAGE assistant \"What is my purpose?\"\nMESSAGE user \"You run tests.\"\nMESSAGE assistant \"Oh, my god.\"", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e0e27d47045063ccb167ae852c51d49a98eab33fabaee4633fdddf97213e40b5"),
})
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nMESSAGE assistant \"You're a test, Harry.\"\nMESSAGE user \"I-I'm a what?\"\nMESSAGE assistant \"A test. And a thumping good one at that, I'd wager.\"",
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"),
filepath.Join(p, "blobs", "sha256-4f48b25fe9969564c82f58eb1cedbdff6484cc0baf474bc6c2a9b37c8da3362a"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"),
filepath.Join(p, "blobs", "sha256-e0e27d47045063ccb167ae852c51d49a98eab33fabaee4633fdddf97213e40b5"),
})
type message struct {
Role string `json:"role"`
Content string `json:"content"`
}
f, err := os.Open(filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"))
if err != nil {
t.Fatal(err)
}
defer f.Close()
var actual []message
if err := json.NewDecoder(f).Decode(&actual); err != nil {
t.Fatal(err)
}
expect := []message{
{Role: "assistant", Content: "You're a test, Harry."},
{Role: "user", Content: "I-I'm a what?"},
{Role: "assistant", Content: "A test. And a thumping good one at that, I'd wager."},
}
if !slices.Equal(actual, expect) {
t.Errorf("expected %s, actual %s", expect, actual)
}
}
func TestCreateTemplateSystem(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}\nSYSTEM Say hello!\nTEMPLATE {{ .System }} {{ .Prompt }}\nSYSTEM Say bye!", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2b5e330885117c82f3fd75169ea323e141070a2947c11ddb9f79ee0b01c589c1"),
filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
})
template, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"))
if err != nil {
t.Fatal(err)
}
if string(template) != "{{ .System }} {{ .Prompt }}" {
t.Errorf("expected \"{{ .System }} {{ .Prompt }}\", actual %s", template)
}
system, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86"))
if err != nil {
t.Fatal(err)
}
if string(system) != "Say bye!" {
t.Errorf("expected \"Say bye!\", actual %s", system)
}
}
func TestCreateLicenses(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nLICENSE MIT\nLICENSE Apache-2.0", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608"),
filepath.Join(p, "blobs", "sha256-79a39c37536ddee29cbadd5d5e2dcba8ed7f03e431f626ff38432c1c866bb7e2"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7"),
})
mit, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7"))
if err != nil {
t.Fatal(err)
}
if string(mit) != "MIT" {
t.Errorf("expected MIT, actual %s", mit)
}
apache, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608"))
if err != nil {
t.Fatal(err)
}
if string(apache) != "Apache-2.0" {
t.Errorf("expected Apache-2.0, actual %s", apache)
}
}
func TestCreateDetectTemplate(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
t.Run("matched", func(t *testing.T) {
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, llm.KV{
"tokenizer.chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
}, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2f8e594e6f34b1b4d36a246628eeb3365ce442303d656f1fcc69e821722acea0"),
filepath.Join(p, "blobs", "sha256-542b217f179c7825eeb5bca3c77d2b75ed05bafbd3451d9188891a60a85337c6"),
filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"),
})
})
t.Run("unmatched", func(t *testing.T) {
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-ca239d7bd8ea90e4a5d2e6bf88f8d74a47b14336e73eb4e18bed4dd325018116"),
})
})
}
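The blob names asserted throughout these tests are content-addressed: each file under blobs/ is named sha256-<hex digest of its bytes>. That is why TestCreateUnsetsSystem can hard-code sha256-e3b0c442…, the well-known SHA-256 of zero bytes, for the layer produced by SYSTEM "". The expected paths can be reproduced directly:

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// blobPath builds the content-addressed filename used by the tests above.
func blobPath(models string, data []byte) string {
	digest := fmt.Sprintf("sha256-%x", sha256.Sum256(data))
	return filepath.Join(models, "blobs", digest)
}

func main() {
	// Prints ".../blobs/sha256-e3b0c44298fc1c14...", the digest of zero
	// bytes, matching the empty SYSTEM layer checked in the test.
	fmt.Println(blobPath("/tmp/models", nil))
}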


@@ -1,12 +1,15 @@
 package server

 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
 	"net/http"
 	"path/filepath"
 	"testing"

 	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/types/model"
 )

 func TestDelete(t *testing.T) {
@@ -16,7 +19,7 @@ func TestDelete(t *testing.T) {
 	w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
 		Name:      "test",
-		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+		Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 	})

 	if w.Code != http.StatusOK {
@@ -25,7 +28,7 @@ func TestDelete(t *testing.T) {
 	w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
 		Name:      "test2",
-		Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t)),
+		Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
 	})

 	if w.Code != http.StatusOK {
@@ -69,3 +72,33 @@ func TestDelete(t *testing.T) {
 	checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
 	checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{})
 }
func TestDeleteDuplicateLayers(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server
n := model.ParseName("test")
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(&ConfigV2{}); err != nil {
t.Fatal(err)
}
config, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json")
if err != nil {
t.Fatal(err)
}
// create a manifest with duplicate layers
if err := WriteManifest(n, config, []*Layer{config}); err != nil {
t.Fatal(err)
}
w := createRequest(t, s.DeleteModelHandler, api.DeleteRequest{Name: "test"})
if w.Code != http.StatusOK {
t.Errorf("expected status code 200, actual %d", w.Code)
}
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
}
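TestDeleteDuplicateLayers drives the new WriteManifest(model.Name, …) signature directly, writing a manifest whose config layer also appears in Layers. For reference, the JSON it encodes has roughly this shape; the struct below is a simplified stand-in, since the real ManifestV2 nests full Layer objects rather than digest strings:

package main

import (
	"encoding/json"
	"os"
)

// manifest mirrors the shape encoded by the new WriteManifest.
type manifest struct {
	SchemaVersion int      `json:"schemaVersion"`
	MediaType     string   `json:"mediaType"`
	Config        string   `json:"config"`
	Layers        []string `json:"layers"`
}

func main() {
	m := manifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config:        "sha256-…",
		Layers:        []string{"sha256-…", "sha256-…"}, // duplicates are legal
	}
	json.NewEncoder(os.Stdout).Encode(m)
}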


@@ -29,7 +29,7 @@ func TestList(t *testing.T) {
 	for _, n := range expectNames {
 		createRequest(t, s.CreateModelHandler, api.CreateRequest{
 			Name:      n,
-			Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+			Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 		})
 	}


@@ -15,9 +15,11 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/types/model"
 	"github.com/ollama/ollama/version"
 )

@@ -25,20 +27,20 @@ func createTestFile(t *testing.T, name string) string {
 	t.Helper()

 	f, err := os.CreateTemp(t.TempDir(), name)
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	defer f.Close()

 	err = binary.Write(f, binary.LittleEndian, []byte("GGUF"))
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	err = binary.Write(f, binary.LittleEndian, uint32(3))
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	err = binary.Write(f, binary.LittleEndian, uint64(0))
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	err = binary.Write(f, binary.LittleEndian, uint64(0))
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	return f.Name()
 }
@@ -53,16 +55,18 @@ func Test_Routes(t *testing.T) {
 	}

 	createTestModel := func(t *testing.T, name string) {
+		t.Helper()
+
 		fname := createTestFile(t, "ollama-model")

 		r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname))
 		modelfile, err := parser.ParseFile(r)
-		assert.Nil(t, err)
+		require.NoError(t, err)
 		fn := func(resp api.ProgressResponse) {
 			t.Logf("Status: %s", resp.Status)
 		}
-		err = CreateModel(context.TODO(), name, "", "", modelfile, fn)
-		assert.Nil(t, err)
+		err = CreateModel(context.TODO(), model.ParseName(name), "", "", modelfile, fn)
+		require.NoError(t, err)
 	}

 	testCases := []testCase{
@@ -74,9 +78,9 @@ func Test_Routes(t *testing.T) {
 			},
 			Expected: func(t *testing.T, resp *http.Response) {
 				contentType := resp.Header.Get("Content-Type")
-				assert.Equal(t, contentType, "application/json; charset=utf-8")
+				assert.Equal(t, "application/json; charset=utf-8", contentType)
 				body, err := io.ReadAll(resp.Body)
-				assert.Nil(t, err)
+				require.NoError(t, err)
 				assert.Equal(t, fmt.Sprintf(`{"version":"%s"}`, version.Version), string(body))
 			},
 		},
@@ -86,17 +90,17 @@ func Test_Routes(t *testing.T) {
 			Path:   "/api/tags",
 			Expected: func(t *testing.T, resp *http.Response) {
 				contentType := resp.Header.Get("Content-Type")
-				assert.Equal(t, contentType, "application/json; charset=utf-8")
+				assert.Equal(t, "application/json; charset=utf-8", contentType)
 				body, err := io.ReadAll(resp.Body)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				var modelList api.ListResponse

 				err = json.Unmarshal(body, &modelList)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				assert.NotNil(t, modelList.Models)
-				assert.Equal(t, 0, len(modelList.Models))
+				assert.Empty(t, len(modelList.Models))
 			},
 		},
 		{
@@ -108,16 +112,18 @@ func Test_Routes(t *testing.T) {
 			},
 			Expected: func(t *testing.T, resp *http.Response) {
 				contentType := resp.Header.Get("Content-Type")
-				assert.Equal(t, contentType, "application/json; charset=utf-8")
+				assert.Equal(t, "application/json; charset=utf-8", contentType)
 				body, err := io.ReadAll(resp.Body)
-				assert.Nil(t, err)
+				require.NoError(t, err)
+
+				assert.NotContains(t, string(body), "expires_at")

 				var modelList api.ListResponse
 				err = json.Unmarshal(body, &modelList)
-				assert.Nil(t, err)
+				require.NoError(t, err)

-				assert.Equal(t, 1, len(modelList.Models))
-				assert.Equal(t, modelList.Models[0].Name, "test-model:latest")
+				assert.Len(t, modelList.Models, 1)
+				assert.Equal(t, "test-model:latest", modelList.Models[0].Name)
 			},
 		},
 		{
@@ -134,7 +140,7 @@ func Test_Routes(t *testing.T) {
 					Stream:    &stream,
 				}
 				jsonData, err := json.Marshal(createReq)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				req.Body = io.NopCloser(bytes.NewReader(jsonData))
 			},
@@ -142,11 +148,11 @@ func Test_Routes(t *testing.T) {
 				contentType := resp.Header.Get("Content-Type")
 				assert.Equal(t, "application/json", contentType)
 				_, err := io.ReadAll(resp.Body)
-				assert.Nil(t, err)
-				assert.Equal(t, resp.StatusCode, 200)
+				require.NoError(t, err)
+				assert.Equal(t, 200, resp.StatusCode)

 				model, err := GetModel("t-bone")
-				assert.Nil(t, err)
+				require.NoError(t, err)
 				assert.Equal(t, "t-bone:latest", model.ShortName)
 			},
 		},
@@ -161,13 +167,13 @@ func Test_Routes(t *testing.T) {
 					Destination: "beefsteak",
 				}
 				jsonData, err := json.Marshal(copyReq)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				req.Body = io.NopCloser(bytes.NewReader(jsonData))
 			},
 			Expected: func(t *testing.T, resp *http.Response) {
 				model, err := GetModel("beefsteak")
-				assert.Nil(t, err)
+				require.NoError(t, err)
 				assert.Equal(t, "beefsteak:latest", model.ShortName)
 			},
 		},
@@ -179,18 +185,18 @@ func Test_Routes(t *testing.T) {
 				createTestModel(t, "show-model")
 				showReq := api.ShowRequest{Model: "show-model"}
 				jsonData, err := json.Marshal(showReq)
-				assert.Nil(t, err)
+				require.NoError(t, err)
 				req.Body = io.NopCloser(bytes.NewReader(jsonData))
 			},
 			Expected: func(t *testing.T, resp *http.Response) {
 				contentType := resp.Header.Get("Content-Type")
-				assert.Equal(t, contentType, "application/json; charset=utf-8")
+				assert.Equal(t, "application/json; charset=utf-8", contentType)
 				body, err := io.ReadAll(resp.Body)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				var showResp api.ShowResponse
 				err = json.Unmarshal(body, &showResp)
-				assert.Nil(t, err)
+				require.NoError(t, err)

 				var params []string
 				paramsSplit := strings.Split(showResp.Parameters, "\n")
@@ -221,14 +227,14 @@ func Test_Routes(t *testing.T) {
 		t.Run(tc.Name, func(t *testing.T) {
 			u := httpSrv.URL + tc.Path
 			req, err := http.NewRequestWithContext(context.TODO(), tc.Method, u, nil)
-			assert.Nil(t, err)
+			require.NoError(t, err)

 			if tc.Setup != nil {
 				tc.Setup(t, req)
 			}

 			resp, err := httpSrv.Client().Do(req)
-			assert.Nil(t, err)
+			require.NoError(t, err)
 			defer resp.Body.Close()

 			if tc.Expected != nil {
@@ -255,7 +261,7 @@ func TestCase(t *testing.T) {
 		t.Run(tt, func(t *testing.T) {
 			w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
 				Name:      tt,
-				Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+				Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 				Stream:    &stream,
 			})
@@ -271,7 +277,7 @@ func TestCase(t *testing.T) {
 			t.Run("create", func(t *testing.T) {
 				w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
 					Name:      strings.ToUpper(tt),
-					Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
+					Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
 					Stream:    &stream,
 				})
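Test_Routes follows the standard httptest pattern: start one real server, then let each table entry shape its request in Setup and check the response in Expected. A condensed skeleton of the same structure, with a stub handler standing in for the Ollama router:

package example

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRoutesSkeleton(t *testing.T) {
	// Stub handler in place of (*Server).GenerateRoutes().
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.Write([]byte(`{"version":"0.0.0"}`))
	}))
	defer srv.Close()

	cases := []struct{ name, method, path string }{
		{"version handler", http.MethodGet, "/api/version"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, err := http.NewRequestWithContext(context.TODO(), tc.method, srv.URL+tc.path, nil)
			require.NoError(t, err)

			resp, err := srv.Client().Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()

			assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
		})
	}
}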


@@ -7,17 +7,17 @@ import (
 	"log/slog"
 	"reflect"
 	"runtime"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
 	"time"

 	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/gpu"
 	"github.com/ollama/ollama/llm"
-	"github.com/ollama/ollama/envconfig"
-	"golang.org/x/exp/slices"
 )

 type LlmRequest struct {
@@ -66,7 +66,7 @@ func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options,
 		opts.NumCtx = 4
 	}

-	opts.NumCtx = opts.NumCtx * envconfig.NumParallel
+	opts.NumCtx *= envconfig.NumParallel

 	req := &LlmRequest{
 		ctx: c,
@@ -370,7 +370,6 @@ func (s *Scheduler) updateFreeSpace(allGpus gpu.GpuInfoList) {
 		r.refMu.Lock()
 		gpuIDs := make([]string, 0, len(r.gpus))
 		if r.llama != nil {
-
 			// TODO this should be broken down by GPU instead of assuming uniform spread
 			estimatedVRAMPerGPU := r.llama.EstimatedVRAM() / uint64(len(r.gpus))
 			for _, gpu := range r.gpus {
@@ -529,7 +528,6 @@ func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
 		}
 	}()
 	return finished
-
 }

 type ByDuration []*runnerRef
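Both this file and routes.go swap golang.org/x/exp/slices for the standard library slices package (Go 1.21+). Its SortStableFunc takes a comparator returning an int, which pairs naturally with cmp.Compare; the most-recently-modified-first sort from ListModelsHandler, for example, reduces to:

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

func main() {
	type m struct {
		Name       string
		ModifiedAt time.Time
	}
	models := []m{
		{"old", time.Unix(100, 0)},
		{"new", time.Unix(200, 0)},
	}
	// Most recently modified first: comparing j against i inverts the order.
	slices.SortStableFunc(models, func(i, j m) int {
		return cmp.Compare(j.ModifiedAt.Unix(), i.ModifiedAt.Unix())
	})
	fmt.Println(models[0].Name) // "new"
}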


@@ -12,11 +12,10 @@ import (
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/app/lifecycle"
+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/gpu"
 	"github.com/ollama/ollama/llm"
-	"github.com/ollama/ollama/envconfig"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -53,10 +52,10 @@ func TestLoad(t *testing.T) {
 	}
 	gpus := gpu.GpuInfoList{}
 	s.load(req, ggml, gpus)
-	require.Len(t, req.successCh, 0)
+	require.Empty(t, req.successCh)
 	require.Len(t, req.errCh, 1)
 	s.loadedMu.Lock()
-	require.Len(t, s.loaded, 0)
+	require.Empty(t, s.loaded)
 	s.loadedMu.Unlock()
 	err := <-req.errCh
 	require.Contains(t, err.Error(), "this model may be incompatible")
@@ -113,7 +112,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
 	t.Helper()

 	f, err := os.CreateTemp(t.TempDir(), modelName)
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	defer f.Close()

 	gguf := llm.NewGGUFV3(binary.LittleEndian)
@@ -131,7 +130,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
 	}, []llm.Tensor{
 		{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}},
 	})
-	assert.Nil(t, err)
+	require.NoError(t, err)

 	fname := f.Name()
 	model := &Model{Name: modelName, ModelPath: fname}
@@ -190,8 +189,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario1a.req.successCh:
 		require.Equal(t, resp.llama, scenario1a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario1a.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario1a.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -203,8 +202,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario1b.req.successCh:
 		require.Equal(t, resp.llama, scenario1a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario1b.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario1b.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -221,8 +220,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario2a.req.successCh:
 		require.Equal(t, resp.llama, scenario2a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario2a.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario2a.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -237,8 +236,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario3a.req.successCh:
 		require.Equal(t, resp.llama, scenario3a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario3a.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario3a.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -253,8 +252,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario3b.req.successCh:
 		require.Equal(t, resp.llama, scenario3b.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario3b.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario3b.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -269,8 +268,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario3c.req.successCh:
 		require.Equal(t, resp.llama, scenario3c.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario3c.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario3c.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -296,8 +295,8 @@ func TestRequests(t *testing.T) {
 	select {
 	case resp := <-scenario3d.req.successCh:
 		require.Equal(t, resp.llama, scenario3d.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, scenario3d.req.errCh, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, scenario3d.req.errCh)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -332,7 +331,7 @@ func TestGetRunner(t *testing.T) {
 	slog.Info("scenario1b")
 	successCh1b, errCh1b := s.GetRunner(scenario1b.ctx, scenario1b.req.model, scenario1b.req.opts, scenario1b.req.sessionDuration)
 	require.Len(t, s.pendingReqCh, 1)
-	require.Len(t, successCh1b, 0)
+	require.Empty(t, successCh1b)
 	require.Len(t, errCh1b, 1)
 	err := <-errCh1b
 	require.Contains(t, err.Error(), "server busy")
@@ -340,8 +339,8 @@ func TestGetRunner(t *testing.T) {
 	select {
 	case resp := <-successCh1a:
 		require.Equal(t, resp.llama, scenario1a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, errCh1a, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, errCh1a)
 	case <-ctx.Done():
 		t.Errorf("timeout")
 	}
@@ -355,9 +354,9 @@ func TestGetRunner(t *testing.T) {
 	successCh1c, errCh1c := s.GetRunner(scenario1c.ctx, scenario1c.req.model, scenario1c.req.opts, scenario1c.req.sessionDuration)
 	// Starts in pending channel, then should be quickly processsed to return an error
 	time.Sleep(5 * time.Millisecond)
-	require.Len(t, successCh1c, 0)
+	require.Empty(t, successCh1c)
 	s.loadedMu.Lock()
-	require.Len(t, s.loaded, 0)
+	require.Empty(t, s.loaded)
 	s.loadedMu.Unlock()
 	require.Len(t, errCh1c, 1)
 	err = <-errCh1c
@@ -386,8 +385,8 @@ func TestPrematureExpired(t *testing.T) {
 	select {
 	case resp := <-successCh1a:
 		require.Equal(t, resp.llama, scenario1a.srv)
-		require.Len(t, s.pendingReqCh, 0)
-		require.Len(t, errCh1a, 0)
+		require.Empty(t, s.pendingReqCh)
+		require.Empty(t, errCh1a)
 		s.loadedMu.Lock()
 		require.Len(t, s.loaded, 1)
 		s.loadedMu.Unlock()
@@ -401,9 +400,9 @@ func TestPrematureExpired(t *testing.T) {
 	time.Sleep(20 * time.Millisecond)
 	require.LessOrEqual(t, len(s.finishedReqCh), 1)
 	time.Sleep(10 * time.Millisecond)
-	require.Len(t, s.finishedReqCh, 0)
+	require.Empty(t, s.finishedReqCh)
 	s.loadedMu.Lock()
-	require.Len(t, s.loaded, 0)
+	require.Empty(t, s.loaded)
 	s.loadedMu.Unlock()

 	// also shouldn't happen in real life
@@ -487,7 +486,6 @@ func TestFindRunnerToUnload(t *testing.T) {
 	r2.refCount = 1
 	resp = s.findRunnerToUnload()
 	require.Equal(t, r1, resp)
-
 }

 func TestNeedsReload(t *testing.T) {


@@ -146,7 +146,7 @@ func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
 		case requestURL := <-b.nextURL:
 			g.Go(func() error {
 				var err error
-				for try := 0; try < maxRetries; try++ {
+				for try := range maxRetries {
 					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
 					switch {
 					case errors.Is(err, context.Canceled):
@@ -190,7 +190,7 @@ func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
 	headers.Set("Content-Type", "application/octet-stream")
 	headers.Set("Content-Length", "0")

-	for try := 0; try < maxRetries; try++ {
+	for try := range maxRetries {
 		var resp *http.Response
 		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
 		if errors.Is(err, context.Canceled) {
@@ -253,7 +253,7 @@ func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *
 	}

 	// retry uploading to the redirect URL
-	for try := 0; try < maxRetries; try++ {
+	for try := range maxRetries {
 		err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, nil)
 		switch {
 		case errors.Is(err, context.Canceled):
@@ -391,7 +391,7 @@ func uploadBlob(ctx context.Context, mp ModelPath, layer *Layer, opts *registryO
 		return err
 	}

-	// nolint: contextcheck
+	//nolint:contextcheck
 	go upload.Run(context.Background(), opts)
 }
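for try := range maxRetries is Go 1.22's range-over-int form: it yields try = 0 through maxRetries-1, exactly like the three-clause loop it replaces, just with less room for off-by-one mistakes. A minimal demonstration:

package main

import "fmt"

const maxRetries = 3

func main() {
	for try := range maxRetries { // Go 1.22+: iterates 0, 1, 2
		fmt.Println("attempt", try)
	}
}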

templates/alfred.gotmpl (new file)

@@ -0,0 +1 @@
{{ if .System }}<start_system>{{ .System }}<end_message>{{ end }}{{ if .Prompt }}<start_user>{{ .Prompt }}<end_message>{{ end }}<start_assistant>{{ .Response }}<end_message>

templates/alpaca.gotmpl (new file)

@@ -0,0 +1,7 @@
{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}
{{ end }}### Response:
{{ .Response }}

templates/chatml.gotmpl (new file)

@@ -0,0 +1,6 @@
{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
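These .gotmpl files are plain Go text/template sources rendered against .System, .Prompt, and .Response. Rendering the ChatML template above with sample values, for instance (a standalone sketch; the server supplies the real fields):

package main

import (
	"os"
	"text/template"
)

const chatml = `{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>`

func main() {
	t := template.Must(template.New("chatml").Parse(chatml))
	err := t.Execute(os.Stdout, map[string]string{
		"System":   "You are concise.",
		"Prompt":   "Say hi.",
		"Response": "", // empty at generation time; the model fills it in
	})
	if err != nil {
		panic(err)
	}
}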

templates/chatqa.gotmpl (new file)

@@ -0,0 +1,5 @@
{{ if .System }}System: {{ .System }}
{{ end }}{{ if .Prompt }}User: {{ .Prompt }}
{{ end }}Assistant: <|begin_of_text|>{{ .Response }}


@@ -0,0 +1,8 @@
{{ if .System }} Source: system
{{ .System }} <step>{{ end }} Source: user
{{ .Prompt }} <step> Source: assistant
Destination: user
{{ .Response }}<step>


@@ -0,0 +1,3 @@
{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}User: {{ .Prompt }}
{{ end }}Assistant: {{ .Response }}


@@ -0,0 +1,4 @@
<start_of_turn>user
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
<start_of_turn>model
{{ .Response }}<end_of_turn>


@@ -0,0 +1,9 @@
{{ if .System }}
System:
{{ .System }}
{{ end }}{{ if .Prompt }}Question:
{{ .Prompt }}
{{ end }}Answer:
{{ .Response }}

templates/index.json (new file)

@@ -0,0 +1,138 @@
[
{
"template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
"name": "chatml"
},
{
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
"name": "zephyr"
},
{
"template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
"name": "chatml"
},
{
"template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}",
"name": "openchat"
},
{
"template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
"name": "zephyr"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
"name": "mistral-instruct"
},
{
"template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'### Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response\n'}}",
"name": "starcoder2-instruct"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}",
"name": "llama2-chat"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '<s>' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'] | trim %}{{ content + ' <step> ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}",
"name": "codellama-70b-instruct"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
"name": "mistral-instruct"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}",
"name": "chatml"
},
{
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif 'system' not in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'You are DBRX, created by Databricks. You were last updated in December 2023. You answer questions based on information available up to that point.\nYOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough responses to more complex and open-ended questions.\nYou assist with various tasks, from writing to coding (using markdown for code blocks \u2014 remember to use ``` with code, JSON, and tables).\n(You do not have real-time data access or code execution capabilities. You avoid stereotyping and provide balanced perspectives on controversial topics. You do not provide song lyrics, poems, or news articles and do not divulge details of your training data.)\nThis is your system prompt, guiding your responses. Do not reference it, just respond to the user. If you find yourself talking about this message, stop. You should be responding appropriately and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER\\'S QUERY.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message | trim + '<|im_end|>\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% endif %}{% endfor %}",
"name": "chatml"
},
{
"template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
"name": "alpaca"
},
{
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
"name": "chatqa"
},
{
"template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
"name": "gemma-instruct"
},
{
"template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
"name": "llama3-instruct"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}",
"name": "granite-instruct"
},
{
"template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'@@ Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'@@ Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'@@ Response\n'}}",
"name": "magicoder"
},
{
"template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<start_user>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'system' %}{{ '<start_system>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'assistant' %}{{ '<start_assistant>' + message['content'] + '<end_message>' }}{% else %}{{ raise_exception('Only system, user and assistant roles are supported.') }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<start_assistant>' }}{% endif %}{% endfor %}",
"name": "alfred"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
"name": "llama2-chat"
},
{
"template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{%- if messages[0]['role'] == 'system' -%}{% set loop_messages = messages[1:] %}{%- else -%}{% set loop_messages = messages %}{% endif %}System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context.\n\n{% for message in loop_messages %}{%- if message['role'] == 'user' -%}User: {{ message['content'].strip() + '\n\n' }}{%- else -%}Assistant: {{ message['content'].strip() + '\n\n' }}{%- endif %}{% if loop.last and message['role'] == 'user' %}Assistant:{% endif %}{% endfor %}",
"name": "chatqa"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User: \n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ 'System: ' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Falcon:\n' + message['content']}}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Falcon:' }}\n{% endif %}\n{% endfor %}",
"name": "falcon-instruct"
},
{
"template": "{% for message in messages %}{% if not loop.first %}{{ '\n' }}{% endif %}{% if message['role'] == 'system' %}{{ 'System: ' }}{% elif message['role'] == 'user' %}{{ 'User: ' }}{% elif message['role'] == 'assistant' %}{{ 'Falcon: ' }}{% endif %}{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ '\n' + 'Falcon:' }}{% endif %}",
"name": "falcon-instruct"
},
{
"template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}",
"name": "solar-instruct"
}
]


@@ -0,0 +1,3 @@
[INST] <<SYS>>{{ .System }}<</SYS>>
{{ .Prompt }} [/INST] {{ .Response }}


@@ -0,0 +1,7 @@
{{ if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ .Response }}<|eot_id|>


@@ -0,0 +1,7 @@
{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}@@ Instruction
{{ .Prompt }}
{{ end }}@@ Response
{{ .Response }}


@@ -0,0 +1,6 @@
{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>


@@ -0,0 +1 @@
{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>

6
templates/phi-3.gotmpl Normal file

@@ -0,0 +1,6 @@
{{ if .System }}<|system|>
{{ .System }}<|end|>
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}<|end|>
{{ end }}<|assistant|>
{{ .Response }}<|end|>


@@ -0,0 +1,8 @@
{{ if .System }}### System:
{{ .System }}
{{ end }}{{ if .Prompt }}### User:
{{ .Prompt }}
{{ end }}### Assistant:
{{ .Response }}


@@ -0,0 +1,9 @@
{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction
{{ .Prompt }}
{{ end }}### Response
{{ .Response }}<|endoftext|>

70
templates/template.go Normal file

@@ -0,0 +1,70 @@
package templates

import (
	"bytes"
	"embed"
	"encoding/json"
	"errors"
	"io"
	"math"
	"sync"

	"github.com/agnivade/levenshtein"
)

//go:embed index.json
var indexBytes []byte

//go:embed *.gotmpl
var templatesFS embed.FS

var templatesOnce = sync.OnceValues(func() ([]*Template, error) {
	var templates []*Template
	if err := json.Unmarshal(indexBytes, &templates); err != nil {
		return nil, err
	}

	for _, t := range templates {
		bts, err := templatesFS.ReadFile(t.Name + ".gotmpl")
		if err != nil {
			return nil, err
		}

		// normalize line endings
		t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n"))
	}

	return templates, nil
})

type Template struct {
	Name     string `json:"name"`
	Template string `json:"template"`
	Bytes    []byte
}

func (t Template) Reader() io.Reader {
	return bytes.NewReader(t.Bytes)
}

func NamedTemplate(s string) (*Template, error) {
	templates, err := templatesOnce()
	if err != nil {
		return nil, err
	}

	var template *Template
	score := math.MaxInt
	for _, t := range templates {
		if s := levenshtein.ComputeDistance(s, t.Template); s < score {
			score = s
			template = t
		}
	}

	if score < 100 {
		return template, nil
	}

	return nil, errors.New("no matching template found")
}
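
To show how this lookup is meant to be consumed, here is a hedged usage sketch. The import path is assumed from the repository layout, and the Jinja string is one of the chatml entries from index.json standing in for real model metadata:

package main

import (
	"fmt"
	"io"

	"github.com/ollama/ollama/templates" // assumed import path for the new package
)

func main() {
	// In practice this string would come from a model's
	// tokenizer.chat_template metadata rather than a literal.
	jinja := "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"

	t, err := templates.NamedTemplate(jinja)
	if err != nil {
		fmt.Println("no template within edit distance 100:", err)
		return
	}

	body, _ := io.ReadAll(t.Reader())
	fmt.Printf("matched %q:\n%s", t.Name, body)
}

Because the match is fuzzy (smallest Levenshtein distance under 100 against the known Jinja sources in index.json), minor whitespace or wording differences in a model's template should still resolve to the right .gotmpl.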


@@ -0,0 +1,59 @@
package templates

import (
	"bufio"
	"bytes"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"testing"
	"text/template"

	"github.com/ollama/ollama/llm"
)

func TestKVChatTemplate(t *testing.T) {
	f, err := os.Open(filepath.Join("testdata", "templates.jsonl"))
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		var ss map[string]string
		if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil {
			t.Fatal(err)
		}

		for k, v := range ss {
			t.Run(k, func(t *testing.T) {
				kv := llm.KV{"tokenizer.chat_template": v}
				s := kv.ChatTemplate()
				r, err := NamedTemplate(s)
				if err != nil {
					t.Fatal(err)
				}

				if r.Name != k {
					t.Errorf("expected %q, got %q", k, r.Name)
				}

				var b bytes.Buffer
				if _, err := io.Copy(&b, r.Reader()); err != nil {
					t.Fatal(err)
				}

				tmpl, err := template.New(s).Parse(b.String())
				if err != nil {
					t.Fatal(err)
				}

				if tmpl.Tree.Root.String() == "" {
					t.Errorf("empty %s template", k)
				}
			})
		}
	}
}
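
Each line of the JSONL fixture below maps an expected template name to a raw Jinja string, so every known chat_template variant is exercised against NamedTemplate. Assuming a standard module checkout, this table-driven test would presumably be run with:

	go test ./templates -run TestKVChatTemplate -v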

35
templates/testdata/templates.jsonl vendored Normal file

@@ -0,0 +1,35 @@
{"chatml": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}"}
{"chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"zephyr": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"}
{"chatml": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}"}
{"openchat": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"}
{"chatml": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"chatml": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"chatml": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"chatml": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"zephyr": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"}
{"mistral-instruct": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"}
{"starcoder2-instruct": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'### Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response\n'}}"}
{"llama2-chat": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}"}
{"codellama-70b-instruct": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '<s>' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'] | trim %}{{ content + ' <step> ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}"}
{"mistral-instruct": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"}
{"chatml": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}"}
{"chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"chatml": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}
{"chatml": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif 'system' not in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'You are DBRX, created by Databricks. You were last updated in December 2023. You answer questions based on information available up to that point.\nYOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough responses to more complex and open-ended questions.\nYou assist with various tasks, from writing to coding (using markdown for code blocks \u2014 remember to use ``` with code, JSON, and tables).\n(You do not have real-time data access or code execution capabilities. You avoid stereotyping and provide balanced perspectives on controversial topics. You do not provide song lyrics, poems, or news articles and do not divulge details of your training data.)\nThis is your system prompt, guiding your responses. Do not reference it, just respond to the user. If you find yourself talking about this message, stop. You should be responding appropriately and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER\\'S QUERY.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message | trim + '<|im_end|>\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% endif %}{% endfor %}"}
{"alpaca": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"}
{"chatqa": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"}
{"gemma-instruct": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"}
{"llama3-instruct": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"}
{"granite-instruct": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}"}
{"magicoder": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'@@ Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'@@ Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'@@ Response\n'}}"}
{"alfred": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<start_user>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'system' %}{{ '<start_system>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'assistant' %}{{ '<start_assistant>' + message['content'] + '<end_message>' }}{% else %}{{ raise_exception('Only system, user and assistant roles are supported.') }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<start_assistant>' }}{% endif %}{% endfor %}"}
{"llama2-chat": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"}
{"phi-3": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"}
{"phi-3": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}"}
{"phi-3": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"}
{"chatqa": "{{ bos_token }}{%- if messages[0]['role'] == 'system' -%}{% set loop_messages = messages[1:] %}{%- else -%}{% set loop_messages = messages %}{% endif %}System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context.\n\n{% for message in loop_messages %}{%- if message['role'] == 'user' -%}User: {{ message['content'].strip() + '\n\n' }}{%- else -%}Assistant: {{ message['content'].strip() + '\n\n' }}{%- endif %}{% if loop.last and message['role'] == 'user' %}Assistant:{% endif %}{% endfor %}"}
{"falcon-instruct": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User: \n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ 'System: ' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Falcon:\n' + message['content']}}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Falcon:' }}\n{% endif %}\n{% endfor %}"}
{"falcon-instruct": "{% for message in messages %}{% if not loop.first %}{{ '\n' }}{% endif %}{% if message['role'] == 'system' %}{{ 'System: ' }}{% elif message['role'] == 'user' %}{{ 'User: ' }}{% elif message['role'] == 'assistant' %}{{ 'Falcon: ' }}{% endif %}{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ '\n' + 'Falcon:' }}{% endif %}"}
{"solar-instruct": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}"}
{"chatml": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"}

3
templates/vicuna.gotmpl Normal file

@@ -0,0 +1,3 @@
{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}USER: {{ .Prompt }}
{{ end }}ASSISTANT: {{ .Response }}

6
templates/zephyr.gotmpl Normal file

@@ -0,0 +1,6 @@
{{ if .System }}<|system|>
{{ .System }}</s>
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}</s>
{{ end }}<|assistant|>
{{ .Response }}</s>


@@ -268,7 +268,6 @@ func TestNameIsValidPart(t *testing.T) {
			}
		})
	}
}

func TestFilepathAllocs(t *testing.T) {
@@ -325,7 +324,7 @@ func TestParseNameFromFilepath(t *testing.T) {
		filepath.Join("host:port", "namespace", "model", "tag"): {Host: "host:port", Namespace: "namespace", Model: "model", Tag: "tag"},
		filepath.Join("namespace", "model", "tag"):              {},
		filepath.Join("model", "tag"):                           {},
-		filepath.Join("model"):                                  {},
+		"model":                                                 {},
		filepath.Join("..", "..", "model", "tag"):               {},
		filepath.Join("", "namespace", ".", "tag"):              {},
		filepath.Join(".", ".", ".", "."):                       {},
@@ -382,14 +381,13 @@ func FuzzName(f *testing.F) {
				t.Errorf("String() = %q; want %q", n.String(), s)
			}
		}
	})
}

func TestIsValidNamespace(t *testing.T) {
	cases := []struct {
		username string
		expected bool
	}{
		{"", false},
		{"a", true},