Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-22 14:53:56 +00:00)
Merge branch 'ollama:main' into main

.github/workflows/release.yaml (1 line changed, vendored)

@@ -28,6 +28,7 @@ jobs:
 security unlock-keychain -p password build.keychain
 security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
 security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
+security set-keychain-settings -lut 3600 build.keychain
 - uses: actions/setup-go@v5
   with:
     go-version-file: go.mod
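
Note on the workflow change above: `security set-keychain-settings -lut 3600` appears to set the temporary build keychain to lock on sleep and only after a 3600-second timeout, presumably so the keychain is still unlocked when the later `codesign` steps run instead of relocking mid-build.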

README.md (28 lines changed)

@@ -69,15 +69,17 @@ Here are some example models that can be downloaded:
 | ------------------ | ---------- | ----- | ------------------------------ |
 | Llama 3 | 8B | 4.7GB | `ollama run llama3` |
 | Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
-| Phi-3 | 3.8B | 2.3GB | `ollama run phi3` |
+| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
+| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
+| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
+| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
 | Mistral | 7B | 4.1GB | `ollama run mistral` |
+| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
 | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
 | Starling | 7B | 4.1GB | `ollama run starling-lm` |
 | Code Llama | 7B | 3.8GB | `ollama run codellama` |
 | Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
 | LLaVA | 7B | 4.5GB | `ollama run llava` |
-| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
-| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
 | Solar | 10.7B | 6.1GB | `ollama run solar` |
 
 > Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.

@@ -210,25 +212,7 @@ ollama list
 
 ## Building
 
-Install `cmake` and `go`:
+See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
 
-```
-brew install cmake go
-```
-
-Then generate dependencies:
-
-```
-go generate ./...
-```
-
-Then build the binary:
-
-```
-go build .
-```
-
-More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
-
 ### Running local builds
 

cmd/cmd.go (36 lines changed)

@@ -35,6 +35,7 @@ import (
 "github.com/ollama/ollama/api"
 "github.com/ollama/ollama/auth"
 "github.com/ollama/ollama/format"
+"github.com/ollama/ollama/parser"
 "github.com/ollama/ollama/progress"
 "github.com/ollama/ollama/server"
 "github.com/ollama/ollama/types/errtypes"

@@ -63,7 +64,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
 }
 defer f.Close()
 
-modelfile, err := model.ParseFile(f)
+modelfile, err := parser.ParseFile(f)
 if err != nil {
 return err
 }

@@ -207,7 +208,7 @@ func tempZipFiles(path string) (string, error) {
 // pytorch files might also be unresolved git lfs references; skip if they are
 // covers pytorch_model-x-of-y.bin, pytorch_model.fp32-x-of-y.bin, pytorch_model.bin
 files = append(files, pt...)
-} else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/octet-stream"); len(pt) > 0 {
+} else if pt, _ := glob(filepath.Join(path, "consolidated*.pth"), "application/zip"); len(pt) > 0 {
 // pytorch files might also be unresolved git lfs references; skip if they are
 // covers consolidated.x.pth, consolidated.pth
 files = append(files, pt...)

@@ -1078,12 +1079,24 @@ func versionHandler(cmd *cobra.Command, _ []string) {
 }
 }
 
-func appendHostEnvDocs(cmd *cobra.Command) {
-const hostEnvDocs = `
+type EnvironmentVar struct {
+Name string
+Description string
+}
+
+func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) {
+if len(envs) == 0 {
+return
+}
+
+envUsage := `
 Environment Variables:
-OLLAMA_HOST The host:port or base URL of the Ollama server (e.g. http://localhost:11434)
 `
-cmd.SetUsageTemplate(cmd.UsageTemplate() + hostEnvDocs)
+for _, e := range envs {
+envUsage += fmt.Sprintf(" %-16s %s\n", e.Name, e.Description)
+}
+
+cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
 }
 
 func NewCLI() *cobra.Command {

@@ -1220,6 +1233,10 @@ Environment Variables:
 RunE: DeleteHandler,
 }
 
+ollamaHostEnv := EnvironmentVar{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"}
+ollamaNoHistoryEnv := EnvironmentVar{"OLLAMA_NOHISTORY", "Disable readline history"}
+envs := []EnvironmentVar{ollamaHostEnv}
+
 for _, cmd := range []*cobra.Command{
 createCmd,
 showCmd,

@@ -1231,7 +1248,12 @@ Environment Variables:
 copyCmd,
 deleteCmd,
 } {
-appendHostEnvDocs(cmd)
+switch cmd {
+case runCmd:
+appendEnvDocs(cmd, []EnvironmentVar{ollamaHostEnv, ollamaNoHistoryEnv})
+default:
+appendEnvDocs(cmd, envs)
+}
 }
 
 rootCmd.AddCommand(
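
To make the cmd/cmd.go refactor above easier to follow, here is a minimal, self-contained sketch (not the actual CLI wiring) of how the new `EnvironmentVar` slice is turned into usage text; the `%-16s` verb left-pads each variable name so the descriptions line up. Names and padding here are illustrative.

```go
package main

import "fmt"

// EnvironmentVar mirrors the struct introduced in the diff above.
type EnvironmentVar struct {
	Name        string
	Description string
}

func main() {
	// Example values taken from the diff; the formatting mirrors appendEnvDocs.
	envs := []EnvironmentVar{
		{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"},
		{"OLLAMA_NOHISTORY", "Disable readline history"},
	}

	envUsage := "\nEnvironment Variables:\n"
	for _, e := range envs {
		// %-16s left-aligns the name in a 16-character column.
		envUsage += fmt.Sprintf("      %-16s %s\n", e.Name, e.Description)
	}
	fmt.Print(envUsage)
}
```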

@@ -138,6 +138,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
 fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
 fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
+fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
 fmt.Fprintln(os.Stderr, "")
 fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
 fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")

@@ -182,6 +183,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 return err
 }
 
+if os.Getenv("OLLAMA_NOHISTORY") != "" {
+scanner.HistoryDisable()
+}
+
 fmt.Print(readline.StartBracketedPaste)
 defer fmt.Printf(readline.EndBracketedPaste)
 

@@ -18,6 +18,16 @@ import (
 "github.com/ollama/ollama/llm"
 )
 
+const (
+_ int32 = iota
+tokenTypeNormal
+tokenTypeUnknown
+tokenTypeControl
+tokenTypeUserDefined
+tokenTypeUnused
+tokenTypeByte
+)
+
 type Params struct {
 Architectures []string `json:"architectures"`
 VocabSize int `json:"vocab_size"`

@@ -37,6 +47,8 @@ type Params struct {
 Experts int `json:"num_local_experts"`
 ExpertsUsed int `json:"num_experts_per_tok"`
 
+PreTokenizer string
+
 ByteOrder
 }
 

@@ -74,10 +86,9 @@ func GetModelFormat(dirname string) (ModelFormat, error) {
 }
 
 for _, fn := range files {
-slog.Debug(fmt.Sprintf("file = %s", fn))
 if strings.HasSuffix(fn, ".safetensors") {
 return &SafetensorFormat{}, nil
-} else if strings.HasSuffix(fn, ".bin") {
+} else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") {
 slog.Debug("model is torch")
 return &TorchFormat{}, nil
 }

@@ -92,6 +103,7 @@ type Vocab struct {
 Tokens []string
 Scores []float32
 Types []int32
+Merges []string
 }
 
 func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {

@@ -170,7 +182,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
 }
 v.Tokens = append(v.Tokens, t.key)
 v.Scores = append(v.Scores, -1000.0)
-v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
+v.Types = append(v.Types, tokenTypeUserDefined)
 }
 slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens)))
 

@@ -180,7 +192,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
 for cnt := 0; cnt < missingTokens; cnt++ {
 v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
 v.Scores = append(v.Scores, -1)
-v.Types = append(v.Types, int32(llm.GGUFTokenUserDefined))
+v.Types = append(v.Types, tokenTypeUserDefined)
 }
 }
 
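
For context on the new constants above: the iota block appears to mirror the GGUF token-type enumeration used by llama.cpp (1 = normal, 2 = unknown, 3 = control, 4 = user-defined, 5 = unused, 6 = byte), which is presumably why the converter can drop `llm.GGUFTokenUserDefined` in favour of the local `tokenTypeUserDefined`.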

convert/convert_test.go (new file, 103 lines)

@@ -0,0 +1,103 @@
+//go:build slow
+
+package convert
+
+import (
+"os"
+"path/filepath"
+"testing"
+
+"github.com/ollama/ollama/llm"
+)
+
+func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) {
+t.Helper()
+
+mf, err := GetModelFormat(p)
+if err != nil {
+t.Fatal(err)
+}
+
+params, err := mf.GetParams(p)
+if err != nil {
+t.Fatal(err)
+}
+
+arch, err := mf.GetModelArch("", p, params)
+if err != nil {
+t.Fatal(err)
+}
+
+if err := arch.LoadVocab(); err != nil {
+t.Fatal(err)
+}
+
+if err := arch.GetTensors(); err != nil {
+t.Fatal(err)
+}
+
+f, err := os.CreateTemp(t.TempDir(), "f16")
+if err != nil {
+t.Fatal(err)
+}
+defer f.Close()
+
+if err := arch.WriteGGUF(f); err != nil {
+t.Fatal(err)
+}
+
+r, err := os.Open(f.Name())
+if err != nil {
+t.Fatal(err)
+}
+defer r.Close()
+
+m, _, err := llm.DecodeGGML(r)
+if err != nil {
+t.Fatal(err)
+}
+
+return m.KV(), m.Tensors()
+}
+
+func TestConvertFull(t *testing.T) {
+cases := []struct {
+path string
+arch string
+tensors int
+layers int
+}{
+{"Meta-Llama-3-8B-Instruct", "llama", 291, 35},
+{"Mistral-7B-Instruct-v0.2", "llama", 291, 35},
+{"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35},
+{"gemma-2b-it", "gemma", 164, 20},
+}
+
+for _, tt := range cases {
+t.Run(tt.path, func(t *testing.T) {
+p := filepath.Join("testdata", tt.path)
+if _, err := os.Stat(p); err != nil {
+t.Skipf("%s not found", p)
+}
+
+kv, tensors := convertFull(t, p)
+
+if kv.Architecture() != tt.arch {
+t.Fatalf("expected llama, got %s", kv.Architecture())
+}
+
+if kv.FileType().String() != "F16" {
+t.Fatalf("expected F16, got %s", kv.FileType())
+}
+
+if len(tensors) != tt.tensors {
+t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors))
+}
+
+layers := tensors.Layers()
+if len(layers) != tt.layers {
+t.Fatalf("expected %d layers, got %d", tt.layers, len(layers))
+}
+})
+}
+}
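
The new test file above is gated behind the `slow` build tag and skips any case whose `testdata` directory is missing, so it is presumably run on demand with something like `go test -tags slow ./convert` rather than in the default test pass.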

@@ -1,14 +1,11 @@
 package convert
 
 import (
-"encoding/binary"
 "fmt"
 "io"
 "log/slog"
-"os"
 "strings"
 
-"github.com/d4l3k/go-bfloat16"
 "github.com/pdevine/tensor"
 "github.com/pdevine/tensor/native"
 

@@ -19,49 +16,27 @@ type GemmaModel struct {
 ModelData
 }
 
-func gemmaLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
-slog.Debug(fmt.Sprintf("converting '%s'", r.t.Name))
-
-data := make([]byte, r.end-r.start)
-if err := binary.Read(f, r.bo, data); err != nil {
-return err
-}
-
-tDataF32 := bfloat16.DecodeFloat32(data)
-
-var err error
-tDataF32, err = addOnes(tDataF32, int(r.t.Shape[0]))
-if err != nil {
-return err
-}
-
-if err := binary.Write(w, r.bo, tDataF32); err != nil {
-return err
-}
-return nil
-}
-
 func addOnes(data []float32, vectorSize int) ([]float32, error) {
 n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data))
 ones := tensor.Ones(tensor.Float32, vectorSize)
 
-var err error
-n, err = n.Add(ones)
+n, err := n.Add(ones)
 if err != nil {
-return []float32{}, err
+return nil, err
 }
 
-newN, err := native.SelectF32(n, 0)
+ts, err := native.SelectF32(n, 0)
 if err != nil {
-return []float32{}, err
+return nil, err
 }
 
-var fullTensor []float32
-for _, v := range newN {
-fullTensor = append(fullTensor, v...)
+var f32s []float32
+for _, t := range ts {
+f32s = append(f32s, t...)
 }
 
-return fullTensor, nil
+return f32s, nil
 }
 
 func (m *GemmaModel) GetTensors() error {

@@ -71,12 +46,10 @@ func (m *GemmaModel) GetTensors() error {
 }
 
 slog.Debug(fmt.Sprintf("Total tensors: %d", len(t)))
 
-m.Tensors = []llm.Tensor{}
 for _, l := range t {
 if strings.HasSuffix(l.Name, "norm.weight") {
 wt := l.WriterTo.(safetensorWriterTo)
-wt.handler = gemmaLayerHandler
+wt.repacker = m.Repack
 l.WriterTo = wt
 }
 m.Tensors = append(m.Tensors, l)

@@ -94,6 +67,10 @@ func (m *GemmaModel) LoadVocab() error {
 return nil
 }
 
+func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) {
+return addOnes(data, int(shape[0]))
+}
+
 func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error {
 kv := llm.KV{
 "general.architecture": "gemma",
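
On the Gemma change above: `addOnes` exists because Gemma checkpoints appear to store each RMSNorm scale as an offset from one (the layer effectively applies `x * (1 + weight)`), so every `*norm.weight` tensor gets a vector of ones added before it is written out; the refactor only moves that adjustment from the removed byte-level `gemmaLayerHandler` into the new float32 `Repack` hook.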

convert/llama.go (172 lines changed)

@@ -1,17 +1,17 @@
 package convert
 
 import (
-"encoding/binary"
+"cmp"
+"errors"
 "fmt"
 "io"
-"log/slog"
+"os"
+"path/filepath"
 "regexp"
 "strings"
 
-"github.com/nlpodyssey/gopickle/pytorch"
 "github.com/pdevine/tensor"
 "github.com/pdevine/tensor/native"
-"github.com/x448/float16"
 
 "github.com/ollama/ollama/llm"
 )

@@ -20,81 +20,12 @@ type LlamaModel struct {
 ModelData
 }
 
-func llamaLayerHandler(w io.Writer, r torchWriterTo) error {
-slog.Debug(fmt.Sprintf("repacking layer '%s'", r.t.Name))
-
-data := r.storage.(*pytorch.HalfStorage).Data
-tData := make([]uint16, len(data))
-for cnt, v := range data {
-tData[cnt] = uint16(float16.Fromfloat32(v))
-}
-
-var err error
-var heads uint32
-if strings.Contains(r.t.Name, "attn_q") {
-heads = uint32(r.params.AttentionHeads)
-} else if strings.Contains(r.t.Name, "attn_k") {
-heads = uint32(r.params.KeyValHeads)
-if heads == 0 {
-heads = uint32(r.params.AttentionHeads)
-}
-} else {
-return fmt.Errorf("unknown layer type")
-}
-
-slog.Debug(fmt.Sprintf("heads = %d", heads))
-
-tData, err = llamaRepack(tData, int(heads), r.t.Shape)
-if err != nil {
-return err
-}
-
-if err = binary.Write(w, r.bo, tData); err != nil {
-return err
-}
-return nil
-}
-
-func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
-n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
-origShape := n.Shape().Clone()
-
-// reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
-if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
-return nil, err
-}
-
-if err := n.T(0, 2, 1, 3); err != nil {
-return nil, err
-}
-
-if err := n.Reshape(origShape...); err != nil {
-return nil, err
-}
-
-if err := n.Transpose(); err != nil {
-return nil, err
-}
-newN, err := native.SelectU16(n, 1)
-if err != nil {
-return nil, err
-}
-
-var fullTensor []uint16
-for _, v := range newN {
-fullTensor = append(fullTensor, v...)
-}
-return fullTensor, nil
-}
-
 func (m *LlamaModel) GetTensors() error {
 t, err := m.Format.GetTensors(m.Path, m.Params)
 if err != nil {
 return err
 }
 
-m.Tensors = []llm.Tensor{}
 
 pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
 re, err := regexp.Compile(pattern)
 if err != nil {

@@ -104,10 +35,16 @@ func (m *LlamaModel) GetTensors() error {
 for _, l := range t {
 matches := re.FindAllStringSubmatch(l.Name, -1)
 if len(matches) > 0 {
-slog.Debug(fmt.Sprintf("setting handler for: %s", l.Name))
+switch m.Format.(type) {
+case *TorchFormat:
 wt := l.WriterTo.(torchWriterTo)
-wt.handler = llamaLayerHandler
+wt.repacker = m.Repack
 l.WriterTo = wt
+case *SafetensorFormat:
+wt := l.WriterTo.(safetensorWriterTo)
+wt.repacker = m.Repack
+l.WriterTo = wt
+}
 }
 m.Tensors = append(m.Tensors, l)
 }

@@ -115,19 +52,22 @@ func (m *LlamaModel) GetTensors() error {
 return nil
 }
 
-func (m *LlamaModel) LoadVocab() error {
-var v *Vocab
-var err error
-
-slog.Debug("loading vocab")
-v, err = LoadSentencePieceTokens(m.Path, m.Params)
-if err != nil {
+func (m *LlamaModel) LoadVocab() (err error) {
+pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json"))
+if errors.Is(err, os.ErrNotExist) {
+return nil
+} else if err != nil {
 return err
 }
 
-slog.Debug("vocab loaded")
-
-m.Vocab = v
+m.Vocab = &Vocab{}
+for _, t := range ts {
+m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content)
+m.Vocab.Types = append(m.Vocab.Types, t.Type())
+}
+
+m.Vocab.Merges = merges
+m.Params.PreTokenizer = pre
 return nil
 }

@@ -140,23 +80,79 @@ func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error {
 "llama.embedding_length": uint32(m.Params.HiddenSize),
 "llama.block_count": uint32(m.Params.HiddenLayers),
 "llama.feed_forward_length": uint32(m.Params.IntermediateSize),
+"llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
 "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
 "llama.attention.head_count": uint32(m.Params.AttentionHeads),
 "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
 "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
 "general.file_type": uint32(1),
-"tokenizer.ggml.model": "llama",
+"tokenizer.ggml.model": "gpt2",
 
+"tokenizer.ggml.pre": m.Params.PreTokenizer,
 "tokenizer.ggml.tokens": m.Vocab.Tokens,
-"tokenizer.ggml.scores": m.Vocab.Scores,
 "tokenizer.ggml.token_type": m.Vocab.Types,
 
 "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
 "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
 "tokenizer.ggml.unknown_token_id": uint32(0),
-"tokenizer.ggml.add_bos_token": true,
-"tokenizer.ggml.add_eos_token": false,
+}
+
+if len(m.Vocab.Merges) > 0 {
+kv["tokenizer.ggml.merges"] = m.Vocab.Merges
+} else {
+kv["tokenizer.ggml.scores"] = m.Vocab.Scores
 }
 
 return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
 }
+
+func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
+return llamaRepack(name, m.Params, data, shape)
+}
+
+func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) {
+var dims []int
+for _, dim := range shape {
+if dim != 0 {
+dims = append(dims, int(dim))
+}
+}
+
+var heads int
+if strings.HasSuffix(name, "attn_q.weight") {
+heads = params.AttentionHeads
+} else if strings.HasSuffix(name, "attn_k.weight") {
+heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
+} else {
+return nil, fmt.Errorf("unknown tensor name: %s", name)
+}
+
+n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))
+if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil {
+return nil, err
+}
+
+if err := n.T(0, 2, 1, 3); err != nil {
+return nil, err
+}
+
+if err := n.Reshape(dims...); err != nil {
+return nil, err
+}
+
+if err := n.Transpose(); err != nil {
+return nil, err
+}
+
+ts, err := native.SelectF32(n, 1)
+if err != nil {
+return nil, err
+}
+
+var f32s []float32
+for _, t := range ts {
+f32s = append(f32s, t...)
+}
+
+return f32s, nil
+}
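
The `llamaRepack` rewrite above performs the usual HF-to-GGUF permutation of the Q/K projection weights: per attention head, the two rotary halves are stored interleaved in the checkpoint, and the reshape to (heads, 2, dim/heads/2, cols) followed by swapping axes 1 and 2 de-interleaves them. A simplified, loop-based illustration of that index permutation for a single hypothetical head (head dimension 4, two columns) is sketched below; the real code does the same thing through the `tensor` package.

```go
package main

import "fmt"

// Toy illustration of the (heads, 2, dim/2, cols) reshape followed by swapping
// axes 1 and 2, as used by llamaRepack, for one head with head dimension 4.
func main() {
	head := [][]float32{
		{0, 1}, // first rotary half, row 0
		{2, 3}, // first rotary half, row 1
		{4, 5}, // second rotary half, row 0
		{6, 7}, // second rotary half, row 1
	}

	halves := len(head) / 2
	out := make([][]float32, 0, len(head))
	for r := 0; r < halves; r++ { // new axis order: row index first ...
		for h := 0; h < 2; h++ { // ... then which rotary half
			out = append(out, head[h*halves+r])
		}
	}

	fmt.Println(out) // [[0 1] [4 5] [2 3] [6 7]]
}
```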

@@ -1,17 +1,8 @@
 package convert
 
 import (
-"encoding/binary"
-"fmt"
 "io"
-"os"
 "regexp"
-"strings"
 
-"github.com/d4l3k/go-bfloat16"
-"github.com/pdevine/tensor"
-"github.com/pdevine/tensor/native"
-"github.com/x448/float16"
-
 "github.com/ollama/ollama/llm"
 )

@@ -20,90 +11,12 @@ type MistralModel struct {
 ModelData
 }
 
-func mistralLayerHandler(w io.Writer, r safetensorWriterTo, f *os.File) error {
-layerSize := r.end - r.start
-
-var err error
-tData := make([]uint16, layerSize/2)
-if err = binary.Read(f, r.bo, tData); err != nil {
-return err
-}
-
-var heads uint32
-if strings.Contains(r.t.Name, "attn_q") {
-heads = uint32(r.params.AttentionHeads)
-} else if strings.Contains(r.t.Name, "attn_k") {
-heads = uint32(r.params.KeyValHeads)
-if heads == 0 {
-heads = uint32(r.params.AttentionHeads)
-}
-} else {
-return fmt.Errorf("unknown layer type")
-}
-
-tData, err = repack(tData, int(heads), r.t.Shape)
-if err != nil {
-return err
-}
-
-var buf []byte
-for _, n := range tData {
-buf = r.bo.AppendUint16(buf, n)
-}
-
-tempBuf := make([]uint16, len(tData))
-tDataF32 := bfloat16.DecodeFloat32(buf)
-for cnt, v := range tDataF32 {
-tDataF16 := float16.Fromfloat32(v)
-tempBuf[cnt] = uint16(tDataF16)
-}
-
-if err = binary.Write(w, r.bo, tempBuf); err != nil {
-return err
-}
-return nil
-}
-
-func repack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
-n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
-origShape := n.Shape().Clone()
-
-// reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
-if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
-return nil, err
-}
-
-if err := n.T(0, 2, 1, 3); err != nil {
-return nil, err
-}
-
-if err := n.Reshape(origShape...); err != nil {
-return nil, err
-}
-
-if err := n.Transpose(); err != nil {
-return nil, err
-}
-newN, err := native.SelectU16(n, 1)
-if err != nil {
-return nil, err
-}
-
-var fullTensor []uint16
-for _, v := range newN {
-fullTensor = append(fullTensor, v...)
-}
-return fullTensor, nil
-}
-
 func (m *MistralModel) GetTensors() error {
 t, err := m.Format.GetTensors(m.Path, m.Params)
 if err != nil {
 return err
 }
 
-m.Tensors = []llm.Tensor{}
 
 pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
 re, err := regexp.Compile(pattern)
 if err != nil {

@@ -114,7 +27,7 @@ func (m *MistralModel) GetTensors() error {
 matches := re.FindAllStringSubmatch(l.Name, -1)
 if len(matches) > 0 {
 wt := l.WriterTo.(safetensorWriterTo)
-wt.handler = mistralLayerHandler
+wt.repacker = m.Repack
 l.WriterTo = wt
 }
 m.Tensors = append(m.Tensors, l)

@@ -160,3 +73,7 @@ func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error {
 
 return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
 }
+
+func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
+return llamaRepack(name, m.Params, data, shape)
+}

@@ -17,8 +17,6 @@ func (m *MixtralModel) GetTensors() error {
 return err
 }
 
-m.Tensors = []llm.Tensor{}
 
 pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
 re, err := regexp.Compile(pattern)
 if err != nil {

@@ -29,7 +27,7 @@ func (m *MixtralModel) GetTensors() error {
 matches := re.FindAllStringSubmatch(l.Name, -1)
 if len(matches) > 0 {
 wt := l.WriterTo.(safetensorWriterTo)
-wt.handler = mistralLayerHandler
+wt.repacker = m.Repack
 l.WriterTo = wt
 }
 m.Tensors = append(m.Tensors, l)

@@ -83,3 +81,7 @@ func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error {
 
 return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
 }
+
+func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
+return llamaRepack(name, m.Params, data, shape)
+}

@@ -6,14 +6,13 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"log/slog"
 "os"
 "path/filepath"
 "regexp"
 "slices"
+"strings"
 
 "github.com/d4l3k/go-bfloat16"
-"github.com/mitchellh/mapstructure"
 "github.com/x448/float16"
 
 "github.com/ollama/ollama/llm"

@@ -26,39 +25,38 @@ type safetensorWriterTo struct {
 bo ByteOrder
 
 filename string
+dtype string
 
-start, end, padding uint64
-handler func(w io.Writer, r safetensorWriterTo, f *os.File) error
+offset, size int64
+repacker func(string, []float32, []uint64) ([]float32, error)
 }
 
-type tensorMetaData struct {
-Type string `mapstructure:"dtype"`
-Shape []int `mapstructure:"shape"`
-Offsets []int `mapstructure:"data_offsets"`
+type safetensorMetadata struct {
+Type string `json:"dtype"`
+Shape []uint64 `json:"shape"`
+Offsets []int64 `json:"data_offsets"`
 }
 
 type SafetensorFormat struct{}
 
 func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
-slog.Debug("getting tensor data")
 var tensors []llm.Tensor
-files, err := filepath.Glob(filepath.Join(dirpath, "/model-*.safetensors"))
+matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors"))
 if err != nil {
 return nil, err
 }
 
 var offset uint64
-for _, f := range files {
+for _, f := range matches {
 var t []llm.Tensor
 var err error
 t, offset, err = m.readTensors(f, offset, params)
 if err != nil {
-slog.Error(err.Error())
 return nil, err
 }
 
 tensors = append(tensors, t...)
 }
-slog.Debug(fmt.Sprintf("all tensors = %d", len(tensors)))
 return tensors, nil
 }
 

@@ -69,70 +67,57 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
 }
 defer f.Close()
 
-var jsonSize uint64
-if err := binary.Read(f, binary.LittleEndian, &jsonSize); err != nil {
+var n int64
+if err := binary.Read(f, binary.LittleEndian, &n); err != nil {
 return nil, 0, err
 }
 
-buf := make([]byte, jsonSize)
-_, err = io.ReadFull(f, buf)
-if err != nil {
+b := bytes.NewBuffer(make([]byte, 0, n))
+if _, err = io.CopyN(b, f, n); err != nil {
 return nil, 0, err
 }
 
-d := json.NewDecoder(bytes.NewBuffer(buf))
-d.UseNumber()
-var parsed map[string]interface{}
-if err = d.Decode(&parsed); err != nil {
+var headers map[string]safetensorMetadata
+if err := json.NewDecoder(b).Decode(&headers); err != nil {
 return nil, 0, err
 }
 
 var keys []string
-for k := range parsed {
-keys = append(keys, k)
+for key := range headers {
+if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") {
+keys = append(keys, key)
+}
 }
 
 slices.Sort(keys)
-slog.Info("converting layers")
 
 var tensors []llm.Tensor
-for _, k := range keys {
-vals := parsed[k].(map[string]interface{})
-var data tensorMetaData
-if err = mapstructure.Decode(vals, &data); err != nil {
-slog.Error("couldn't decode properly")
-return nil, 0, err
-}
-
-var size uint64
+for _, key := range keys {
+value := headers[key]
 var kind uint32
-switch len(data.Shape) {
+switch len(value.Shape) {
 case 0:
-// metadata
+// valuedata
 continue
-case 1:
-// convert to float32
-kind = 0
-size = uint64(data.Shape[0] * 4)
 case 2:
-// convert to float16
 kind = 1
-size = uint64(data.Shape[0] * data.Shape[1] * 2)
 }
 
-ggufName, err := m.GetLayerName(k)
+name, err := m.GetLayerName(key)
 if err != nil {
-slog.Error(err.Error())
 return nil, 0, err
 }
 
-shape := []uint64{0, 0, 0, 0}
-for i := range data.Shape {
-shape[i] = uint64(data.Shape[i])
+shape := make([]uint64, len(value.Shape))
+copy(shape, value.Shape)
+
+pad := func(s int64) int64 {
+return 8 + n + s
 }
 
 t := llm.Tensor{
-Name: ggufName,
+Name: name,
 Kind: kind,
 Offset: offset,
 Shape: shape[:],

@@ -143,18 +128,15 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
 params: params,
 bo: params.ByteOrder,
 filename: fn,
-start: uint64(data.Offsets[0]),
-end: uint64(data.Offsets[1]),
-padding: 8 + jsonSize,
+dtype: value.Type,
+offset: pad(value.Offsets[0]),
+size: pad(value.Offsets[1]) - pad(value.Offsets[0]),
 }
 
-offset += size
+offset += t.Size()
 tensors = append(tensors, t)
 }
 
-slog.Debug(fmt.Sprintf("total tensors for file = %d", len(tensors)))
-slog.Debug(fmt.Sprintf("offset = %d", offset))
 
 return tensors, offset, nil
 }
 

@@ -167,9 +149,7 @@ func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) {
 
 var params Params
 
-d := json.NewDecoder(f)
-err = d.Decode(&params)
-if err != nil {
+if err := json.NewDecoder(f).Decode(&params); err != nil {
 return nil, err
 }
 

@@ -224,55 +204,58 @@ func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) {
 }
 defer f.Close()
 
-if _, err = f.Seek(int64(r.padding+r.start), 0); err != nil {
+if _, err = f.Seek(r.offset, io.SeekStart); err != nil {
 return 0, err
 }
 
-// use the handler if one is present
-if r.handler != nil {
-return 0, r.handler(w, r, f)
+var f32s []float32
+switch r.dtype {
+case "F32":
+f32s = make([]float32, r.size/4)
+if err = binary.Read(f, r.bo, f32s); err != nil {
+return 0, err
 }
-
-remaining := r.end - r.start
-
-bufSize := uint64(10240)
-var finished bool
-for {
-data := make([]byte, min(bufSize, remaining))
-
-b, err := io.ReadFull(f, data)
-remaining -= uint64(b)
-
-if err == io.EOF || remaining <= 0 {
-finished = true
-} else if err != nil {
+case "F16":
+u16s := make([]uint16, r.size/2)
+if err = binary.Read(f, r.bo, u16s); err != nil {
 return 0, err
 }
 
-// convert bfloat16 -> ieee float32
-tDataF32 := bfloat16.DecodeFloat32(data)
+for _, b := range u16s {
+f32s = append(f32s, float16.Frombits(b).Float32())
+}
+
+case "BF16":
+u8s := make([]uint8, r.size)
+if err = binary.Read(f, r.bo, u8s); err != nil {
+return 0, err
+}
+
+f32s = bfloat16.DecodeFloat32(u8s)
+default:
+return 0, fmt.Errorf("unknown data type: %s", r.dtype)
+}
+
+if r.repacker != nil {
+f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
+if err != nil {
+return 0, err
+}
+}
 
 switch r.t.Kind {
 case 0:
-if err := binary.Write(w, r.bo, tDataF32); err != nil {
-return 0, err
-}
+return 0, binary.Write(w, r.bo, f32s)
 case 1:
-// convert float32 -> float16
-tempBuf := make([]uint16, len(data)/2)
-for cnt, v := range tDataF32 {
-tDataF16 := float16.Fromfloat32(v)
-tempBuf[cnt] = uint16(tDataF16)
-}
-if err := binary.Write(w, r.bo, tempBuf); err != nil {
-return 0, err
-}
+f16s := make([]uint16, len(f32s))
+for i := range f32s {
+f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
+}
+return 0, binary.Write(w, r.bo, f16s)
+default:
+return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
 }
-}
-if finished {
-break
-}
-}
-return 0, nil
 }
 
 func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {

@@ -281,6 +264,15 @@ func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (M
 return nil, fmt.Errorf("No architecture specified to convert")
 case 1:
 switch params.Architectures[0] {
+case "LlamaForCausalLM":
+return &LlamaModel{
+ModelData{
+Name: name,
+Path: dirPath,
+Params: params,
+Format: m,
+},
+}, nil
 case "MistralForCausalLM":
 return &MistralModel{
 ModelData{
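
For background on the new `offset`/`size` fields and the `pad` closure above: a safetensors file begins with an 8-byte little-endian header length followed by a JSON header, and each tensor's `data_offsets` are relative to the end of that header, so a tensor's absolute position is `8 + headerLen + data_offsets[0]`. A standalone sketch of reading such a header (hypothetical file name, not the converter itself):

```go
package main

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// safetensorEntry matches the per-tensor metadata in a safetensors JSON header.
type safetensorEntry struct {
	Dtype   string   `json:"dtype"`
	Shape   []uint64 `json:"shape"`
	Offsets []int64  `json:"data_offsets"`
}

func main() {
	f, err := os.Open("model.safetensors") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var n int64
	if err := binary.Read(f, binary.LittleEndian, &n); err != nil { // 8-byte header length
		panic(err)
	}

	header := make(map[string]safetensorEntry)
	if err := json.NewDecoder(io.LimitReader(f, n)).Decode(&header); err != nil {
		panic(err)
	}

	for name, e := range header {
		start := 8 + n + e.Offsets[0] // absolute file offset of the tensor bytes
		fmt.Printf("%s %s %v starts at byte %d\n", name, e.Dtype, e.Shape, start)
	}
}
```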

convert/tokenizer.go (new file, 109 lines)

@@ -0,0 +1,109 @@
+package convert
+
+import (
+"cmp"
+"crypto/sha256"
+"encoding/json"
+"fmt"
+"log/slog"
+"os"
+"slices"
+
+"golang.org/x/exp/maps"
+)
+
+type Tokenizer struct {
+Version string `json:"version"`
+AddedTokens []Token `json:"added_tokens"`
+Model TokenizerModel `json:"model"`
+
+PreTokenizer struct {
+PreTokenizers []struct {
+Type string `json:"type"`
+Pattern struct {
+Regex string `json:"Regex"`
+} `json:"pattern"`
+} `json:"pretokenizers"`
+} `json:"pre_tokenizer"`
+}
+
+type TokenizerModel struct {
+Type string `json:"type"`
+Vocab map[string]int `json:"vocab"`
+Merges []string `json:"merges"`
+Tokens []Token
+}
+
+type Token struct {
+ID int `json:"id"`
+Content string `json:"content"`
+Special bool `json:"special"`
+UserDefined bool
+}
+
+func (t *Token) Type() int32 {
+switch {
+case t.Special:
+return tokenTypeControl
+case t.UserDefined:
+return tokenTypeUserDefined
+default:
+return tokenTypeNormal
+}
+}
+
+func (t *Tokenizer) maxID() int {
+return max(
+slices.Max(maps.Values(t.Model.Vocab)),
+slices.MaxFunc(t.AddedTokens, func(a, b Token) int {
+return cmp.Compare(a.ID, b.ID)
+}).ID,
+)
+}
+
+func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) {
+f, err := os.Open(dirpath)
+if err != nil {
+panic(err)
+}
+defer f.Close()
+
+var t Tokenizer
+if err := json.NewDecoder(f).Decode(&t); err != nil {
+return "", nil, nil, err
+}
+
+tokens = make([]Token, t.maxID()+1)
+for k, v := range t.Model.Vocab {
+tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false}
+}
+
+for _, v := range t.AddedTokens {
+v.UserDefined = true
+tokens[v.ID] = v
+}
+
+sha256sum := sha256.New()
+for _, pt := range t.PreTokenizer.PreTokenizers {
+switch pt.Type {
+case "Split":
+if pt.Pattern.Regex != "" {
+sha256sum.Write([]byte(pt.Pattern.Regex))
+}
+}
+}
+
+switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest {
+case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f":
+pre = "llama-bpe"
+case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02":
+pre = "deepseek-llm"
+case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e":
+pre = "deepseek-coder"
+default:
+slog.Warn("unknown pretokenizer, using default", "digest", digest)
+pre = "default"
+}
+
+return pre, tokens, t.Model.Merges, nil
+}
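
The new `parseTokens` above fingerprints the pre-tokenizer by hashing the `Split` regexes from `tokenizer.json` and mapping known SHA-256 digests to a `tokenizer.ggml.pre` value (falling back to "default" with a warning). A minimal sketch of that fingerprinting idea, with made-up regexes standing in for whatever the real tokenizer.json contains:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// Hypothetical Split regexes; real ones come from tokenizer.json's pre_tokenizer.
	regexes := []string{`'s|'t|'re|'ve|'m|'ll|'d`, `\p{N}{1,3}`}

	sum := sha256.New()
	for _, re := range regexes {
		sum.Write([]byte(re)) // hash the regexes in order, as parseTokens does
	}

	digest := fmt.Sprintf("%x", sum.Sum(nil))
	fmt.Println(digest) // compare against the known digests, e.g. the "llama-bpe" one
}
```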

@@ -25,7 +25,7 @@ type torchWriterTo struct {
 bo ByteOrder
 
 storage pytorch.StorageInterface
-handler func(w io.Writer, r torchWriterTo) error
+repacker func(string, []float32, []uint64) ([]float32, error)
 }
 
 type TorchFormat struct{}

@@ -33,14 +33,14 @@ type TorchFormat struct{}
 func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
 slog.Debug("getting torch tensors")
 
-files, err := filepath.Glob(filepath.Join(dirpath, "pytorch_model-*.bin"))
-if err != nil {
-slog.Error("didn't find any torch files")
-return nil, err
+var files []string
+if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 {
+files = append(files, pt...)
+} else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 {
+files = append(files, pt...)
 }
 
 var offset uint64
 
 var tensors []llm.Tensor
 for _, fn := range files {
 m, err := pytorch.Load(fn)

@@ -77,7 +77,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
 slog.Error(err.Error())
 return nil, err
 }
-slog.Debug(fmt.Sprintf("finding name for '%s' -> '%s'", k.(string), ggufName))
+slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape))
 
 shape := []uint64{0, 0, 0, 0}
 for i := range tshape {

@@ -120,7 +120,7 @@ func getAltParams(dirpath string) (*Params, error) {
 AttentionHeads int `json:"n_heads"`
 KeyValHeads int `json:"n_kv_heads"`
 HiddenLayers int `json:"n_layers"`
-RopeTheta int `json:"rope_theta"`
+RopeTheta float64 `json:"rope_theta"`
 NormEPS float64 `json:"norm_eps"`
 }
 

@@ -133,6 +133,7 @@ func getAltParams(dirpath string) (*Params, error) {
 }
 
 params := &Params{
+Architectures: []string{"LlamaForCausalLM"},
 HiddenSize: tparams.HiddenSize,
 AttentionHeads: tparams.AttentionHeads,
 KeyValHeads: tparams.KeyValHeads,

@@ -229,37 +230,38 @@ func (m *TorchFormat) GetLayerName(n string) (string, error) {
 }
 
 func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) {
-// use the handler if one is present
-if r.handler != nil {
-return 0, r.handler(w, r)
+var f32s []float32
+switch s := r.storage.(type) {
+case *pytorch.FloatStorage:
+f32s = s.Data
+case *pytorch.HalfStorage:
+f32s = s.Data
+case *pytorch.BFloat16Storage:
+f32s = s.Data
+default:
+return 0, fmt.Errorf("unknown data type: %T", s)
+}
+
+if r.repacker != nil {
+f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
+if err != nil {
+return 0, err
+}
 }
 
-switch r.storage.(type) {
-case *pytorch.FloatStorage:
-slog.Warn(fmt.Sprintf("unexpected storage found for layer '%s'; skipping", r.t.Name))
-return 0, nil
-case *pytorch.HalfStorage:
 switch r.t.Kind {
 case 0:
-data := r.storage.(*pytorch.HalfStorage).Data
-slog.Debug(fmt.Sprintf("%35s F32 (%d)", r.t.Name, len(data)))
-if err := binary.Write(w, r.bo, data); err != nil {
-return 0, err
-}
+return 0, binary.Write(w, r.bo, f32s)
 case 1:
-data := r.storage.(*pytorch.HalfStorage).Data
-tData := make([]uint16, len(data))
-for cnt, v := range data {
-tData[cnt] = uint16(float16.Fromfloat32(v))
-}
-slog.Debug(fmt.Sprintf("%35s F16 (%d)", r.t.Name, len(tData)))
-if err := binary.Write(w, r.bo, tData); err != nil {
-return 0, err
-}
-}
+f16s := make([]uint16, len(f32s))
+for i := range f32s {
+f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
+}
+return 0, binary.Write(w, r.bo, f16s)
+default:
+return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
 }
-
-return 0, nil
 }
 
 func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {

@@ -6,6 +6,8 @@ Install required tools:
 - go version 1.22 or higher
 - gcc version 11.4.0 or higher
 
+### MacOS
+
 ```bash
 brew install go cmake gcc
 ```

docs/faq.md (93 lines changed)

@@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. Click on the ta
 On Linux, re-run the install script:
 
-```
+```shell
 curl -fsSL https://ollama.com/install.sh | sh
 ```
 

@@ -30,7 +30,7 @@ To change this when using `ollama run`, use `/set parameter`:
 
 When using the API, specify the `num_ctx` parameter:
 
-```
+```shell
 curl http://localhost:11434/api/generate -d '{
 "model": "llama3",
 "prompt": "Why is the sky blue?",

@@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{
 }'
 ```
 
+## How can I tell if my model was loaded onto the GPU?
+
+Use the `ollama ps` command to see what models are currently loaded into memory.
+
+```shell
+ollama ps
+NAME        ID            SIZE   PROCESSOR  UNTIL
+llama3:70b  bcfb190ca3a7  42 GB  100% GPU   4 minutes from now
+```
+
+The `Processor` column will show which memory the model was loaded in to:
+* `100% GPU` means the model was loaded entirely into the GPU
+* `100% CPU` means the model was loaded entirely in system memory
+* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory
+
 ## How do I configure Ollama server?
 
 Ollama server can be configured with environment variables.

@@ -94,6 +109,34 @@ On Windows, Ollama inherits your user and system environment variables.
 
 6. Start the Ollama application from the Windows Start menu.
 
+## How do I use Ollama behind a proxy?
+
+Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variables, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.
+
+### How do I use Ollama behind a proxy in Docker?
+
+The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.
+
+Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
+
+Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.
+
+```dockerfile
+FROM ollama/ollama
+COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
+RUN update-ca-certificates
+```
+
+Build and run this image:
+
+```shell
+docker build -t ollama-with-ca .
+docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
+```
+
+## Does Ollama send my prompts and answers back to ollama.com?
+
+No. Ollama runs locally, and conversation data does not leave your machine.
+
 ## How can I expose Ollama on my network?
 

@@ -120,7 +163,7 @@ server {
 
 Ollama can be accessed using a range of tools for tunneling tools. For example with Ngrok:
 
-```
+```shell
 ngrok http 11434 --host-header="localhost:11434"
 ```
 

@@ -128,7 +171,7 @@ ngrok http 11434 --host-header="localhost:11434"
 
 To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
 
-```
+```shell
 cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
 ```
 

@@ -150,39 +193,10 @@ If a different directory needs to be used, set the environment variable `OLLAMA_
|
|
||||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
|
|
||||||
## Does Ollama send my prompts and answers back to ollama.com?
|
|
||||||
|
|
||||||
No. Ollama runs locally, and conversation data does not leave your machine.
|
|
||||||
|
|
||||||
## How can I use Ollama in Visual Studio Code?
|
## How can I use Ollama in Visual Studio Code?
|
||||||
|
|
||||||
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
|
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
|
||||||
|
|
||||||
## How do I use Ollama behind a proxy?
|
|
||||||
|
|
||||||
Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variables, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.
|
|
||||||
|
|
||||||
### How do I use Ollama behind a proxy in Docker?
|
|
||||||
|
|
||||||
The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.
|
|
||||||
|
|
||||||
Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
|
|
||||||
|
|
||||||
Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
FROM ollama/ollama
|
|
||||||
COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
|
|
||||||
RUN update-ca-certificates
|
|
||||||
```
|
|
||||||
|
|
||||||
Build and run this image:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
docker build -t ollama-with-ca .
|
|
||||||
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
|
||||||
```
|
|
||||||
|
|
||||||
## How do I use Ollama with GPU acceleration in Docker?
|
## How do I use Ollama with GPU acceleration in Docker?
|
||||||
|
|
||||||
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
||||||
@@ -197,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a
|
|||||||
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
||||||
properties.
|
properties.
|
||||||
|
|
||||||
## How can I pre-load a model to get faster response times?
|
## How can I preload a model into Ollama to get faster response times?
|
||||||
|
|
||||||
If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
||||||
|
|
||||||
@@ -211,6 +225,11 @@ To use the chat completions endpoint, use:
|
|||||||
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To preload a model using the CLI, use the command:
|
||||||
|
```shell
|
||||||
|
ollama run llama3 ""
|
||||||
|
```
|
||||||
|
|
||||||
## How do I keep a model loaded in memory or make it unload immediately?
|
## How do I keep a model loaded in memory or make it unload immediately?
|
||||||
|
|
||||||
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory.
|
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory.
|
||||||
@@ -235,8 +254,6 @@ Alternatively, you can change the amount of time all models are loaded into memo
|
|||||||
|
|
||||||
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.
|
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.
|
||||||
|
|
||||||
## How do I manage the maximum number of requests the server can queue
|
## How do I manage the maximum number of requests the Ollama server can queue?
|
||||||
|
|
||||||
If too many requests are sent to the server, it will respond with a 503 error
|
If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`.
|
||||||
indicating the server is overloaded. You can adjust how many requests may be
|
|
||||||
queue by setting `OLLAMA_MAX_QUEUE`
|
|
||||||
|
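As a companion to the preload and `keep_alive` answers above, here is a minimal client sketch that preloads a model and controls how long it stays resident. It assumes a local server on the default port and a model named `llama3`; the request shape follows the `/api/generate` examples earlier in the FAQ, and everything else is standard library:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// An empty prompt preloads the model; keep_alive controls how long it
	// stays in memory afterwards (0 unloads immediately, -1 keeps it loaded
	// indefinitely, duration strings such as "10m" set a timeout).
	body, _ := json.Marshal(map[string]any{
		"model":      "llama3",
		"keep_alive": "10m",
	})

	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```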
@@ -37,16 +37,9 @@ Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.

## LLM libraries

Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU) you can workaround this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx` an the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library.

In the server log, you will see a message that looks something like this (varies from release to release):

```
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
@@ -54,9 +47,7 @@ Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]

**Experimental LLM Library Override**

You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card, but want to force the CPU LLM library with AVX2 vector support, use:

```
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
@@ -69,9 +60,7 @@ cat /proc/cpuinfo| grep flags | head -1

## Installing older or pre-release versions on Linux

If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.

```sh
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
@@ -79,20 +68,13 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh

## Linux tmp noexec

If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/

## Container fails to run on NVIDIA GPU

Make sure you've set up the conatiner runtime first as described in [docker.md](./docker.md)
Make sure you've set up the container runtime first as described in [docker.md](./docker.md)

Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem

- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`

@@ -33,7 +33,7 @@ Here's a quick example showing API access from `powershell`
## Troubleshooting

While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
a "view logs" menu item to the app, and increses logging for the GUI app and
a "view logs" menu item to the app, and increases logging for the GUI app and
server.

Ollama on Windows stores files in a few different locations. You can view them in

5 go.mod
@@ -4,12 +4,10 @@ go 1.22.0

require (
github.com/containerd/console v1.0.3
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
github.com/emirpasic/gods v1.18.1
github.com/gin-gonic/gin v1.10.0
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.1.2
github.com/mitchellh/mapstructure v1.5.0
github.com/olekukonko/tablewriter v0.0.5
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.9.0
@@ -18,6 +16,8 @@ require (
)

require (
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
github.com/mattn/go-runewidth v0.0.14
github.com/nlpodyssey/gopickle v0.3.0
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
)
@@ -33,7 +33,6 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/flatbuffers v24.3.25+incompatible // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect

2 go.sum
@@ -135,8 +135,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
32 llm/ext_server/server.cpp vendored
@@ -334,6 +334,7 @@ struct server_metrics {
struct llama_server_context
{
llama_model *model = nullptr;
float modelProgress = 0.0;
llama_context *ctx = nullptr;

clip_ctx *clp_ctx = nullptr;
@@ -737,7 +738,7 @@ struct llama_server_context
sampler_names.emplace_back(sampler_name);
}
}
slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
}
else
{
@@ -1095,7 +1096,7 @@ struct llama_server_context
std::vector<std::string> samplers_sequence;
for (const auto &sampler_type : slot.sparams.samplers_sequence)
{
samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
}

return json {
@@ -2104,6 +2105,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
printf(" -spf FNAME, --system-prompt-file FNAME\n");
printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
printf(" -ctk TYPE, --cache-type-k TYPE\n");
@@ -2501,7 +2503,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
{
params.use_mmap = false;
}
else if (arg == "--numa") {
else if (arg == "--numa")
{
if (++i >= argc) {
invalid_param = true;
break;
@@ -2521,6 +2524,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
{
params.cont_batching = true;
}
else if (arg == "-fa" || arg == "--flash-attn")
{
params.flash_attn = true;
}
else if (arg == "-np" || arg == "--parallel")
{
if (++i >= argc)
@@ -2529,7 +2536,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
break;
}
params.n_parallel = std::stoi(argv[i]);
} else if (arg == "-n" || arg == "--n-predict")
}
else if (arg == "-n" || arg == "--n-predict")
{
if (++i >= argc)
{
@@ -2537,7 +2545,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
break;
}
params.n_predict = std::stoi(argv[i]);
} else if (arg == "-spf" || arg == "--system-prompt-file")
}
else if (arg == "-spf" || arg == "--system-prompt-file")
{
if (++i >= argc)
{
@@ -2771,6 +2780,12 @@ inline void signal_handler(int signal) {
shutdown_handler(signal);
}

static bool update_load_progress(float progress, void *data)
{
((llama_server_context*)data)->modelProgress = progress;
return true;
}

#if defined(_WIN32)
char* wchar_to_char(const wchar_t* wstr) {
if (wstr == nullptr) return nullptr;
@@ -2876,7 +2891,9 @@ int main(int argc, char **argv) {
break;
}
case SERVER_STATE_LOADING_MODEL:
res.set_content(R"({"status": "loading model"})", "application/json");
char buf[128];
snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
res.set_content(buf, "application/json");
res.status = 503; // HTTP Service Unavailable
break;
case SERVER_STATE_ERROR:
@@ -3071,6 +3088,9 @@ int main(int argc, char **argv) {
});

// load the model
params.progress_callback = update_load_progress;
params.progress_callback_user_data = (void*)&llama;

if (!llama.load_model(params))
{
state.store(SERVER_STATE_ERROR);
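The server.cpp changes above follow a simple pattern: the loader reports progress through a callback that stores it on the server context, and the health endpoint includes that value in its 503 response while the model is still loading. A rough Go sketch of the same pattern, standard library only, with illustrative names that are not taken from the ollama code:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"sync/atomic"
	"time"
)

// loadState mirrors the idea in the C++ change: the loader reports progress
// through a callback, and the status endpoint exposes it while loading.
type loadState struct {
	progress atomic.Value // float64 in [0, 1]
	done     atomic.Bool
}

func (s *loadState) onProgress(p float64) bool {
	s.progress.Store(p)
	return true // returning false would abort the load, as in llama.cpp
}

func (s *loadState) handleHealth(w http.ResponseWriter, r *http.Request) {
	if !s.done.Load() {
		w.WriteHeader(http.StatusServiceUnavailable)
		p, _ := s.progress.Load().(float64)
		json.NewEncoder(w).Encode(map[string]any{"status": "loading model", "progress": p})
		return
	}
	json.NewEncoder(w).Encode(map[string]any{"status": "ok"})
}

func main() {
	s := &loadState{}
	s.progress.Store(0.0)

	// simulate a slow model load reporting progress
	go func() {
		for i := 0; i <= 10; i++ {
			s.onProgress(float64(i) / 10)
			time.Sleep(200 * time.Millisecond)
		}
		s.done.Store(true)
	}()

	http.HandleFunc("/health", s.handleHealth)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```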
@@ -27,8 +27,16 @@ const (
fileTypeIQ2_XXS
fileTypeIQ2_XS
fileTypeQ2_K_S
fileTypeQ3_K_XS
fileTypeIQ3_XS
fileTypeIQ3_XXS
fileTypeIQ1_S
fileTypeIQ4_NL
fileTypeIQ3_S
fileTypeIQ2_S
fileTypeIQ4_XS
fileTypeIQ2_M
fileTypeIQ1_M
fileTypeBF16

fileTypeUnknown
)
@@ -75,10 +83,26 @@ func ParseFileType(s string) (fileType, error) {
return fileTypeIQ2_XS, nil
case "Q2_K_S":
return fileTypeQ2_K_S, nil
case "Q3_K_XS":
case "IQ3_XS":
return fileTypeQ3_K_XS, nil
return fileTypeIQ3_XS, nil
case "IQ3_XXS":
return fileTypeIQ3_XXS, nil
case "IQ1_S":
return fileTypeIQ1_S, nil
case "IQ4_NL":
return fileTypeIQ4_NL, nil
case "IQ3_S":
return fileTypeIQ3_S, nil
case "IQ2_S":
return fileTypeIQ2_S, nil
case "IQ4_XS":
return fileTypeIQ4_XS, nil
case "IQ2_M":
return fileTypeIQ2_M, nil
case "IQ1_M":
return fileTypeIQ1_M, nil
case "BF16":
return fileTypeBF16, nil
default:
return fileTypeUnknown, fmt.Errorf("unknown fileType: %s", s)
}
@@ -126,10 +150,26 @@ func (t fileType) String() string {
return "IQ2_XS"
case fileTypeQ2_K_S:
return "Q2_K_S"
case fileTypeQ3_K_XS:
case fileTypeIQ3_XS:
return "Q3_K_XS"
return "IQ3_XS"
case fileTypeIQ3_XXS:
return "IQ3_XXS"
case fileTypeIQ1_S:
return "IQ1_S"
case fileTypeIQ4_NL:
return "IQ4_NL"
case fileTypeIQ3_S:
return "IQ3_S"
case fileTypeIQ2_S:
return "IQ2_S"
case fileTypeIQ4_XS:
return "IQ4_XS"
case fileTypeIQ2_M:
return "IQ2_M"
case fileTypeIQ1_M:
return "IQ1_M"
case fileTypeBF16:
return "BF16"
default:
return "unknown"
}
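Since the quantization names above are meant to round-trip through `ParseFileType` and `String`, a small in-package test can pin that down. This is a hypothetical sketch, assuming it lives in the same `llm` package as the code in the diff:

```go
package llm

import "testing"

// TestFileTypeRoundTrip checks that parsing a newly added quantization name
// and printing it back is the identity, using the names introduced above.
func TestFileTypeRoundTrip(t *testing.T) {
	names := []string{"IQ3_XS", "IQ1_S", "IQ4_NL", "IQ3_S", "IQ2_S", "IQ4_XS", "IQ2_M", "IQ1_M", "BF16"}
	for _, name := range names {
		ft, err := ParseFileType(name)
		if err != nil {
			t.Fatalf("ParseFileType(%q): %v", name, err)
		}
		if got := ft.String(); got != name {
			t.Errorf("round trip: got %q, want %q", got, name)
		}
	}
}
```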
@@ -119,7 +119,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {

t.Offset = uint64(offset)

if _, err := rs.Seek(int64(t.size()), io.SeekCurrent); err != nil {
if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil {
return err
}

38 llm/ggml.go
@@ -106,7 +106,7 @@ type Layer map[string]*Tensor

func (l Layer) size() (size uint64) {
for _, t := range l {
size += t.size()
size += t.Size()
}

return size
@@ -124,12 +124,12 @@ type Tensor struct {
}

func (t Tensor) blockSize() uint64 {
switch {
switch t.Kind {
case t.Kind < 2:
case 0, 1, 24, 25, 26, 27, 28, 31: // F32, F16, I8, I16, I32, I64, F64, BF16
return 1
case t.Kind < 10:
case 2, 3, 8, 9, 20: // Q4_0, Q4_1, Q8_0, Q8_1, IQ4_NL
return 32
default:
default: // All others
return 256
}
}
@@ -171,7 +171,29 @@ func (t Tensor) typeSize() uint64 {
case 17: // IQ2_XS
return 2 + 2*blockSize/8 + blockSize/32
case 18: // IQ3_XXS
return 2 + 3*blockSize/8
return 2 + blockSize/4 + blockSize/8
case 19: // IQ1_S
return 2 + blockSize/8 + blockSize/16
case 20: // IQ4_NL
return 2 + blockSize/2
case 21: // IQ3_S
return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4
case 22: // IQ2_S
return 2 + blockSize/4 + blockSize/16
case 23: // IQ4_XS
return 2 + 2 + blockSize/2 + blockSize/64
case 24: // I8
return 1
case 25: // I16
return 2
case 26: // I32
return 4
case 27: // I64
return 8
case 28: // F64
return 8
case 29: // IQ1_M
return blockSize/8 + blockSize/16 + blockSize/32
default:
return 0
}
@@ -185,7 +207,7 @@ func (t Tensor) parameters() uint64 {
return count
}

func (t Tensor) size() uint64 {
func (t Tensor) Size() uint64 {
return t.parameters() * t.typeSize() / t.blockSize()
}

@@ -288,7 +310,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
// mixtral 8x22b
ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
partialOffload = max(
3*ffnGateExpsWeight.size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
)
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
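As a worked example of the size computation above (`parameters * typeSize / blockSize`), here is a small self-contained sketch for a hypothetical 4096x4096 Q4_0 tensor. The 18-bytes-per-32-element block figure is the standard GGML Q4_0 layout (a 2-byte scale plus 16 bytes of packed 4-bit weights); it is background knowledge rather than something shown in this diff:

```go
package main

import "fmt"

func main() {
	// Hypothetical 4096x4096 tensor quantized as Q4_0:
	// blockSize = 32 elements, typeSize = 2 + 32/2 = 18 bytes per block.
	const (
		parameters = 4096 * 4096
		blockSize  = 32
		typeSize   = 2 + blockSize/2
	)
	size := parameters * typeSize / blockSize
	fmt.Printf("Q4_0 tensor: %d parameters -> %d bytes (%.2f MiB)\n",
		parameters, size, float64(size)/(1<<20))
	// Output: 16777216 parameters -> 9437184 bytes (9.00 MiB)
}
```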
16 llm/gguf.go
@@ -62,16 +62,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) {
return model, nil
}

const (
_ uint32 = iota
GGUFTokenNormal
GGUFTokenUnknown
GGUFTokenControl
GGUFTokenUserDefined
GGUFTokenUnused
GGUFTokenByte
)

const (
ggufTypeUint8 uint32 = iota
ggufTypeInt8
@@ -251,11 +241,11 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
}

for _, tensor := range llm.tensors {
if _, err := rs.Seek(int64(tensor.size()), io.SeekCurrent); err != nil {
if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
return err
}

padding := llm.padding(int64(tensor.size()), int64(alignment))
padding := llm.padding(int64(tensor.Size()), int64(alignment))
if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
return err
}
@@ -480,9 +470,11 @@ var ggufKVOrder = map[string][]string{
"gemma.attention.key_length",
"gemma.attention.value_length",
"general.file_type",
"tokenizer.ggml.pre",
"tokenizer.ggml.model",
"tokenizer.ggml.tokens",
"tokenizer.ggml.scores",
"tokenizer.ggml.merges",
"tokenizer.ggml.token_type",
"tokenizer.ggml.bos_token_id",
"tokenizer.ggml.eos_token_id",
Submodule llm/llama.cpp updated: 614d3b914e...74f33adf5f

31 llm/patches/01-load-progress.diff Normal file
@@ -0,0 +1,31 @@
diff --git a/common/common.cpp b/common/common.cpp
index ba1ecf0e..cead57cc 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
+ mparams.progress_callback = params.progress_callback;
+ mparams.progress_callback_user_data = params.progress_callback_user_data;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
diff --git a/common/common.h b/common/common.h
index d80344f2..71e84834 100644
--- a/common/common.h
+++ b/common/common.h
@@ -174,6 +174,13 @@ struct gpt_params {
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector
std::vector<std::string> image; // path to image file(s)
+
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
+ // If the provided progress_callback returns true, model loading continues.
+ // If it returns false, model loading is immediately aborted.
+ llama_progress_callback progress_callback = NULL;
+ // context pointer passed to the progress callback
+ void * progress_callback_user_data;
};

void gpt_params_handle_model_default(gpt_params & params);

@@ -1,8 +1,17 @@
From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:18:45 -0700
Subject: [PATCH] throw exception on load errors

---
llama.cpp | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 4225f955..7b762f86 100644
index 15c66077..8ba90b6a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
}
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
@@ -11,10 +20,10 @@ index 4225f955..7b762f86 100644
}

return 0;
@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file(
@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
};
}
model->rpc_servers.push_back(servers);
}

- int status = llama_model_load(path_model, *model, params);
- GGML_ASSERT(status <= 0);
- if (status < 0) {
@@ -22,6 +31,7 @@ index 4225f955..7b762f86 100644
- LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
- } else if (status == -2) {
- LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+
+ try {
+ int status = llama_model_load(path_model, *model, params);
+ GGML_ASSERT(status <= 0);
@@ -42,3 +52,6 @@ index 4225f955..7b762f86 100644
}

return model;
--
2.45.1

35 llm/patches/05-default-pretokenizer.diff Normal file
@@ -0,0 +1,35 @@
From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:33:20 -0700
Subject: [PATCH] default pretokenizer on unrecognized type

---
llama.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 15c66077..af1aede3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4504,9 +4504,6 @@ static void llm_load_vocab(
LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
LLAMA_LOG_WARN("%s: \n", __func__);
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- } else if (
- tokenizer_pre == "default") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
} else if (
tokenizer_pre == "llama3" ||
tokenizer_pre == "llama-v3" ||
@@ -4553,7 +4550,7 @@ static void llm_load_vocab(
tokenizer_pre == "dbrx") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
} else {
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
}
} else {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
--
2.45.1
@@ -55,6 +55,7 @@ type llmServer struct {
totalLayers uint64
gpuCount int
loadDuration time.Duration // Record how long it took the model to load
loadProgress float32

sem *semaphore.Weighted
}
@@ -200,6 +201,23 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
params = append(params, "--numa")
}

flashAttnEnabled := envconfig.FlashAttention

// partial offloading does not support flash attention
if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
flashAttnEnabled = false
}

// only cuda (compute capability 7+) and metal support flash attention
for _, g := range gpus {
if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
flashAttnEnabled = false
}
}
if flashAttnEnabled {
params = append(params, "--flash-attn")
}

numParallel := envconfig.NumParallel

// TODO (jmorganca): multimodal models don't support parallel yet
@@ -412,6 +430,7 @@ type ServerStatusResp struct {
SlotsIdle int `json:"slots_idle"`
SlotsProcessing int `json:"slots_processing"`
Error string `json:"error"`
Progress float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
@@ -459,6 +478,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
case "no slot available":
return ServerStatusNoSlotsAvailable, nil
case "loading model":
s.loadProgress = status.Progress
return ServerStatusLoadingModel, nil
default:
return ServerStatusError, fmt.Errorf("server error: %+v", status)
@@ -499,7 +519,8 @@ func (s *llmServer) Ping(ctx context.Context) error {

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
start := time.Now()
expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
stallDuration := 60 * time.Second
stallTimer := time.Now().Add(stallDuration) // give up if we stall for

slog.Info("waiting for llama runner to start responding")
var lastStatus ServerStatus = -1
@@ -517,13 +538,13 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
default:
}
if time.Now().After(expiresAt) {
if time.Now().After(stallTimer) {
// timeout
msg := ""
if s.status != nil && s.status.LastErrMsg != "" {
msg = s.status.LastErrMsg
}
return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
}
if s.cmd.ProcessState != nil {
msg := ""
@@ -534,6 +555,7 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
}
ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
defer cancel()
priorProgress := s.loadProgress
status, _ := s.getServerStatus(ctx)
if lastStatus != status && status != ServerStatusReady {
// Only log on status changes
@@ -546,6 +568,11 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return nil
default:
lastStatus = status
// Reset the timer as long as we're making forward progress on the load
if priorProgress != s.loadProgress {
slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
stallTimer = time.Now().Add(stallDuration)
}
time.Sleep(time.Millisecond * 250)
continue
}
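The `WaitUntilRunning` change above replaces a fixed ten-minute deadline with a stall timer that is pushed out whenever the reported load progress advances. A stripped-down sketch of that pattern, illustrative only and not the ollama implementation:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// waitForLoad polls progress() until done() reports true, giving up only if
// progress makes no forward movement for stallDuration, which is the pattern
// used in the WaitUntilRunning change above.
func waitForLoad(progress func() float32, done func() bool, stallDuration time.Duration) error {
	stallTimer := time.Now().Add(stallDuration)
	prior := progress()

	for !done() {
		if time.Now().After(stallTimer) {
			return errors.New("timed out waiting for model load to make progress")
		}
		if p := progress(); p != prior {
			prior = p
			stallTimer = time.Now().Add(stallDuration) // reset on forward progress
		}
		time.Sleep(250 * time.Millisecond)
	}
	return nil
}

func main() {
	var pct atomic.Int32
	go func() {
		for i := int32(0); i <= 100; i += 10 {
			pct.Store(i)
			time.Sleep(100 * time.Millisecond)
		}
	}()

	progress := func() float32 { return float32(pct.Load()) / 100 }
	done := func() bool { return pct.Load() >= 100 }
	if err := waitForLoad(progress, done, time.Second); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("model loaded")
}
```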
@@ -162,7 +162,7 @@ app.on('before-quit', () => {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
|
const updateURL = `https://ollama.com/api/update?os=${process.platform}&arch=${
|
||||||
process.arch
|
process.arch
|
||||||
}&version=${app.getVersion()}&id=${id()}`
|
}&version=${app.getVersion()}&id=${id()}`
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
package model
|
package parser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"unicode"
|
||||||
)
|
)
|
||||||
|
|
||||||
type File struct {
|
type File struct {
|
||||||
@@ -68,6 +69,11 @@ func ParseFile(r io.Reader) (*File, error) {
|
|||||||
var b bytes.Buffer
|
var b bytes.Buffer
|
||||||
var role string
|
var role string
|
||||||
|
|
||||||
|
var lineCount int
|
||||||
|
var linePos int
|
||||||
|
|
||||||
|
var utf16 bool
|
||||||
|
|
||||||
var f File
|
var f File
|
||||||
|
|
||||||
br := bufio.NewReader(r)
|
br := bufio.NewReader(r)
|
||||||
@@ -79,6 +85,17 @@ func ParseFile(r io.Reader) (*File, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// the utf16 byte order mark will be read as "unreadable" by ReadRune()
|
||||||
|
if isUnreadable(r) && lineCount == 0 && linePos == 0 {
|
||||||
|
utf16 = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// skip the second byte if we're reading utf16
|
||||||
|
if utf16 && r == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
next, r, err := parseRuneForState(r, curr)
|
next, r, err := parseRuneForState(r, curr)
|
||||||
if errors.Is(err, io.ErrUnexpectedEOF) {
|
if errors.Is(err, io.ErrUnexpectedEOF) {
|
||||||
return nil, fmt.Errorf("%w: %s", err, b.String())
|
return nil, fmt.Errorf("%w: %s", err, b.String())
|
||||||
@@ -86,6 +103,13 @@ func ParseFile(r io.Reader) (*File, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if isNewline(r) {
|
||||||
|
lineCount++
|
||||||
|
linePos = 0
|
||||||
|
} else {
|
||||||
|
linePos++
|
||||||
|
}
|
||||||
|
|
||||||
// process the state transition, some transitions need to be intercepted and redirected
|
// process the state transition, some transitions need to be intercepted and redirected
|
||||||
if next != curr {
|
if next != curr {
|
||||||
switch curr {
|
switch curr {
|
||||||
@@ -285,6 +309,10 @@ func isNewline(r rune) bool {
|
|||||||
return r == '\r' || r == '\n'
|
return r == '\r' || r == '\n'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isUnreadable(r rune) bool {
|
||||||
|
return r == unicode.ReplacementChar
|
||||||
|
}
|
||||||
|
|
||||||
func isValidMessageRole(role string) bool {
|
func isValidMessageRole(role string) bool {
|
||||||
return role == "system" || role == "user" || role == "assistant"
|
return role == "system" || role == "user" || role == "assistant"
|
||||||
}
|
}
|
||||||
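The parser changes above rename the package from `model` to `parser` and teach `ParseFile` to detect UTF-16 input via the byte order mark. A small usage sketch against that API as it appears in the diff (`parser.ParseFile` returning a `File` whose `Commands` carry `Name`/`Args` pairs); the import path mirrors the one added to server/images.go later in this diff:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ollama/ollama/parser"
)

func main() {
	modelfile := `FROM llama3
PARAMETER temperature 0.7
SYSTEM You are a concise assistant.
`
	f, err := parser.ParseFile(strings.NewReader(modelfile))
	if err != nil {
		panic(err)
	}
	for _, c := range f.Commands {
		fmt.Printf("%-12s %s\n", c.Name, c.Args)
	}
	// Following the expectations in the test below, this prints:
	// model        llama3
	// temperature  0.7
	// system       You are a concise assistant.
}
```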
@@ -1,11 +1,13 @@
|
|||||||
package model
|
package parser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"unicode/utf16"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
@@ -509,3 +511,37 @@ SYSTEM ""
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseFileUTF16ParseFile(t *testing.T) {
|
||||||
|
data := `FROM bob
|
||||||
|
PARAMETER param1 1
|
||||||
|
PARAMETER param2 4096
|
||||||
|
SYSTEM You are a utf16 file.
|
||||||
|
`
|
||||||
|
// simulate a utf16 le file
|
||||||
|
utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...))
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
err := binary.Write(buf, binary.LittleEndian, utf16File)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
actual, err := ParseFile(buf)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
expected := []Command{
|
||||||
|
{Name: "model", Args: "bob"},
|
||||||
|
{Name: "param1", Args: "1"},
|
||||||
|
{Name: "param2", Args: "4096"},
|
||||||
|
{Name: "system", Args: "You are a utf16 file."},
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, expected, actual.Commands)
|
||||||
|
|
||||||
|
// simulate a utf16 be file
|
||||||
|
buf = new(bytes.Buffer)
|
||||||
|
err = binary.Write(buf, binary.BigEndian, utf16File)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
actual, err = ParseFile(buf)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, expected, actual.Commands)
|
||||||
|
}
|
||||||
@@ -31,6 +31,8 @@ var (
|
|||||||
RunnersDir string
|
RunnersDir string
|
||||||
// Set via OLLAMA_TMPDIR in the environment
|
// Set via OLLAMA_TMPDIR in the environment
|
||||||
TmpDir string
|
TmpDir string
|
||||||
|
// Experimental flash attention
|
||||||
|
FlashAttention bool
|
||||||
)
|
)
|
||||||
|
|
||||||
func AsMap() map[string]string {
|
func AsMap() map[string]string {
|
||||||
@@ -45,6 +47,7 @@ func AsMap() map[string]string {
|
|||||||
"OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel),
|
"OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel),
|
||||||
"OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir),
|
"OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir),
|
||||||
"OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir),
|
"OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir),
|
||||||
|
"OLLAMA_FLASH_ATTENTION": fmt.Sprintf("%v", FlashAttention),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,6 +81,13 @@ func LoadConfig() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" {
|
||||||
|
d, err := strconv.ParseBool(fa)
|
||||||
|
if err == nil {
|
||||||
|
FlashAttention = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
RunnersDir = clean("OLLAMA_RUNNERS_DIR")
|
RunnersDir = clean("OLLAMA_RUNNERS_DIR")
|
||||||
if runtime.GOOS == "windows" && RunnersDir == "" {
|
if runtime.GOOS == "windows" && RunnersDir == "" {
|
||||||
// On Windows we do not carry the payloads inside the main executable
|
// On Windows we do not carry the payloads inside the main executable
|
||||||
|
|||||||
@@ -17,4 +17,7 @@ func TestConfig(t *testing.T) {
|
|||||||
t.Setenv("OLLAMA_DEBUG", "1")
|
t.Setenv("OLLAMA_DEBUG", "1")
|
||||||
LoadConfig()
|
LoadConfig()
|
||||||
require.True(t, Debug)
|
require.True(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
||||||
|
LoadConfig()
|
||||||
|
require.True(t, FlashAttention)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ import (
|
|||||||
"github.com/ollama/ollama/auth"
|
"github.com/ollama/ollama/auth"
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
"github.com/ollama/ollama/llm"
|
"github.com/ollama/ollama/llm"
|
||||||
|
"github.com/ollama/ollama/parser"
|
||||||
"github.com/ollama/ollama/server/envconfig"
|
"github.com/ollama/ollama/server/envconfig"
|
||||||
"github.com/ollama/ollama/types/errtypes"
|
"github.com/ollama/ollama/types/errtypes"
|
||||||
"github.com/ollama/ollama/types/model"
|
"github.com/ollama/ollama/types/model"
|
||||||
@@ -61,36 +62,36 @@ func (m *Model) IsEmbedding() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *Model) String() string {
|
func (m *Model) String() string {
|
||||||
var modelfile model.File
|
var modelfile parser.File
|
||||||
|
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: "model",
|
Name: "model",
|
||||||
Args: m.ModelPath,
|
Args: m.ModelPath,
|
||||||
})
|
})
|
||||||
|
|
||||||
for _, adapter := range m.AdapterPaths {
|
for _, adapter := range m.AdapterPaths {
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: "adapter",
|
Name: "adapter",
|
||||||
Args: adapter,
|
Args: adapter,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, projector := range m.ProjectorPaths {
|
for _, projector := range m.ProjectorPaths {
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: "model",
|
Name: "model",
|
||||||
Args: projector,
|
Args: projector,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Template != "" {
|
if m.Template != "" {
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: "template",
|
Name: "template",
|
||||||
Args: m.Template,
|
Args: m.Template,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.System != "" {
|
if m.System != "" {
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: "system",
|
Name: "system",
|
||||||
Args: m.System,
|
Args: m.System,
|
||||||
})
|
})
|
||||||
@@ -100,13 +101,13 @@ func (m *Model) String() string {
|
|||||||
switch v := v.(type) {
|
switch v := v.(type) {
|
||||||
case []any:
|
case []any:
|
||||||
for _, s := range v {
|
for _, s := range v {
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: k,
|
Name: k,
|
||||||
Args: fmt.Sprintf("%v", s),
|
Args: fmt.Sprintf("%v", s),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
modelfile.Commands = append(modelfile.Commands, model.Command{
|
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||||
Name: k,
|
Name: k,
|
||||||
Args: fmt.Sprintf("%v", v),
|
Args: fmt.Sprintf("%v", v),
|
||||||
})
|
})
|
||||||
@@ -114,14 +115,14 @@ func (m *Model) String() string {
        }
     }
 
     for _, license := range m.License {
-        modelfile.Commands = append(modelfile.Commands, model.Command{
+        modelfile.Commands = append(modelfile.Commands, parser.Command{
            Name: "license",
            Args: license,
         })
     }
 
     for _, msg := range m.Messages {
-        modelfile.Commands = append(modelfile.Commands, model.Command{
+        modelfile.Commands = append(modelfile.Commands, parser.Command{
            Name: "message",
            Args: fmt.Sprintf("%s %s", msg.Role, msg.Content),
         })
@@ -314,7 +315,7 @@ func realpath(rel, from string) string {
     return abspath
 }
 
-func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *model.File, fn func(resp api.ProgressResponse)) (err error) {
+func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
     config := ConfigV2{
        OS:           "linux",
        Architecture: "amd64",
@@ -339,7 +340,24 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
           return err
        }
     } else if strings.HasPrefix(c.Args, "@") {
-       blobpath, err := GetBlobsPath(strings.TrimPrefix(c.Args, "@"))
+       digest := strings.TrimPrefix(c.Args, "@")
+       if ib, ok := intermediateBlobs[digest]; ok {
+           p, err := GetBlobsPath(ib)
+           if err != nil {
+               return err
+           }
+
+           if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
+               // pass
+           } else if err != nil {
+               return err
+           } else {
+               fn(api.ProgressResponse{Status: fmt.Sprintf("using cached layer %s", ib)})
+               digest = ib
+           }
+       }
+
+       blobpath, err := GetBlobsPath(digest)
        if err != nil {
           return err
        }
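The cache lookup added above follows a small pattern: consult the in-memory map, confirm the mapped blob still exists on disk, and otherwise fall back to the original digest so it gets reprocessed. A minimal standalone sketch of that pattern (the `cache` map and `blobPath` helper are illustrative stand-ins, not the server's actual symbols):

```
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// cache maps an uploaded blob digest to the digest of a layer derived from it,
// mirroring the idea behind intermediateBlobs. Names here are illustrative.
var cache = map[string]string{}

// blobPath is a stand-in for GetBlobsPath: it resolves a digest to a local path.
func blobPath(digest string) string {
	return filepath.Join(os.TempDir(), "blobs", digest)
}

// resolveDigest returns the cached digest when its blob is still on disk,
// and otherwise returns the original digest unchanged.
func resolveDigest(digest string) string {
	ib, ok := cache[digest]
	if !ok {
		return digest
	}
	if _, err := os.Stat(blobPath(ib)); errors.Is(err, os.ErrNotExist) {
		// cached layer was pruned; reprocess the original blob
		return digest
	} else if err != nil {
		// unexpected stat error: be conservative and reprocess as well
		return digest
	}
	fmt.Printf("using cached layer %s\n", ib)
	return ib
}

func main() {
	fmt.Println(resolveDigest("sha256:example"))
}
```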
@@ -350,14 +368,14 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
        }
        defer blob.Close()
 
-       baseLayers, err = parseFromFile(ctx, blob, fn)
+       baseLayers, err = parseFromFile(ctx, blob, digest, fn)
        if err != nil {
           return err
        }
     } else if file, err := os.Open(realpath(modelFileDir, c.Args)); err == nil {
        defer file.Close()
 
-       baseLayers, err = parseFromFile(ctx, file, fn)
+       baseLayers, err = parseFromFile(ctx, file, "", fn)
        if err != nil {
           return err
        }
@@ -397,10 +415,17 @@ func CreateModel(ctx context.Context, name, modelFileDir, quantization string, m
           return err
        }
 
-       baseLayer.Layer, err = NewLayer(temp, baseLayer.Layer.MediaType)
+       layers, err := parseFromFile(ctx, temp, "", fn)
        if err != nil {
           return err
        }
+
+       if len(layers) != 1 {
+           return errors.New("quantization failed")
+       }
+
+       baseLayer.Layer = layers[0].Layer
+       baseLayer.GGML = layers[0].GGML
     }
 }
 
@@ -80,7 +80,7 @@ func NewLayerFromLayer(digest, mediatype, from string) (*Layer, error) {
     }, nil
 }
 
-func (l *Layer) Open() (io.ReadCloser, error) {
+func (l *Layer) Open() (io.ReadSeekCloser, error) {
     blob, err := GetBlobsPath(l.Digest)
     if err != nil {
        return nil, err
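Returning io.ReadSeekCloser instead of io.ReadCloser lets callers sniff the start of a blob and then rewind before decoding the whole stream. A small sketch of that usage, assuming an *os.File (which already satisfies io.ReadSeekCloser) stands in for a real layer; the "GGUF" magic check is only illustrative:

```
package main

import (
	"fmt"
	"io"
	"os"
)

// sniffAndRewind reads a short header to classify the payload, then seeks back
// to the beginning so a full decoder can consume the stream from offset 0.
func sniffAndRewind(rsc io.ReadSeekCloser) (string, error) {
	header := make([]byte, 4)
	if _, err := io.ReadFull(rsc, header); err != nil {
		return "", err
	}
	if _, err := rsc.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	if string(header) == "GGUF" {
		return "gguf", nil
	}
	return "unknown", nil
}

func main() {
	f, err := os.Open("model.bin") // *os.File implements io.ReadSeekCloser
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	kind, err := sniffAndRewind(f)
	fmt.Println(kind, err)
}
```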
@@ -17,6 +17,8 @@ import (
     "github.com/ollama/ollama/types/model"
 )
 
+var intermediateBlobs map[string]string = make(map[string]string)
+
 type layerWithGGML struct {
     *Layer
     *llm.GGML
@@ -76,7 +78,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
     return layers, nil
 }
 
-func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
     stat, err := file.Stat()
     if err != nil {
        return nil, err
@@ -165,16 +167,11 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp
     }
 
     layer, err := NewLayer(temp, "application/vnd.ollama.image.model")
-    if err != nil {
-       return nil, fmt.Errorf("aaa: %w", err)
-    }
-
-    blobpath, err := GetBlobsPath(layer.Digest)
     if err != nil {
        return nil, err
     }
 
-    bin, err := os.Open(blobpath)
+    bin, err := layer.Open()
     if err != nil {
        return nil, err
     }
@@ -185,16 +182,13 @@ func parseFromZipFile(_ context.Context, file *os.File, fn func(api.ProgressResp
        return nil, err
     }
 
-    layer, err = NewLayerFromLayer(layer.Digest, layer.MediaType, "")
-    if err != nil {
-       return nil, err
-    }
-
     layers = append(layers, &layerWithGGML{layer, ggml})
 
+    intermediateBlobs[digest] = layer.Digest
     return layers, nil
 }
 
-func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
+func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
     sr := io.NewSectionReader(file, 0, 512)
     contentType, err := detectContentType(sr)
     if err != nil {
@@ -205,7 +199,7 @@ func parseFromFile(ctx context.Context, file *os.File, fn func(api.ProgressRespo
     case "gguf", "ggla":
        // noop
     case "application/zip":
-       return parseFromZipFile(ctx, file, fn)
+       return parseFromZipFile(ctx, file, digest, fn)
     default:
        return nil, fmt.Errorf("unsupported content type: %s", contentType)
     }
@@ -29,6 +29,7 @@ import (
     "github.com/ollama/ollama/gpu"
     "github.com/ollama/ollama/llm"
     "github.com/ollama/ollama/openai"
+    "github.com/ollama/ollama/parser"
     "github.com/ollama/ollama/server/envconfig"
     "github.com/ollama/ollama/types/errtypes"
     "github.com/ollama/ollama/types/model"
@@ -539,7 +540,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
        r = f
     }
 
-    modelfile, err := model.ParseFile(r)
+    modelfile, err := parser.ParseFile(r)
     if err != nil {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
        return
@@ -840,6 +841,25 @@ func (s *Server) HeadBlobHandler(c *gin.Context) {
 }
 
 func (s *Server) CreateBlobHandler(c *gin.Context) {
+    if ib, ok := intermediateBlobs[c.Param("digest")]; ok {
+       p, err := GetBlobsPath(ib)
+       if err != nil {
+           c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+           return
+       }
+
+       if _, err := os.Stat(p); errors.Is(err, os.ErrNotExist) {
+           slog.Info("evicting intermediate blob which no longer exists", "digest", ib)
+           delete(intermediateBlobs, c.Param("digest"))
+       } else if err != nil {
+           c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+           return
+       } else {
+           c.Status(http.StatusOK)
+           return
+       }
+    }
+
     path, err := GetBlobsPath(c.Param("digest"))
     if err != nil {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
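The handler above lets the server answer a blob upload immediately when a converted layer for that digest is already cached. The client-side flow this supports is roughly: hash the local file, check whether the blob exists, upload it if not, then reference it in a Modelfile as `FROM @sha256:<digest>`. A hedged sketch against the documented blob and create endpoints (URLs and JSON fields are assumed from the public API docs, not from this diff):

```
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

const server = "http://localhost:11434"

func main() {
	f, err := os.Open("model.gguf")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// compute the digest the server expects in the blob URL
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	digest := fmt.Sprintf("sha256:%x", h.Sum(nil))

	// ask the server whether the blob (or a cached intermediate) already exists
	head, err := http.Head(fmt.Sprintf("%s/api/blobs/%s", server, digest))
	if err != nil {
		panic(err)
	}
	head.Body.Close()

	if head.StatusCode != http.StatusOK {
		// upload the raw bytes; the digest in the path must match the body
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			panic(err)
		}
		req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/api/blobs/%s", server, digest), f)
		if err != nil {
			panic(err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
	}

	// create a model that references the uploaded blob by digest
	body, _ := json.Marshal(map[string]string{
		"name":      "example",
		"modelfile": "FROM @" + digest,
	})
	resp, err := http.Post(server+"/api/create", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```

On a repeat create with the same source blob, the cached intermediate entry lets the server skip re-converting the upload into a GGUF layer.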
@@ -17,7 +17,7 @@ import (
     "github.com/stretchr/testify/assert"
 
     "github.com/ollama/ollama/api"
-    "github.com/ollama/ollama/types/model"
+    "github.com/ollama/ollama/parser"
     "github.com/ollama/ollama/version"
 )
 
@@ -56,7 +56,7 @@ func Test_Routes(t *testing.T) {
     fname := createTestFile(t, "ollama-model")
 
     r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname))
-    modelfile, err := model.ParseFile(r)
+    modelfile, err := parser.ParseFile(r)
     assert.Nil(t, err)
     fn := func(resp api.ProgressResponse) {
        t.Logf("Status: %s", resp.Status)
@@ -220,7 +220,7 @@ func (s *Scheduler) processCompleted(ctx context.Context) {
     runner := s.loaded[finished.model.ModelPath]
     s.loadedMu.Unlock()
     if runner == nil {
-       slog.Error("finished requeset signal received after model unloaded", "modelPath", finished.model.ModelPath)
+       slog.Error("finished request signal received after model unloaded", "modelPath", finished.model.ModelPath)
        continue
     }
     runner.refMu.Lock()
@@ -151,7 +151,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
 }
 
 func TestRequests(t *testing.T) {
-    ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
+    ctx, done := context.WithTimeout(context.Background(), time.Second)
     defer done()
 
     // Same model, same request