Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-21 22:33:56 +00:00)
ggml: Separate tensor load from backend creation
Currently, when the backend is created, the tensors are loaded at the same time, which is a slow operation. This separates them into two steps:

- Create backend, including enumerating tensors and memory allocation
- Load tensor data

This allows more flexibility in managing model loading.
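The hunks below only show the caller-side fallout (a change to the Decode signature); the two-step flow itself lives in the ggml backend. As a rough, hypothetical sketch of the shape this enables — the names here are illustrative, not the actual ollama API:

package main

import "fmt"

// backend stands in for a created-but-unloaded backend: tensors are
// enumerated and memory is allocated up front, but no tensor data has
// been read yet. All names here are hypothetical.
type backend struct {
	tensors []string
	loaded  bool
}

// newBackend is the cheap step: enumerate tensors, allocate memory.
func newBackend(tensorNames []string) *backend {
	return &backend{tensors: tensorNames}
}

// load is the slow step: read tensor data into the allocated memory.
func (b *backend) load() error {
	// ... stream tensor data from disk here ...
	b.loaded = true
	return nil
}

func main() {
	b := newBackend([]string{"token_embd.weight", "output.weight"})
	// Between the two steps the caller can inspect memory use,
	// schedule placement, or defer the expensive read entirely.
	if err := b.load(); err != nil {
		fmt.Println("load failed:", err)
	}
	fmt.Println("tensors:", len(b.tensors), "loaded:", b.loaded)
}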
@@ -295,7 +295,7 @@ func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, is
 	}
 	defer bin.Close()
 
-	f, _, err := ggml.Decode(bin, -1)
+	f, err := ggml.Decode(bin, -1)
 	if err != nil {
 		return nil, err
 	}
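Every caller in this commit changes the same way: ggml.Decode drops its second return value (the decoded size), and code that still needs the size reads it from the returned metadata as f.Length, as the ggufLayers hunks below show. A minimal self-contained sketch of the pattern, using a stand-in decode rather than the real ggml package:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// meta is a stand-in for the decoded GGUF metadata; in the real code
// the size moves onto the result as f.Length.
type meta struct {
	Length int64
}

// decode is a stand-in with the new two-value signature. Previously it
// also returned the decoded size: (meta, int64, error).
func decode(r io.Reader, maxArraySize int) (*meta, error) {
	n, err := io.Copy(io.Discard, r)
	if err != nil {
		return nil, err
	}
	return &meta{Length: n}, nil
}

func main() {
	blob := bytes.NewReader([]byte("GGUF...")) // placeholder bytes
	f, err := decode(blob, 1024)               // was: f, n, err := ...
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("decoded bytes:", f.Length) // was: n
}

Folding the size into the metadata means call sites that never used it, like the one above, no longer need a blank identifier.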
@@ -467,7 +467,7 @@ func quantizeLayer(layer *layerGGML, quantizeType string, fn func(resp api.Progr
 		return nil, err
 	}
 
-	f, _, err := ggml.Decode(temp, 1024)
+	f, err := ggml.Decode(temp, 1024)
 	if err != nil {
 		slog.Error(fmt.Sprintf("error decoding ggml: %s\n", err))
 		return nil, err
@@ -508,7 +508,7 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 
 	var offset int64
 	for offset < stat.Size() {
-		f, n, err := ggml.Decode(blob, 1024)
+		f, err := ggml.Decode(blob, 1024)
 		if errors.Is(err, io.EOF) {
 			break
 		} else if err != nil {
@@ -523,7 +523,7 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 		}
 
 		var layer Layer
-		if digest != "" && n == stat.Size() && offset == 0 {
+		if digest != "" && f.Length == stat.Size() && offset == 0 {
			layer, err = NewLayerFromLayer(digest, mediatype, blob.Name())
 			if err != nil {
 				slog.Debug("could not create new layer from layer", "error", err)
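The condition rewritten above is a fast path: when the decoded payload spans the entire file and scanning started at offset 0, the blob is a single GGUF, so the existing blob can be reused by digest instead of copying a section out of it. A small sketch of that decision, with stand-in values in place of the real os.Stat and metadata:

package main

import "fmt"

func main() {
	// Stand-ins: in ollama these come from the layer digest,
	// stat.Size(), and the decoded metadata's f.Length.
	digest := "sha256:abc123" // hypothetical digest
	fileSize := int64(4096)   // stat.Size()
	payloadLen := int64(4096) // f.Length
	offset := int64(0)

	if digest != "" && payloadLen == fileSize && offset == 0 {
		// Whole file is one GGUF payload: reuse the existing blob
		// as a layer (NewLayerFromLayer in the real code).
		fmt.Println("reuse existing blob by digest:", digest)
	} else {
		// Otherwise copy just this payload out of the blob, i.e.
		// io.NewSectionReader(blob, offset, payloadLen).
		fmt.Println("copy section at", offset, "length", payloadLen)
	}
}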
@@ -533,14 +533,14 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 
 		// Fallback to creating layer from file copy (either NewLayerFromLayer failed, or digest empty/n != stat.Size())
 		if layer.Digest == "" {
-			layer, err = NewLayer(io.NewSectionReader(blob, offset, n), mediatype)
+			layer, err = NewLayer(io.NewSectionReader(blob, offset, f.Length), mediatype)
 			if err != nil {
 				return nil, err
 			}
 		}
 
 		layers = append(layers, &layerGGML{layer, f})
-		offset = n
+		offset = f.Length
 	}
 
 	return detectChatTemplate(layers)
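The ggufLayers hunks are the only places the decoded size was actually used: the function scans a blob that may hold several concatenated GGUF payloads, slices each one out with a SectionReader, and then advances to where the previous payload ended, which is now offset = f.Length. A self-contained sketch of that scan pattern, with a toy decode standing in for the real ggml package:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// payload is a stand-in for decoded GGUF metadata. Length mirrors
// f.Length in the real code: here it is the absolute offset at which
// this payload ends, so the caller can advance with offset = f.Length.
type payload struct {
	Name   string
	Length int64
}

// decode is a stand-in that consumes one comma-terminated token from
// the reader (purely for demonstration) and reports where it ended.
func decode(r *bufio.Reader, start int64) (*payload, error) {
	tok, err := r.ReadString(',')
	if err == io.EOF && len(tok) > 0 {
		return &payload{Name: tok, Length: start + int64(len(tok))}, nil
	}
	if err != nil {
		return nil, err
	}
	name := strings.TrimSuffix(tok, ",")
	return &payload{Name: name, Length: start + int64(len(tok))}, nil
}

func main() {
	data := "first,second,third"
	size := int64(len(data))
	r := bufio.NewReader(strings.NewReader(data))

	// Mirrors the ggufLayers loop: decode one payload, then advance
	// the offset to where it ended (offset = f.Length in the diff).
	var offset int64
	for offset < size {
		f, err := decode(r, offset)
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("decode failed:", err)
			return
		}
		// In ollama, io.NewSectionReader(blob, offset, f.Length)
		// would turn this span into a layer here.
		fmt.Printf("payload %q spans [%d, %d)\n", f.Name, offset, f.Length)
		offset = f.Length
	}
}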
@@ -75,7 +75,7 @@ func (m *Model) Capabilities() []model.Capability {
 	if err == nil {
 		defer r.Close()
 
-		f, _, err := ggml.Decode(r, 1024)
+		f, err := ggml.Decode(r, 1024)
 		if err == nil {
 			if _, ok := f.KV()[fmt.Sprintf("%s.pooling_type", f.KV().Architecture())]; ok {
 				capabilities = append(capabilities, model.CapabilityEmbedding)
@@ -64,7 +64,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
 	}
 	defer blob.Close()
 
-	f, _, err := ggml.Decode(blob, -1)
+	f, err := ggml.Decode(blob, -1)
 	if err != nil {
 		return nil, err
 	}
@@ -271,7 +271,7 @@ func TestQuantizeModel(t *testing.T) {
 		t.Fatal(err.Error())
 	}
 	defer fp.Close()
-	meta, _, err := fsggml.Decode(fp, -1)
+	meta, err := fsggml.Decode(fp, -1)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -303,7 +303,7 @@ func TestQuantizeModel(t *testing.T) {
 		t.Fatalf("failed to load the quantized model %s: %s", tmp.Name(), err)
 	}
 	defer fpNew.Close()
-	newMeta, _, err := fsggml.Decode(fpNew, -1)
+	newMeta, err := fsggml.Decode(fpNew, -1)
 	if err != nil {
 		t.Fatalf("failed to load the quantized model %s: %s", tmp.Name(), err)
 	}