convert: qwen2 from safetensors (#8408)
Add native support for converting Qwen2 family models (including Qwen2.5) from safetensors to GGUF format so they can be run directly.
@@ -187,6 +187,8 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error {
 		conv = &gemma2Model{}
 	case "Phi3ForCausalLM":
 		conv = &phi3Model{}
+	case "Qwen2ForCausalLM":
+		conv = &qwen2Model{}
 	case "BertModel":
 		conv = &bertModel{}
 	default:
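For context on the hunk above: ConvertModel reads the architecture name from the model's config and switches on it to pick a per-architecture converter; this commit wires "Qwen2ForCausalLM" to a qwen2Model converter. The following is a minimal, self-contained sketch of that dispatch pattern, assuming a simplified converter interface; the names modelConverter, qwen2Converter, phi3Converter, and pickConverter are illustrative placeholders, not the actual types in ollama's convert package.

package main

import (
	"errors"
	"fmt"
)

// modelConverter is an illustrative stand-in for a per-architecture
// converter: each architecture knows how to map its safetensors layout
// to GGUF metadata and tensors.
type modelConverter interface {
	// KV returns architecture-specific key/value metadata.
	KV() map[string]any
}

// qwen2Converter is a placeholder for the Qwen2 converter added here.
type qwen2Converter struct{}

func (qwen2Converter) KV() map[string]any {
	return map[string]any{"general.architecture": "qwen2"}
}

// phi3Converter stands in for one of the previously supported architectures.
type phi3Converter struct{}

func (phi3Converter) KV() map[string]any {
	return map[string]any{"general.architecture": "phi3"}
}

// pickConverter mirrors the switch in ConvertModel: the architecture
// string taken from the model's config.json selects the implementation.
func pickConverter(arch string) (modelConverter, error) {
	switch arch {
	case "Phi3ForCausalLM":
		return &phi3Converter{}, nil
	case "Qwen2ForCausalLM": // the case added by this commit
		return &qwen2Converter{}, nil
	default:
		return nil, errors.New("unsupported architecture: " + arch)
	}
}

func main() {
	conv, err := pickConverter("Qwen2ForCausalLM")
	if err != nil {
		panic(err)
	}
	fmt.Println(conv.KV()["general.architecture"]) // prints "qwen2"
}

The design choice is simply a registry keyed by the Hugging Face architecture string, so adding a new model family amounts to adding a case plus the converter type that implements the shared interface.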