Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-21 22:33:56 +00:00)
* feat: Bump llama.cpp to the latest master (17f7f4b)

  This brings in significant improvements to prefill performance for all models using the SSM_CONV and SSM_SCAN ops (granite4, jamba, falcon-h, nemotron-h, Qwen3 Next) on Apple Metal. See https://github.com/ggml-org/llama.cpp/pull/17876

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches 1-4

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Update patches 5-12

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches 13-18

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patch 20

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches 21-31

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Sync vendored code

  The two files I'm not sure about here are the swap from gemma3-iswa.cpp to gemma3.cpp (I chose to include this because I think it's required), and the inclusion of `ggml-zendnn.h`, which I chose to omit.

  Branch: LlamaCPPMetalSSMImprovements
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

---------

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 16 Sep 2024 15:53:13 -0700
Subject: [PATCH] pretokenizer

allow for an unset pretokenizer with a warning in the
logs instead of throwing an error
---
 src/llama-vocab.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index e2cca66e4..8246a0a14 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1825,16 +1825,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (type == LLAMA_VOCAB_TYPE_BPE) {
             add_space_prefix = false;
             clean_spaces = true;
-            if (tokenizer_pre.empty()) {
-                LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
-                LLAMA_LOG_WARN("%s: \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
-                LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
-                LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
-                LLAMA_LOG_WARN("%s: \n", __func__);
-                pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            } else if (tokenizer_pre == "default") {
+            if (tokenizer_pre == "default") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
             } else if (
                 tokenizer_pre == "llama3" ||
@@ -2014,7 +2005,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_MINIMAX_M2;
                 clean_spaces = false;
             } else {
-                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+                LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
             }
         } else if (type == LLAMA_VOCAB_TYPE_SPM) {
             pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;