Mirror of https://github.com/likelovewant/ollama-for-amd.git, synced 2025-12-23 15:08:27 +00:00
* feat: Bump llama.cpp to df1b612
* fix(mtmd): Correctly encode text chunks during mtmd tokenization. For some models, text chunks containing template delimiter tokens can appear interspersed with the image embeddings; these need to be correctly translated to text tokens.
* tests: Use MtmdChunk in image_test
* style: Fix unnecessary conversion linting
* fix(ggml): Revert changes to ggml_hip.cpp. These changes were made largely by our code assistant and are likely wrong.
* fix: Revert changes in mem_nvml.cpp
* feat: Update sync point to 1deee0. This brings in several more optimization commits and adds model support for EmbeddingGemma.
* feat: Update patches for 1deee0
* feat: sync for bump to 1deee0
* fix: Bad patch updates with errant `+`
* feat: Bump llama.cpp/ggml to 7049736
* fix: format-patches after latest bump

Branch: LlamaCPPBump-GraniteDocling
Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
Diff (208 lines, 8.4 KiB)
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: ParthSareen <parth.sareen@ollama.com>
Date: Mon, 21 Apr 2025 13:30:31 -0700
Subject: [PATCH] add ollama vocab for grammar support

---
 src/llama-grammar.cpp  | 49 ++++++++++++++++++++++++++++++++++++------
 src/llama-grammar.h    | 14 ++++++++++++
 src/llama-sampling.cpp |  4 ++--
 3 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp
index bed706bb..b51cee09 100644
--- a/src/llama-grammar.cpp
+++ b/src/llama-grammar.cpp
@@ -907,6 +907,7 @@ llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const llama_grammar_element ** rules,
         size_t n_rules,
         size_t start_rule_index) {
@@ -962,6 +963,7 @@ struct llama_grammar * llama_grammar_init_impl(
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
     return new llama_grammar {
         vocab,
+        ollama_vocab,
         std::move(vec_rules),
         std::move(stacks),
         /* .partial_utf8 = */ {},
@@ -975,6 +977,7 @@ struct llama_grammar * llama_grammar_init_impl(
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
@@ -1067,6 +1070,7 @@ struct llama_grammar * llama_grammar_init_impl(
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
     return new llama_grammar {
         vocab,
+        ollama_vocab,
         std::move(vec_rules),
         std::move(stacks),
         /* .partial_utf8 = */ {},
@@ -1089,6 +1093,7 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
 struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
     auto * result = new llama_grammar {
         grammar.vocab,
+        grammar.o_vocab,
         grammar.rules,
         grammar.stacks,
         grammar.partial_utf8,
@@ -1116,7 +1121,6 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
 }
 
 void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) {
-    GGML_ASSERT(grammar.vocab != nullptr);
 
     if (grammar.awaiting_trigger) {
         return;
@@ -1138,9 +1142,13 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
 
     for (size_t i = 0; i < cur_p->size; ++i) {
         const llama_token id = cur_p->data[i].id;
-        const std::string & piece = grammar.vocab->token_to_piece(id);
+        const std::string piece = grammar.o_vocab ?
+            grammar.o_vocab->token_to_piece(id) :
+            grammar.vocab->token_to_piece(id);
 
-        if (grammar.vocab->is_eog(id)) {
+        const bool is_eog = grammar.o_vocab ? grammar.o_vocab->is_eog(id) : grammar.vocab->is_eog(id);
+
+        if (is_eog) {
             if (!allow_eog) {
                 cur_p->data[i].logit = -INFINITY;
             }
@@ -1159,9 +1167,10 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
 }
 
 void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) {
-    GGML_ASSERT(grammar.vocab != nullptr);
 
-    const auto & piece = grammar.vocab->token_to_piece(token);
+    const std::string piece = grammar.o_vocab ?
+        grammar.o_vocab->token_to_piece(token) :
+        grammar.vocab->token_to_piece(token);
 
     if (grammar.awaiting_trigger) {
         if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) {
@@ -1201,13 +1210,14 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
         }
     }
 
-    if (grammar.vocab->is_eog(token)) {
+    const bool is_eog = grammar.o_vocab ? grammar.o_vocab->is_eog(token) : grammar.vocab->is_eog(token);
+    if (is_eog) {
         for (const auto & stack : grammar.stacks) {
             if (stack.empty()) {
                 return;
             }
         }
-        GGML_ABORT("fatal error");
+        GGML_ABORT("grammar error: end of grammar token received but grammar stack is not empty");
     }
 
     llama_grammar_accept_str(grammar, piece);
@@ -1227,3 +1237,28 @@ void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string
         throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece);
     }
 }
+
+
+const std::string & ollama_vocab::token_to_piece(const uint32_t token) const {
+    try {
+        return token_to_piece_map.at(token);
+    } catch (const std::out_of_range&) {
+        throw std::runtime_error("Token not found in vocabulary: " + std::to_string(token));
+    }
+}
+
+void ollama_vocab::add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces) {
+    for (size_t i = 0; i < n_tokens; i++) {
+        token_to_piece_map[tokens[i]] = pieces[i];
+    }
+}
+
+bool ollama_vocab::is_eog(const uint32_t token) const {
+    return special_eog_ids.count(token) > 0;
+}
+
+void ollama_vocab::set_eog_tokens(const uint32_t* tokens, size_t n_tokens) {
+    for (size_t i = 0; i < n_tokens; i++) {
+        special_eog_ids.insert(tokens[i]);
+    }
+}
diff --git a/src/llama-grammar.h b/src/llama-grammar.h
index f8c291de..2a3a62db 100644
--- a/src/llama-grammar.h
+++ b/src/llama-grammar.h
@@ -6,8 +6,19 @@
 #include <regex>
 #include <string>
 #include <vector>
+#include <set>
 
 struct llama_vocab;
+struct ollama_vocab {
+    std::map<uint32_t, std::string> token_to_piece_map;
+    std::set<uint32_t> special_eog_ids;
+
+    const std::string & token_to_piece(const uint32_t token) const;
+    void add_token_pieces(const uint32_t* tokens, size_t n_tokens, const char** pieces);
+    void set_eog_tokens(const uint32_t* tokens, size_t n_tokens);
+    bool is_eog(const uint32_t token) const;
+
+};
 
 // grammar element type
 enum llama_gretype {
@@ -114,6 +125,7 @@ struct llama_grammar_trigger_pattern {
 struct llama_grammar {
     // note: allow null vocab for testing (not great)
     const llama_vocab * vocab;
+    const ollama_vocab * o_vocab;
 
     const llama_grammar_rules  rules;  // TODO: shared ptr
           llama_grammar_stacks stacks;
@@ -141,12 +153,14 @@ struct llama_grammar {
 // note: needed for tests (not great)
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const llama_grammar_element ** rules,
         size_t n_rules,
         size_t start_rule_index);
 
 struct llama_grammar * llama_grammar_init_impl(
         const struct llama_vocab * vocab,
+        const struct ollama_vocab * ollama_vocab,
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 55d2e355..da34526b 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1563,7 +1563,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
         trigger_patterns_c.push_back(trigger_pattern.pattern.c_str());
     }
 
-    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
+    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, nullptr, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
                                                  ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(),
                                                  ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
 
@@ -1645,7 +1645,7 @@ static struct llama_sampler * llama_sampler_init_grammar_impl(
         /* .vocab        = */ vocab,
         /* .grammar_str  = */ grammar_str,
         /* .grammar_root = */ grammar_root,
-        /* .grammar      = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens),
+        /* .grammar      = */ llama_grammar_init_impl(vocab, nullptr, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens),
     };
     if (!ctx->grammar) {
         delete ctx;
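
For illustration, a minimal C++ sketch of how the ollama_vocab hooks added by this patch might be exercised. It uses only functions the patch itself introduces; the token IDs, pieces, grammar string, and file name are made-up values, the build is assumed to happen inside llama.cpp's src/ tree, and the null llama_vocab relies on the "allow null vocab for testing (not great)" path noted in llama-grammar.h. In practice the ollama runner populates the vocabulary from the loaded model rather than hard-coding it:

// sketch.cpp - hypothetical standalone exercise of the patched API
#include <cstdint>
#include <cstdio>

#include "llama-grammar.h"

int main() {
    ollama_vocab v;

    // Register a few hypothetical token-id -> piece mappings.
    const uint32_t tokens[] = { 1, 2, 3 };
    const char *   pieces[] = { "yes", "no", "</s>" };
    v.add_token_pieces(tokens, 3, pieces);

    // Mark token 3 as end-of-generation; is_eog() now reports it.
    const uint32_t eog[] = { 3 };
    v.set_eog_tokens(eog, 1);

    // Build a grammar with a null llama_vocab; piece lookup and EOG
    // checks then take the grammar.o_vocab branch added by this patch.
    struct llama_grammar * g = llama_grammar_init_impl(
        /* vocab        = */ nullptr,
        /* ollama_vocab = */ &v,
        "root ::= \"yes\" | \"no\"", "root",
        /* lazy             = */ false,
        /* trigger_patterns = */ nullptr, 0,
        /* trigger_tokens   = */ nullptr, 0);

    printf("piece(1)=%s is_eog(3)=%d\n",
           v.token_to_piece(1).c_str(), (int) v.is_eog(3));

    llama_grammar_free_impl(g);
    return 0;
}

The design point worth noting: rather than teaching llama.cpp's grammar engine about ollama's tokenizer, the patch adds a thin side table (id -> piece plus an EOG id set) and consults it only when o_vocab is non-null, so existing llama.cpp call sites are untouched apart from the extra nullptr argument visible in llama-sampling.cpp above.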