llama: update vendor code to commit ba1cb19c (#8101)

Author: Jeffrey Morgan
Date: 2024-12-14 14:55:51 -08:00
Committed by: GitHub
commit 7a81daf026
parent 60f75560a2

273 changed files with 3194 additions and 1900 deletions


@@ -8,10 +8,10 @@ Subject: [PATCH] embeddings
 1 file changed, 6 insertions(+), 3 deletions(-)
 
 diff --git a/src/llama.cpp b/src/llama.cpp
-index fa09f3b3..d1791af0 100644
+index 626c3e3f..9e292c4f 100644
 --- a/src/llama.cpp
 +++ b/src/llama.cpp
-@@ -17398,7 +17398,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
+@@ -17419,7 +17419,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
 const auto n_embd = hparams.n_embd;
 
 // TODO: use a per-batch flag for logits presence instead
@@ -20,7 +20,7 @@ index fa09f3b3..d1791af0 100644
 const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
 
 const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
-@@ -17693,7 +17693,6 @@ static int llama_decode_internal(
+@@ -17714,7 +17714,6 @@ static int llama_decode_internal(
 res = nullptr;
 embd = nullptr;
 } else if (cparams.embeddings) {
@@ -28,7 +28,7 @@ index fa09f3b3..d1791af0 100644
 embd = nullptr;
 for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
 if (strcmp(ggml_graph_node(gf, i)->name, "result_embd_pooled") == 0) {
-@@ -17701,11 +17700,15 @@ static int llama_decode_internal(
+@@ -17722,11 +17721,15 @@ static int llama_decode_internal(
 break;
 }
 }
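
The context lines above also show the lookup the embeddings patch relies on in llama_decode_internal: the pooled-embedding tensor is found by walking the compute graph backwards and comparing tensor names. Below is a minimal sketch of that pattern using the ggml_graph_n_nodes/ggml_graph_node accessors that appear in the diff; the helper name find_graph_output is hypothetical and not part of the patch.

// Sketch only: locate a named output tensor (e.g. "result_embd_pooled") in a
// ggml compute graph by scanning from the last node backwards, the same way
// the patched llama_decode_internal does. Returns nullptr if no node matches.
#include <cstring>

#include "ggml.h"

static struct ggml_tensor * find_graph_output(struct ggml_cgraph * gf, const char * name) {
    for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
        struct ggml_tensor * t = ggml_graph_node(gf, i);
        if (strcmp(t->name, name) == 0) {
            return t;  // last-built node with this name, e.g. the pooled embeddings
        }
    }
    return nullptr;  // graph does not produce this output (e.g. pooling disabled)
}

Scanning from the end is presumably chosen because the output tensors sit near the tail of the graph, so the search terminates after only a few iterations in the common case.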