Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-21 22:33:56 +00:00)
Llama cpp bump (df1b612): granite docling / mamba2 optimizations / multimodal encoding fixes (#12552)
* feat: Bump llama.cpp to df1b612
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(mtmd): Correctly encode text chunks during mtmd tokenization
  There can be text chunks that appear interspersed with the image embeddings that contain template delimiter tokens for some models. These need to be correctly translated to text tokens.
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* tests: Use MtmdChunk in image_test
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* style: Fix unnecessary conversion linting
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(ggml): Revert changes to ggml_hip.cpp
  These changes were done largely by our code assistant and are likely wrong.
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Revert changes in mem_nvml.cpp
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update sync point to 1deee0
  This brings in several more optimization commits and model support for EmbeddingGemma.
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches for 1deee0
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: sync for bump to 1deee0
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Bad patch updates with errant `+`
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Bump llama.cpp/ggml to 7049736
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: format-patches after latest bump
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

---------

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
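For context on the mtmd text-chunk fix above: mtmd tokenization splits a multimodal prompt into alternating text and image chunks, and any template delimiter text that lands between image embeddings still has to be run through the text tokenizer rather than dropped. The following is a minimal, hypothetical C++ sketch of that chunk walk; the ChunkType/Chunk types and tokenize_text helper are illustrative stand-ins, not the actual mtmd or Ollama API.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-ins only; the real mtmd/Ollama chunk types and APIs differ.
enum class ChunkType { Text, Image };

struct Chunk {
    ChunkType type;
    std::string text;               // set for Text chunks (may contain template delimiters)
    std::vector<float> image_embd;  // set for Image chunks
};

// Hypothetical tokenizer: one token id per byte, just to keep the sketch runnable.
static std::vector<int32_t> tokenize_text(const std::string & s) {
    std::vector<int32_t> out;
    for (unsigned char ch : s) out.push_back(static_cast<int32_t>(ch));
    return out;
}

// Walk the chunks in order: text chunks (including template delimiter text that
// appears between image embeddings) are translated to text tokens, while image
// chunks are collected for the vision encoder.
static std::vector<int32_t> flatten_chunks(const std::vector<Chunk> & chunks,
                                           std::vector<std::vector<float>> & image_embds) {
    std::vector<int32_t> tokens;
    for (const Chunk & c : chunks) {
        if (c.type == ChunkType::Text) {
            const auto t = tokenize_text(c.text);
            tokens.insert(tokens.end(), t.begin(), t.end());
        } else {
            image_embds.push_back(c.image_embd);
        }
    }
    return tokens;
}

int main() {
    std::vector<Chunk> chunks = {
        {ChunkType::Text,  "<|image_start|>", {}},
        {ChunkType::Image, "", std::vector<float>(16, 0.0f)},
        {ChunkType::Text,  "<|image_end|> Describe the document.", {}},
    };
    std::vector<std::vector<float>> image_embds;
    const auto tokens = flatten_chunks(chunks, image_embds);
    std::cout << tokens.size() << " text tokens, " << image_embds.size() << " image chunk(s)\n";
    return 0;
}
```

The point of the fix, as described in the commit message, is simply that the text chunks in this walk must not be skipped: their delimiter text becomes ordinary text tokens in the final sequence.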
llama/llama.cpp/tools/mtmd/clip.cpp (vendored): 52 changed lines
@@ -183,7 +183,9 @@ struct clip_hparams {
     int32_t projection_dim;
     int32_t n_head;
     int32_t n_layer;
-    int32_t proj_scale_factor = 0; // idefics3
+    // idefics3
+    int32_t preproc_image_size = 0;
+    int32_t proj_scale_factor = 0;
 
     float image_mean[3];
     float image_std[3];
@@ -2263,6 +2265,7 @@ struct clip_model_loader {
 
         if (is_vision) {
             get_u32(KEY_IMAGE_SIZE, hparams.image_size);
+            get_u32(KEY_PREPROC_IMAGE_SIZE, hparams.preproc_image_size, false);
             get_u32(KEY_PATCH_SIZE, hparams.patch_size);
             get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
             get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
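The new KEY_PREPROC_IMAGE_SIZE read above passes `false` as its last argument, i.e. the key is optional, so older GGUFs without it keep the default of 0 set in clip_hparams. Below is a minimal sketch of that optional-key pattern, assuming a hypothetical in-memory metadata map and illustrative key strings rather than the real clip_model_loader/gguf API.

```cpp
#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for GGUF metadata; the real loader reads gguf key/value pairs.
using Metadata = std::map<std::string, uint32_t>;

// Read a u32 key; if it is required and missing, throw, otherwise keep the caller's default.
static void get_u32(const Metadata & md, const std::string & key, int32_t & out, bool required = true) {
    auto it = md.find(key);
    if (it == md.end()) {
        if (required) throw std::runtime_error("missing key: " + key);
        return;  // optional key absent: leave the existing/default value untouched
    }
    out = static_cast<int32_t>(it->second);
}

int main() {
    Metadata md = { {"vision.image_size", 512} };  // no preproc_image_size key present
    int32_t image_size         = 0;
    int32_t preproc_image_size = 0;  // default survives when the optional key is absent
    get_u32(md, "vision.image_size", image_size);
    get_u32(md, "vision.preproc_image_size", preproc_image_size, /*required=*/false);
    return (image_size == 512 && preproc_image_size == 0) ? 0 : 1;
}
```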
@@ -3590,10 +3593,51 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         // res_imgs->data[0] = *res;
         res_imgs->entries.push_back(std::move(img_f32));
         return true;
-    }
-    else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
+        // The refined size has two steps:
+        // 1. Resize w/ aspect-ratio preserving such that the longer side is
+        //    the preprocessor longest size
+        // 2. Resize w/out preserving aspect ratio such that both sides are
+        //    multiples of image_size (always rounding up)
+        //
+        // CITE: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics3/image_processing_idefics3.py#L737
+        const clip_image_size refined_size = image_manipulation::calc_size_preserved_ratio(
+            original_size, params.image_size, params.preproc_image_size);
+
+        llava_uhd::slice_instructions instructions;
+        instructions.overview_size = clip_image_size{params.image_size, params.image_size};
+        instructions.refined_size = refined_size;
+        instructions.grid_size = clip_image_size{
+            static_cast<int>(std::ceil(static_cast<float>(refined_size.width) / params.image_size)),
+            static_cast<int>(std::ceil(static_cast<float>(refined_size.height) / params.image_size)),
+        };
+        for (int y = 0; y < refined_size.height; y += params.image_size) {
+            for (int x = 0; x < refined_size.width; x += params.image_size) {
+                instructions.slices.push_back(llava_uhd::slice_coordinates{
+                    /* x */x,
+                    /* y */y,
+                    /* size */clip_image_size{
+                        std::min(params.image_size, refined_size.width - x),
+                        std::min(params.image_size, refined_size.height - y)
+                    }
+                });
+            }
+        }
+        auto imgs = llava_uhd::slice_image(img, instructions);
+
+        // cast and normalize to f32
+        for (size_t i = 0; i < imgs.size(); ++i) {
+            // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
+            clip_image_f32_ptr res(clip_image_f32_init());
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
+            res_imgs->entries.push_back(std::move(res));
+        }
+
+        res_imgs->grid_x = instructions.grid_size.width;
+        res_imgs->grid_y = instructions.grid_size.height;
+        return true;
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
             || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3
-            || ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3
             || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
             ) {
         clip_image_u8 resized_image;
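To make the slicing arithmetic above concrete, here is a standalone sketch that reproduces the two-step refined-size computation described in the comment (longest side scaled to the preprocessor size, then both sides rounded up to multiples of image_size) and the resulting slice grid. The calc_refined_size helper and the sample sizes (image_size 512, preproc_image_size 2048, a 1600x1000 input) are assumptions for illustration; they are not the vendored image_manipulation::calc_size_preserved_ratio implementation or real model values.

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>

struct Size { int width; int height; };

// Step 1: aspect-ratio-preserving resize so the longer side equals preproc_image_size.
// Step 2: round both sides up to multiples of image_size.
// This follows the comment in the diff above, not the exact vendored helper.
static Size calc_refined_size(Size original, int image_size, int preproc_image_size) {
    const float scale = static_cast<float>(preproc_image_size) /
                        std::max(original.width, original.height);
    const int w = static_cast<int>(std::round(original.width  * scale));
    const int h = static_cast<int>(std::round(original.height * scale));
    auto round_up = [image_size](int v) {
        return ((v + image_size - 1) / image_size) * image_size;
    };
    return Size{ round_up(w), round_up(h) };
}

int main() {
    const int image_size         = 512;   // assumed tile size
    const int preproc_image_size = 2048;  // assumed preprocessor longest size
    const Size original{1600, 1000};      // example input image

    const Size refined = calc_refined_size(original, image_size, preproc_image_size);
    const int grid_x = static_cast<int>(std::ceil(static_cast<float>(refined.width)  / image_size));
    const int grid_y = static_cast<int>(std::ceil(static_cast<float>(refined.height) / image_size));
    std::cout << "refined: " << refined.width << "x" << refined.height
              << "  grid: " << grid_x << "x" << grid_y << "\n";

    // Enumerate slice coordinates the same way the new loop in clip.cpp does.
    for (int y = 0; y < refined.height; y += image_size) {
        for (int x = 0; x < refined.width; x += image_size) {
            const int w = std::min(image_size, refined.width  - x);
            const int h = std::min(image_size, refined.height - y);
            std::cout << "slice at (" << x << "," << y << ") size " << w << "x" << h << "\n";
        }
    }
    return 0;
}
```

With these assumed values, a 1600x1000 page becomes a 2048x1536 refined image and a 4x3 grid; because the refined size is rounded up to tile multiples, every slice comes out exactly image_size x image_size.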