Mirror of https://github.com/likelovewant/ollama-for-amd.git (synced 2025-12-25 07:58:01 +00:00)
* TEMPORARY: Update the llama.cpp upstream to my fork's Granite Four branch
This will be redone once my branch is merged upstream in llama.cpp
* feat: Update all patches
A number of them are no longer needed at all:
- 0003-embeddings: Embeddings entirely overhauled on master
- 0008-ensure-KV-cache-is-fully-defragmented: KV caching entirely
overhauled on master
- 0019-metal-add-mean-kernel-14267: Merged upstream
- 0020-CUDA-add-mean-operation-14313: Merged upstream
* feat: Sync llama.cpp and ggml
* fix: Update rsync-filter for all moved/new/removed files
* fix: Add files missing from sync
* fix: Update ggml rsync-filter for new ggml-cpu/arch subdirs
* fix: Add ggml files missing from sync
* fix: Narrow llama.cpp rsync-filter to not include mtmd main tool cpp files
* fix: Remove mtmd main cpp files
* fix: Add missing include in sampling_ext.cpp
* fix: Update llama.go to use mtmd instead of clip/llava
* fix: Add patch for mtmd_input_text
* chore: Ignore *.patched in the patch directory
* fix: Fix support for arch-specific ggml-cpu source files with new arrangement
In https://github.com/ggml-org/llama.cpp/pull/13892, all arch-specific
implementations were split out into a nested tree structure under
ggml-cpu/arch. This conflicts with standard CGO layout where all
arch-specific source files are expected to live in the same directory as
the parent Go module and use suffixes based on GOOS and GOARCH. As such,
there were really two options for getting this to work:
1. Add a patch on top of the GGML sync to rearrange the files to match the
Go layout convention
2. Use CGO directives to conditionally include the nested source files in
the compilation units
This commit does (2) in order to minimize the set of changes needed on top
of the upstream file layout. To get this to work, there are two key things
needed:
1. In cpu.go, #cgo directives are added to explicitly set __${GOARCH}__ as a
preprocessor define
2. In arch-impls.c|cpp, use an #ifdef | #elif defined | #endif chain to
explicitly include the .c|.cpp files for the given architecture from the
nested directory (see the sketch after this list)
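For illustration, a minimal sketch of mechanism (2); the file name arch-impls.cpp, the exact #cgo flag spelling, and the include paths are assumptions for the sketch, not the verbatim tree:

// arch-impls.cpp (illustrative sketch, not the verbatim file).
// cpu.go is assumed to carry directives along the lines of:
//     #cgo amd64 CXXFLAGS: -D__amd64__
//     #cgo arm64 CXXFLAGS: -D__arm64__
// so that exactly one branch below is compiled for the active GOARCH.
#if defined(__amd64__)
#include "arch/x86/repack.cpp"   // assumed path under ggml-cpu/arch
#elif defined(__arm64__)
#include "arch/arm/repack.cpp"   // assumed path under ggml-cpu/arch
#else
#error "no arch-specific ggml-cpu sources selected for this GOARCH"
#endif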
* fix: Use mtmd_helper to correctly load the bitmap for the image
* fix: Apply patch for mtmd_text_input
* fix: Add missing stb to llama.cpp rsync-filter
* fix: Add sync'ed stb vendored header
* fix: Use C++17 and include vendor for Go wrapper modules
* fix: Update patch 0015 for upstream implementation of uuid
* feat: Bump to the latest tip of the branch
* fix: Update patches for bump
* feat: Bump back to the central repo and point at the latest master
This includes granite 4 and a number of other model architectures!
* fix: Revert changes to ggml export GPU UUID patch
* fix: Add patch for GGML_VERSION and GGML_COMMIT constants
* feat: Sync all patched code
* build: Include cmake/common.cmake in ggml sync
* build: Add top-level include for GNUInstallDirs in CMakeLists.txt
This is used to populate CMAKE_INSTALL_BINDIR
* fix: Add a patch to avoid power throttling API on non-msvc windows builds
* fix: Sync patch changes for ggml-cpu.c
* feat: Bump llama.cpp to 4a4f42
This picks up support for Kimi K2 and PLaMO-2
* feat: Sync llama.cpp
* fix: Handle multi-chunk image encodings from mtmd
* fix: Re-number patches after merge with `main`
* feat: Bump to 41e78c in the makefile
* fix: Fix Solar and argsort/copy patches after bump
* fix: Remove Gemma3n CUDA Graphs patch
It was implemented upstream:
https://github.com/ggml-org/llama.cpp/pull/14741
* feat: Sync llama.cpp / ggml after latest bump
* build: Remove unnecessary CFLAGS definitions in cpu.go
* fix: Remove unnecessary additions in the rsync-filter
* fix: Remove unused vendored code for chat template parsing
* Revert "fix: Remove Gemma3n CUDA Graphs patch"
This reverts commit d724caced3ce21f08924d4b7801f94ce6638f6ea.
* fix: Update 0020 CUDA Graphs for gemma3n to keep both llama.cpp and ollama fixes
https://github.com/ollama/ollama/pull/11195#issuecomment-3137312394
* fix: Sync ggml-cuda.cu after keeping both styles of CUDA graph fixes for gemma3n
* unwind mxfp4 patch
Prepare to bump ggml to pick up the upstream mxfp4 implementation
* bump
* fix windows build error
* Convert tensors at load time
Repack the mxfp4 tensors as ggml's kernels expect them to be (sketched below).
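The repack step can be pictured as follows; a minimal sketch assuming ggml's MXFP4 block layout (32 elements sharing one E8M0 scale byte plus 16 bytes of packed 4-bit values) and a source checkpoint that stores scales and packed values as separate arrays. Both layouts are assumptions here, not the verbatim converter:

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed to mirror ggml's 32-element MXFP4 block: one shared scale,
// 32 x 4-bit values packed two per byte.
struct block_mxfp4 {
    uint8_t e;        // shared E8M0 exponent/scale
    uint8_t qs[16];   // 32 packed E2M1 values
};

// Interleave split source arrays into the block layout the kernels expect.
std::vector<block_mxfp4> repack_mxfp4(const uint8_t * scales,
                                      const uint8_t * nibbles,
                                      size_t n_blocks) {
    std::vector<block_mxfp4> out(n_blocks);
    for (size_t b = 0; b < n_blocks; ++b) {
        out[b].e = scales[b];
        for (int i = 0; i < 16; ++i) {
            out[b].qs[i] = nibbles[b * 16 + i];
        }
    }
    return out;
}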
* convert mlp bf16 to f32
* buffer the conversion better
* reshape earlier
* openai swiglu
* add ids
* split qkv, gate_up
* fix nested alt tags
* fast attention
* remove debug messages
* fix lint
* remove redundant test
* remap values only if source/target are different
* add back i32->i32 copy
* refactor cpu quants
* clean up vendor
* update patch instructions
* clean up patches
* remove webgpu
* update mem
* also handle gpt-oss
* revert convert changes
---------
Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
177 lines · 5.8 KiB · C++ · Vendored
#pragma once

#include "llama.h"

#include <string>
#include <vector>
#include <memory>

// pre-tokenization types
enum llama_vocab_pre_type {
    LLAMA_VOCAB_PRE_TYPE_DEFAULT        = 0,
    LLAMA_VOCAB_PRE_TYPE_LLAMA3         = 1,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM   = 2,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
    LLAMA_VOCAB_PRE_TYPE_FALCON         = 4,
    LLAMA_VOCAB_PRE_TYPE_MPT            = 5,
    LLAMA_VOCAB_PRE_TYPE_STARCODER      = 6,
    LLAMA_VOCAB_PRE_TYPE_GPT2           = 7,
    LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
    LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
    LLAMA_VOCAB_PRE_TYPE_STABLELM2      = 10,
    LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
    LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
    LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
    LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
    LLAMA_VOCAB_PRE_TYPE_PORO           = 15,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM3       = 16,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM4       = 17,
    LLAMA_VOCAB_PRE_TYPE_VIKING         = 18,
    LLAMA_VOCAB_PRE_TYPE_JAIS           = 19,
    LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
    LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
    LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
    LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
    LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
    LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
    LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
    LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
    LLAMA_VOCAB_PRE_TYPE_GPT4O          = 29,
    LLAMA_VOCAB_PRE_TYPE_SUPERBPE       = 30,
    LLAMA_VOCAB_PRE_TYPE_TRILLION       = 31,
    LLAMA_VOCAB_PRE_TYPE_BAILINGMOE     = 32,
    LLAMA_VOCAB_PRE_TYPE_LLAMA4         = 33,
    LLAMA_VOCAB_PRE_TYPE_PIXTRAL        = 34,
    LLAMA_VOCAB_PRE_TYPE_SEED_CODER     = 35,
    LLAMA_VOCAB_PRE_TYPE_HUNYUAN        = 36,
    LLAMA_VOCAB_PRE_TYPE_KIMI_K2        = 37,
    LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE  = 38,
};

struct LLM_KV;
struct llama_model_loader;

struct llama_vocab {
    struct token_data {
        std::string      text;
        float            score;
        llama_token_attr attr;
    };

    llama_vocab();
    ~llama_vocab();

    void load(llama_model_loader & ml, const LLM_KV & kv);

    std::string get_tokenizer_model() const;
    std::string get_tokenizer_pre() const;

    enum llama_vocab_type     get_type() const;
    enum llama_vocab_pre_type get_pre_type() const;

    uint32_t n_tokens() const;
    uint32_t n_token_types() const;

    std::string type_name() const;

    bool is_normal      (llama_token id) const;
    bool is_unknown     (llama_token id) const;
    bool is_control     (llama_token id) const;
    bool is_byte        (llama_token id) const;
    bool is_user_defined(llama_token id) const;
    bool is_unused      (llama_token id) const;
    bool is_eog         (llama_token id) const;

    uint8_t     token_to_byte(llama_token id) const;
    llama_token byte_to_token(uint8_t ch) const;

    llama_token text_to_token(const std::string & text) const;

    const token_data & get_token_data(llama_token id) const;

    const char *     token_get_text (llama_token id) const;
    float            token_get_score(llama_token id) const;
    llama_token_attr token_get_attr (llama_token id) const;

    llama_token token_bos() const;
    llama_token token_eos() const;
    llama_token token_eot() const;
    llama_token token_eom() const;
    llama_token token_unk() const;
    llama_token token_sep() const;
    llama_token token_nl () const;
    llama_token token_pad() const;
    llama_token token_mask() const;

    llama_token token_prefix() const;
    llama_token token_middle() const;
    llama_token token_suffix() const;

    llama_token token_fim_pre() const;
    llama_token token_fim_suf() const;
    llama_token token_fim_mid() const;
    llama_token token_fim_pad() const;
    llama_token token_fim_rep() const;
    llama_token token_fim_sep() const;

    bool get_add_space_prefix          () const;
    bool get_add_bos                   () const;
    bool get_add_eos                   () const;
    bool get_add_sep                   () const;
    bool get_ignore_merges             () const;
    bool get_clean_spaces              () const;
    bool get_remove_extra_whitespaces  () const;
    bool get_escape_whitespaces        () const;
    bool get_treat_whitespace_as_suffix() const;

    int max_token_len() const;

    int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;

    std::vector<std::string> get_bpe_merges() const;

    std::vector<char> get_precompiled_charsmap() const;

    int32_t tokenize(
                   const char * text,
                        int32_t text_len,
                  llama_token * tokens,
                        int32_t n_tokens_max,
                           bool add_special,
                           bool parse_special) const;

    std::vector<llama_token> tokenize(
            const std::string & raw_text,
                           bool add_special,
                           bool parse_special = false) const;

    // does not write null-terminator to buf
    int32_t token_to_piece(
                    llama_token token,
                         char * buf,
                        int32_t length,
                        int32_t lstrip,
                           bool special) const;

    // use cached data
    const std::string & token_to_piece(llama_token token) const;

    int32_t detokenize(
            const llama_token * tokens,
                        int32_t n_tokens,
                         char * text,
                        int32_t text_len_max,
                           bool remove_special,
                           bool unparse_special) const;

    std::string detokenize(
            const std::vector<llama_token> & tokens,
                                        bool special) const;

    void print_info() const;

private:
    struct impl;
    std::unique_ptr<impl> pimpl;
};
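As a usage illustration for the API above, a minimal sketch that round-trips a string through the two convenience overloads; it assumes a fully loaded llama_vocab (obtained via llama_vocab::load, not shown) and is not part of the vendored file:

#include <string>
#include <vector>

#include "llama-vocab.h"

// Tokenize and immediately detokenize, exercising the convenience overloads.
std::string round_trip(const llama_vocab & vocab, const std::string & text) {
    // add_special=true adds BOS etc. per the vocab's settings; the default
    // parse_special=false treats special-token text as plain text.
    std::vector<llama_token> tokens = vocab.tokenize(text, /*add_special=*/true);
    // special=false strips special tokens from the rendered string.
    return vocab.detokenize(tokens, /*special=*/false);
}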