
Commit ccf473b
chore: bump llama.cpp
Signed-off-by: thxCode <thxcode0824@gmail.com>
thxCode committed Jan 14, 2025
1 parent 7a2ddd6 commit ccf473b
Showing 4 changed files with 7 additions and 7 deletions.
4 changes: 2 additions & 2 deletions llama-box/patches/llama.cpp/model.patch
@@ -133,10 +133,10 @@ index f90f5e74..818caf0b 100644
 case LLM_ARCH_NOMIC_BERT:
 case LLM_ARCH_STABLELM:
 diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
-index 96b74e93..6cc05279 100644
+index 4969d262..3c178c90 100644
 --- a/src/llama-vocab.cpp
 +++ b/src/llama-vocab.cpp
-@@ -1429,7 +1429,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
+@@ -1430,7 +1430,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
     special_mask_id = LLAMA_TOKEN_NULL;

     const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
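Context for the hunk above: the loader treats the precompiled charsmap as optional GGUF metadata, probing for the key's index before reading it. A minimal sketch of that lookup pattern, assuming ggml's GGUF API and the key string behind LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP; the helper name is hypothetical:

// Sketch of the lookup pattern in the hunk above: treat the precompiled
// charsmap as optional metadata. Assumes ggml's GGUF API, where
// gguf_find_key returns -1 when the key is absent.
#include "gguf.h"

static bool has_precompiled_charsmap(const struct gguf_context * ctx) {
    // Key string behind LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP.
    const int keyidx = gguf_find_key(ctx, "tokenizer.ggml.precompiled_charsmap");
    return keyidx >= 0;
}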
2 changes: 1 addition & 1 deletion llama-box/patches/llama.cpp/model_py.patch
@@ -1,5 +1,5 @@
 diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
-index 4dc9837a..ba252f57 100755
+index 4dc9837a..d488122e 100755
 --- a/convert_hf_to_gguf.py
 +++ b/convert_hf_to_gguf.py
 @@ -4,6 +4,7 @@
6 changes: 3 additions & 3 deletions llama-box/patches/llama.cpp/vocab.patch
@@ -11,10 +11,10 @@ index a184884c..ec814882 100644
 LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
 LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
 diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
-index 96b74e93..c57a8648 100644
+index 4969d262..44b68cbf 100644
 --- a/src/llama-vocab.cpp
 +++ b/src/llama-vocab.cpp
-@@ -1586,7 +1586,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
+@@ -1587,7 +1587,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
     tokenizer_pre == "megrez") {
         pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
     } else {
@@ -24,7 +24,7 @@ index 96b74e93..c57a8648 100644
 }
 } else if (type == LLAMA_VOCAB_TYPE_SPM) {
     pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-@@ -3074,6 +3075,10 @@ llama_token llama_vocab_pad(const struct llama_vocab * vocab) {
+@@ -3075,6 +3076,10 @@ llama_token llama_vocab_pad(const struct llama_vocab * vocab) {
     return vocab->token_pad();
 }
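The @@ -1586 → @@ -1587 shift above lands inside llama.cpp's pre-tokenizer dispatch, which maps the GGUF tokenizer.ggml.pre string onto a LLAMA_VOCAB_PRE_TYPE_* value; per the hunk counts, vocab.patch grows the final else branch by one line, though the replacement itself is collapsed in this view. A minimal sketch of that dispatch shape, with an illustrative enum subset and a hypothetical helper name, not the verbatim upstream code:

// Sketch of the dispatch shape patched above; the enum subset and helper
// name are illustrative, and the patched else-branch body is not shown
// in this commit view.
#include <string>

enum llama_vocab_pre_type {
    LLAMA_VOCAB_PRE_TYPE_DEFAULT,
    LLAMA_VOCAB_PRE_TYPE_QWEN2,
};

static llama_vocab_pre_type pick_pre_type(const std::string & tokenizer_pre) {
    if (tokenizer_pre == "qwen2" ||
        tokenizer_pre == "megrez") {  // Megrez reuses the Qwen2 pre-tokenizer
        return LLAMA_VOCAB_PRE_TYPE_QWEN2;
    }
    // Upstream raises an error for unknown strings; this else branch is
    // the spot vocab.patch edits (exact replacement collapsed above).
    return LLAMA_VOCAB_PRE_TYPE_DEFAULT;
}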
2 changes: 1 addition & 1 deletion llama.cpp (submodule commit bump)
