llama_cpp 0.20.3 → 0.21.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -1
- data/ext/llama_cpp/llama_cpp.c +20 -38
- data/lib/llama_cpp/version.rb +2 -2
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 691ff52591a63387090485f1d322726b8dc5dc89630cdac4fe2bfcb3372da50d
|
4
|
+
data.tar.gz: c27f3a43878e787f32eaeaa2538d03dca4e2e3de0484ad2e920ec14826cf6ba5
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: df049a84a78bb2d95cd4fe1f63f05c1bb9f965c0e5c3bfaca9668e98fca0db2eb8d80560ffa819540fa25b0a099a0b2a3feafc85a38d73632f3db79445d19a07
|
7
|
+
data.tar.gz: d9138045ba1d37dbaab919ea973ef81306dde30dd6d67048660108dd9f90fe1ffe671becd7f295afa2b01150d73269ac045474373c98ff5805ce35c582dd28d4
|
data/CHANGELOG.md
CHANGED
@@ -1,10 +1,19 @@
|
|
1
|
+
## [[0.21.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.4...v0.21.0)] - 2025-07-12
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b5870.
|
4
|
+
- Remove constants for `llama_vocab_pre_type` such as `LLAMA_VOCAB_PRE_TYPE_DEFAULT` and `LLAMA_VOCAB_PRE_TYPE_LLAMA3`.
|
5
|
+
|
6
|
+
## [[0.20.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.3...v0.20.4)] - 2025-06-21
|
7
|
+
|
8
|
+
- Change supported llama.cpp version to b5720.
|
9
|
+
- Add `llama_vocab_get_add_sep` module function.
|
10
|
+
|
1
11
|
## [[0.20.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.2...v0.20.3)] - 2025-06-14
|
2
12
|
|
3
13
|
- Change supported llama.cpp version to b5650
|
4
14
|
- Add `data` argument to `llama_memory_clear` module function.
|
5
15
|
- Fix llama_memory_t wrapper by removing unnecessary struct keyword and pointer symbol.
|
6
16
|
|
7
|
-
|
8
17
|
## [[0.20.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.1...v0.20.2)] - 2025-06-07
|
9
18
|
|
10
19
|
- Change supported llama.cpp version to b5600
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -2972,6 +2972,22 @@ static VALUE rb_llama_vocab_get_add_eos(VALUE self, VALUE vocab) {
|
|
2972
2972
|
return flag ? Qtrue : Qfalse;
|
2973
2973
|
}
|
2974
2974
|
|
2975
|
+
/**
|
2976
|
+
* @overload llama_vocab_get_add_sep(vocab)
|
2977
|
+
* @param [LlamaVocab] vocab
|
2978
|
+
* @return [Boolean]
|
2979
|
+
*/
|
2980
|
+
static VALUE rb_llama_vocab_get_add_sep(VALUE self, VALUE vocab) {
|
2981
|
+
if (!rb_obj_is_kind_of(vocab, rb_cLlamaVocab)) {
|
2982
|
+
rb_raise(rb_eArgError, "vocab must be a LlamaVocab");
|
2983
|
+
return Qnil;
|
2984
|
+
}
|
2985
|
+
llama_vocab_wrapper* vocab_wrapper = get_llama_vocab_wrapper(vocab);
|
2986
|
+
const bool flag = llama_vocab_get_add_sep(vocab_wrapper->vocab);
|
2987
|
+
RB_GC_GUARD(vocab);
|
2988
|
+
return flag ? Qtrue : Qfalse;
|
2989
|
+
}
|
2990
|
+
|
2975
2991
|
/**
|
2976
2992
|
* @overload llama_vocab_fim_pre(vocab)
|
2977
2993
|
* @param [LlamaVocab] vocab
|
@@ -4138,44 +4154,6 @@ void Init_llama_cpp(void) {
|
|
4138
4154
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_TYPE_WPM", INT2NUM(LLAMA_VOCAB_TYPE_WPM));
|
4139
4155
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_TYPE_UGM", INT2NUM(LLAMA_VOCAB_TYPE_UGM));
|
4140
4156
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_TYPE_RWKV", INT2NUM(LLAMA_VOCAB_TYPE_RWKV));
|
4141
|
-
/* llama_vocab_pre_type */
|
4142
|
-
/* Document-const: LlamaCpp::LLAMA_VOCAB_PRE_TYPE_DEFAULT */
|
4143
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEFAULT", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEFAULT));
|
4144
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA3", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA3));
|
4145
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM));
|
4146
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER));
|
4147
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_FALCON", INT2NUM(LLAMA_VOCAB_PRE_TYPE_FALCON));
|
4148
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_MPT", INT2NUM(LLAMA_VOCAB_PRE_TYPE_MPT));
|
4149
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_STARCODER", INT2NUM(LLAMA_VOCAB_PRE_TYPE_STARCODER));
|
4150
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT2", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT2));
|
4151
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_REFACT", INT2NUM(LLAMA_VOCAB_PRE_TYPE_REFACT));
|
4152
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_COMMAND_R", INT2NUM(LLAMA_VOCAB_PRE_TYPE_COMMAND_R));
|
4153
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_STABLELM2", INT2NUM(LLAMA_VOCAB_PRE_TYPE_STABLELM2));
|
4154
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_QWEN2", INT2NUM(LLAMA_VOCAB_PRE_TYPE_QWEN2));
|
4155
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_OLMO", INT2NUM(LLAMA_VOCAB_PRE_TYPE_OLMO));
|
4156
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DBRX", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DBRX));
|
4157
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SMAUG", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SMAUG));
|
4158
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_PORO", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PORO));
|
4159
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_CHATGLM3", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CHATGLM3));
|
4160
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_CHATGLM4", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CHATGLM4));
|
4161
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_VIKING", INT2NUM(LLAMA_VOCAB_PRE_TYPE_VIKING));
|
4162
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_JAIS", INT2NUM(LLAMA_VOCAB_PRE_TYPE_JAIS));
|
4163
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_TEKKEN", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TEKKEN));
|
4164
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SMOLLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SMOLLM));
|
4165
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_CODESHELL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CODESHELL));
|
4166
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BLOOM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BLOOM));
|
4167
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH));
|
4168
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_EXAONE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_EXAONE));
|
4169
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_CHAMELEON", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CHAMELEON));
|
4170
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_MINERVA", INT2NUM(LLAMA_VOCAB_PRE_TYPE_MINERVA));
|
4171
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM));
|
4172
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT4O", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT4O));
|
4173
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SUPERBPE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SUPERBPE));
|
4174
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_TRILLION", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TRILLION));
|
4175
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BAILINGMOE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BAILINGMOE));
|
4176
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA4", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA4));
|
4177
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_PIXTRAL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PIXTRAL));
|
4178
|
-
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SEED_CODER", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SEED_CODER));
|
4179
4157
|
/* llama_rope_type */
|
4180
4158
|
/* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
|
4181
4159
|
rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
|
@@ -4879,6 +4857,7 @@ void Init_llama_cpp(void) {
|
|
4879
4857
|
/* TODO: void* imatrix */
|
4880
4858
|
/* TODO: void* kv_overrides */
|
4881
4859
|
/* TODO: void* tensor_types */
|
4860
|
+
/* TODO: void* prune_layers */
|
4882
4861
|
|
4883
4862
|
/**
|
4884
4863
|
* Document-class: LlamaCpp::LlamaLogitBias
|
@@ -5275,6 +5254,9 @@ void Init_llama_cpp(void) {
|
|
5275
5254
|
/* llama_vocab_get_add_eos */
|
5276
5255
|
rb_define_module_function(rb_mLlamaCpp, "llama_vocab_get_add_eos", rb_llama_vocab_get_add_eos, 1);
|
5277
5256
|
|
5257
|
+
/* llama_vocab_get_add_sep */
|
5258
|
+
rb_define_module_function(rb_mLlamaCpp, "llama_vocab_get_add_sep", rb_llama_vocab_get_add_sep, 1);
|
5259
|
+
|
5278
5260
|
/* llama_vocab_fim_pre */
|
5279
5261
|
rb_define_module_function(rb_mLlamaCpp, "llama_vocab_fim_pre", rb_llama_vocab_fim_pre, 1);
|
5280
5262
|
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.
|
6
|
+
VERSION = '0.21.0'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = '
|
9
|
+
LLAMA_CPP_VERSION = 'b5870'
|
10
10
|
end
|