llama_cpp 0.18.1 → 0.18.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/ext/llama_cpp/llama_cpp.c +18 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: a043b8beda10d1eed3dfd6ab21932f567fa9ff69a82e14048f19993bbcff9c3a
|
4
|
+
data.tar.gz: '08bba39155267edd1d5a99ad4170e44b3075fe552b096d16bac38d438a9004e5'
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 404c50dedbd90e8c457113145d3029f130a2c6cf8cd9aee9be867c03ac7061c271907649cfb59dd23ff1331d9c64eaf234434a2354c8f3079265a4efe7cd9ea0
|
7
|
+
data.tar.gz: b2b75ed8a826c5eb5683010dcf7ca0ad9f99d3f0cfd5c2016acf1717489e17eef005ad9eaa9997fb2529c9c75f40bc301c4c3804e24b3334036244dec69f4b92
|
data/CHANGELOG.md
CHANGED
@@ -1,4 +1,10 @@
|
|
1
1
|
|
2
|
+
## [[0.18.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.18.1...v0.18.2)] - 2025-03-01
|
3
|
+
|
4
|
+
- Change supported llama.cpp version to b4793
|
5
|
+
- Add `llama_model_n_head_kv` module function.
|
6
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_GPT4O` constant.
|
7
|
+
|
2
8
|
## [[0.18.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.18.0...v0.18.1)] - 2025-02-15
|
3
9
|
|
4
10
|
- Change supported llama.cpp version to b4713
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -1452,6 +1452,20 @@ static VALUE rb_llama_model_n_head(VALUE self, VALUE model) {
|
|
1452
1452
|
return INT2NUM(llama_model_n_head(model_wrapper->model));
|
1453
1453
|
}
|
1454
1454
|
|
1455
|
+
/**
|
1456
|
+
* @overload llama_model_n_head_kv(model)
|
1457
|
+
* @param [LlamaModel] model
|
1458
|
+
* @return [Integer]
|
1459
|
+
*/
|
1460
|
+
static VALUE rb_llama_model_n_head_kv(VALUE self, VALUE model) {
|
1461
|
+
if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
|
1462
|
+
rb_raise(rb_eArgError, "model must be a LlamaModel");
|
1463
|
+
return Qnil;
|
1464
|
+
}
|
1465
|
+
llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
|
1466
|
+
return INT2NUM(llama_model_n_head_kv(model_wrapper->model));
|
1467
|
+
}
|
1468
|
+
|
1455
1469
|
/**
|
1456
1470
|
* @overload llama_model_rope_freq_scale_train(model)
|
1457
1471
|
* @param [LlamaModel] model
|
@@ -3895,6 +3909,7 @@ void Init_llama_cpp(void) {
|
|
3895
3909
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_CHAMELEON", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CHAMELEON));
|
3896
3910
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_MINERVA", INT2NUM(LLAMA_VOCAB_PRE_TYPE_MINERVA));
|
3897
3911
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM));
|
3912
|
+
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT4O", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT4O));
|
3898
3913
|
/* llama_rope_type */
|
3899
3914
|
/* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
|
3900
3915
|
rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
|
@@ -4714,6 +4729,9 @@ void Init_llama_cpp(void) {
|
|
4714
4729
|
/* llama_model_n_head */
|
4715
4730
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_n_head", rb_llama_model_n_head, 1);
|
4716
4731
|
|
4732
|
+
/* llama_model_n_head_kv */
|
4733
|
+
rb_define_module_function(rb_mLlamaCpp, "llama_model_n_head_kv", rb_llama_model_n_head_kv, 1);
|
4734
|
+
|
4717
4735
|
/* llama_model_rope_freq_scale_train */
|
4718
4736
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_rope_freq_scale_train", rb_llama_model_rope_freq_scale_train, 1);
|
4719
4737
|
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.18.1'
|
6
|
+
VERSION = '0.18.2'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b4713'
|
9
|
+
LLAMA_CPP_VERSION = 'b4793'
|
10
10
|
end
|
metadata
CHANGED
@@ -1,13 +1,13 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.18.1
|
4
|
+
version: 0.18.2
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
8
8
|
bindir: exe
|
9
9
|
cert_chain: []
|
10
|
-
date: 2025-02-15 00:00:00.000000000 Z
|
10
|
+
date: 2025-03-01 00:00:00.000000000 Z
|
11
11
|
dependencies: []
|
12
12
|
description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
13
13
|
email:
|