llama_cpp 0.23.2 → 0.23.3
This diff shows the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/ext/llama_cpp/llama_cpp.c +40 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 735200bc9f1e7f6c3ab139f99b75e875bc637e640850953d0e6c577e3c5e6f93
+  data.tar.gz: fb6f65de3231f69fb877ceb61a1a7c3a5bd3a299f12b987c2e0e0038dcf9b7e4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22706463a44f2f7434dc86494806b21633ae171789edb85f4ffd51d828114c1be2ef803ef90170e2857f4bf2640ebe8708fdb56edeede12b71ee8a6e39e23df6
+  data.tar.gz: c4c3d1d45cf692da4719aeab96c2c49559d747a7601cdc463e8d6418e3ecf5a336523f4749990cb67e92cbc4abd90a8afb8aa3da9548176b795513761c0f24e5
```
data/CHANGELOG.md
CHANGED
```diff
@@ -1,3 +1,10 @@
+## [[0.23.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.2...v0.23.3)] - 2025-10-11
+
+- Change supported llama.cpp version to b6730.
+- Add `llama_model_is_hybrid?` module function.
+- Add `no_host` accessor to `LlamaModelParams`.
+- Add `LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY` constant.
+
 ## [[0.23.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.1...v0.23.2)] - 2025-09-27
 
 - Change supported llama.cpp version to b6580.
```
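Taken together, the 0.23.3 additions look roughly like this from Ruby. A minimal sketch, not taken from the diff itself: the GGUF path is a placeholder, and the `llama_model_load_from_file` call is assumed from the gem's convention of mirroring llama.cpp's C API names one-to-one.

```ruby
require 'llama_cpp'

params = LlamaCpp::LlamaModelParams.new
params.no_host = true # new accessor, mirroring llama.cpp's no_host model parameter

# Loading call assumed from the gem's 1:1 C API naming; the path is a placeholder.
model = LlamaCpp.llama_model_load_from_file('model.gguf', params)

# New predicate for hybrid (attention plus recurrent) architectures.
LlamaCpp.llama_model_is_hybrid?(model) # => true or false

# New state-sequence flag constant, defined alongside LLAMA_STATE_SEQ_FLAGS_SWA_ONLY.
LlamaCpp::LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY
```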
data/ext/llama_cpp/llama_cpp.c
CHANGED
```diff
@@ -540,6 +540,17 @@ static VALUE llama_model_params_set_use_extra_bufts(VALUE self, VALUE use_extra_bufts) {
   return use_extra_bufts;
 }
 
+static VALUE llama_model_params_get_no_host(VALUE self) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  return data->no_host ? Qtrue : Qfalse;
+}
+
+static VALUE llama_model_params_set_no_host(VALUE self, VALUE no_host) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  data->no_host = RTEST(no_host) ? true : false;
+  return no_host;
+}
+
 /* struct llama_context_params */
 static void llama_context_params_free(void *ptr) {
   if (ptr) {
```
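The setter funnels its argument through `RTEST`, so any truthy Ruby object enables the flag and only `nil` or `false` clears it, while the getter returns a proper `true`/`false`. A small sketch of the Ruby-visible behavior implied by the C above:

```ruby
params = LlamaCpp::LlamaModelParams.new
params.no_host = 1   # any truthy value is coerced to true
params.no_host       # => true
params.no_host = nil # nil and false coerce to false
params.no_host       # => false
```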
```diff
@@ -1784,6 +1795,20 @@ static VALUE rb_llama_model_is_recurrent(VALUE self, VALUE model) {
   return llama_model_is_recurrent(model_wrapper->model) ? Qtrue : Qfalse;
 }
 
+/**
+ * @overload llama_model_is_hybrid?(model)
+ * @param [LlamaModel] model
+ * @return [Boolean]
+ */
+static VALUE rb_llama_model_is_hybrid(VALUE self, VALUE model) {
+  if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
+    rb_raise(rb_eArgError, "model must be a LlamaModel");
+    return Qnil;
+  }
+  llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
+  return llama_model_is_hybrid(model_wrapper->model) ? Qtrue : Qfalse;
+}
+
 /**
  * @overload llama_model_is_diffusion?(model)
  * @param [LlamaModel] model
```
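`llama_model_is_hybrid?` validates its argument the same way as its `is_recurrent`/`is_diffusion` siblings and raises `ArgumentError` otherwise, so it slots naturally into a dispatch over a loaded model. A sketch, assuming `model` is a `LlamaModel` obtained earlier:

```ruby
kind =
  if LlamaCpp.llama_model_is_recurrent?(model)
    :recurrent
  elsif LlamaCpp.llama_model_is_hybrid?(model) # new in 0.23.3
    :hybrid
  elsif LlamaCpp.llama_model_is_diffusion?(model)
    :diffusion
  else
    :attention
  end
```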
```diff
@@ -3951,6 +3976,7 @@ void Init_llama_cpp(void) {
 
   rb_define_const(rb_mLlamaCpp, "LLAMA_TOKEN_NULL", INT2NUM(LLAMA_TOKEN_NULL));
   rb_define_const(rb_mLlamaCpp, "LLAMA_STATE_SEQ_FLAGS_SWA_ONLY", INT2NUM(LLAMA_STATE_SEQ_FLAGS_SWA_ONLY));
+  rb_define_const(rb_mLlamaCpp, "LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY", INT2NUM(LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY));
 
   sprintf(tmp, "0x%x", LLAMA_FILE_MAGIC_GGLA);
   rb_define_const(rb_mLlamaCpp, "LLAMA_FILE_MAGIC_GGLA", rb_str_new2(tmp));
```
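On the Ruby side the new flag surfaces as a plain Integer constant next to the existing one; its semantics are assumed to follow llama.cpp's `llama_state_seq_flags` for the state-sequence functions:

```ruby
LlamaCpp::LLAMA_STATE_SEQ_FLAGS_SWA_ONLY     # existing
LlamaCpp::LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY # new in 0.23.3
```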
```diff
@@ -4304,6 +4330,17 @@ void Init_llama_cpp(void) {
    * @return [Boolean]
    */
   rb_define_method(rb_cLlamaModelParams, "use_extra_bufts=", RUBY_METHOD_FUNC(llama_model_params_set_use_extra_bufts), 1);
+  /**
+   * Document-method: no_host
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_host", RUBY_METHOD_FUNC(llama_model_params_get_no_host), 0);
+  /**
+   * Document-method: no_host=
+   * @param [Boolean] no_host
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_host=", RUBY_METHOD_FUNC(llama_model_params_set_no_host), 1);
 
   /**
    * Document-class: LlamaCpp::LlamaContextParams
@@ -4911,6 +4948,9 @@ void Init_llama_cpp(void) {
   /* llama_model_is_recurrent */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_is_recurrent?", rb_llama_model_is_recurrent, 1);
 
+  /* llama_model_is_hybrid */
+  rb_define_module_function(rb_mLlamaCpp, "llama_model_is_hybrid?", rb_llama_model_is_hybrid, 1);
+
   /* llama_model_is_diffusion */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_is_diffusion?", rb_llama_model_is_diffusion, 1);
 
```
data/lib/llama_cpp/version.rb
CHANGED
```diff
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.2'
+  VERSION = '0.23.3'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b6580'
+  LLAMA_CPP_VERSION = 'b6730'
 end
```
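Both constants can be read at runtime for sanity checks; a minimal sketch:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION           # => 0.23.3
puts LlamaCpp::LLAMA_CPP_VERSION # => b6730
```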
metadata
CHANGED
```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.2
+  version: 0.23.3
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.2/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.3/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths:
```