llama_cpp 0.23.8 → 0.23.9
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/ext/llama_cpp/llama_cpp.c +36 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 68f7460b6ab553b85308547847c5883ea0c38c32cdc27cd8e6584ec0e91a8e25
+  data.tar.gz: ece3d349b204f06232f8ffdaccb989bf679475526e10e2991ea4b940598e6e67
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 66a8a170146f84f50db79c0ae42aa7e808c24dc37ebda1f4132f3a1fc2cf95f333fe8251d43bb43ad80e47b2702dd9f7458838a7ed2173b22340cfa654b3e0af
+  data.tar.gz: f9c27b9ea682c4a5cfd70f2bf2e1a522505b405fd87965413abe544cf1124d6648a3705281dd28afe9681849697858731fd350d73e945788fe6bb9de30450189
```
data/CHANGELOG.md
CHANGED
```diff
@@ -1,3 +1,9 @@
+## [[0.23.9](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.8...v0.23.9)] - 2025-12-20
+
+- Change supported llama.cpp version to b7470
+- Add `no_alloc` accessor to `LlamaModelParams`.
+- Add `llama_max_tensor_buft_overrides` module function to `LlamaCpp`.
+
 ## [[0.23.8](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.7...v0.23.8)] - 2025-11-30
 
 - Change supported llama.cpp version to b7180
```
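From Ruby, the two 0.23.9 additions surface as an accessor pair on `LlamaCpp::LlamaModelParams` and a module function on `LlamaCpp`. A minimal usage sketch, assuming the gem's existing `llama_model_default_params` binding from earlier releases (not part of this diff):

```ruby
require 'llama_cpp'

# New in 0.23.9: upper bound on tensor buffer-type overrides.
puts LlamaCpp.llama_max_tensor_buft_overrides

# New in 0.23.9: read and write the no_alloc flag on the model params.
params = LlamaCpp.llama_model_default_params # assumed existing binding
params.no_alloc = true
puts params.no_alloc # => true
```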
data/ext/llama_cpp/llama_cpp.c
CHANGED
```diff
@@ -551,6 +551,17 @@ static VALUE llama_model_params_set_no_host(VALUE self, VALUE no_host) {
   return no_host;
 }
 
+static VALUE llama_model_params_get_no_alloc(VALUE self) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  return data->no_alloc ? Qtrue : Qfalse;
+}
+
+static VALUE llama_model_params_set_no_alloc(VALUE self, VALUE no_alloc) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  data->no_alloc = RTEST(no_alloc) ? true : false;
+  return no_alloc;
+}
+
 /* struct llama_context_params */
 static void llama_context_params_free(void *ptr) {
   if (ptr) {
@@ -1363,6 +1374,14 @@ static VALUE rb_llama_max_parallel_sequences(VALUE self) {
   return SIZET2NUM(llama_max_parallel_sequences());
 }
 
+/**
+ * @overload llama_max_tensor_buft_overrides
+ * @return [Integer]
+ */
+static VALUE rb_llama_max_tensor_buft_overrides(VALUE self) {
+  return SIZET2NUM(llama_max_tensor_buft_overrides());
+}
+
 /**
  * @overload llama_supports_mmap?
  * @return [Boolean]
@@ -4413,6 +4432,17 @@ void Init_llama_cpp(void) {
    * @return [Boolean]
    */
   rb_define_method(rb_cLlamaModelParams, "no_host=", RUBY_METHOD_FUNC(llama_model_params_set_no_host), 1);
+  /**
+   * Document-method: no_alloc
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_alloc", RUBY_METHOD_FUNC(llama_model_params_get_no_alloc), 0);
+  /**
+   * Document-method: no_alloc=
+   * @param [Boolean] no_alloc_
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_alloc=", RUBY_METHOD_FUNC(llama_model_params_set_no_alloc), 1);
 
   /**
    * Document-class: LlamaCpp::LlamaContextParams
@@ -4910,6 +4940,8 @@ void Init_llama_cpp(void) {
   /* llama_free */
   rb_define_module_function(rb_mLlamaCpp, "llama_free", rb_llama_free, 1);
 
+  /* TODO: llama_params_fit */
+
   /* llama_time_us */
   rb_define_module_function(rb_mLlamaCpp, "llama_time_us", rb_llama_time_us, 0);
 
@@ -4919,6 +4951,9 @@ void Init_llama_cpp(void) {
   /* llama_max_parallel_sequences */
   rb_define_module_function(rb_mLlamaCpp, "llama_max_parallel_sequences", rb_llama_max_parallel_sequences, 0);
 
+  /* llama_max_tensor_buft_overrides */
+  rb_define_module_function(rb_mLlamaCpp, "llama_max_tensor_buft_overrides", rb_llama_max_tensor_buft_overrides, 0);
+
   /* llama_supports_mmap */
   rb_define_module_function(rb_mLlamaCpp, "llama_supports_mmap?", rb_llama_supports_mmap, 0);
 
@@ -5346,6 +5381,7 @@ void Init_llama_cpp(void) {
   /* llama_print_system_info */
   rb_define_module_function(rb_mLlamaCpp, "llama_print_system_info", rb_llama_print_system_info, 0);
 
+  /* TODO: llama_log_get */
   /* TODO: llama_log_set */
 
   /**
```
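One detail worth noting in the new setter: it goes through `RTEST`, so Ruby truthiness rather than strict booleans decides the stored flag, and the writer returns its argument unchanged. A small sketch of the resulting behavior, again assuming the `llama_model_default_params` binding from earlier releases:

```ruby
require 'llama_cpp'

params = LlamaCpp.llama_model_default_params # assumed existing binding

# RTEST counts anything other than nil or false as true, so any
# truthy value enables the flag.
params.no_alloc = 1
puts params.no_alloc # => true
params.no_alloc = nil
puts params.no_alloc # => false
```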
data/lib/llama_cpp/version.rb
CHANGED
```diff
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.8'
+  VERSION = '0.23.9'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b7180'
+  LLAMA_CPP_VERSION = 'b7470'
 end
```
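Since both constants are plain strings, a downstream project can sanity-check the llama.cpp build tag its installed binding targets. An illustrative sketch; the `EXPECTED_LLAMA_CPP` pin and the warning policy are hypothetical, not part of the gem:

```ruby
require 'llama_cpp'

# Hypothetical pin chosen by the downstream project, matching the
# build tag this release declares support for.
EXPECTED_LLAMA_CPP = 'b7470'

if LlamaCpp::LLAMA_CPP_VERSION != EXPECTED_LLAMA_CPP
  warn "llama_cpp #{LlamaCpp::VERSION} targets #{LlamaCpp::LLAMA_CPP_VERSION}, " \
       "expected #{EXPECTED_LLAMA_CPP}"
end
```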
metadata
CHANGED
```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.8
+  version: 0.23.9
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.8/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.9/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths:
```