llama_cpp 0.23.1 → 0.23.3

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 839ef41e6f1588768629f776034abce6ea4c668b5de753ea8d9ac5c4c0d93ddd
-  data.tar.gz: c18f5ac34f673247eea8eb63e3e10aed14cd95abe9f8fcb90d7931a32b94482d
+  metadata.gz: 735200bc9f1e7f6c3ab139f99b75e875bc637e640850953d0e6c577e3c5e6f93
+  data.tar.gz: fb6f65de3231f69fb877ceb61a1a7c3a5bd3a299f12b987c2e0e0038dcf9b7e4
 SHA512:
-  metadata.gz: 1045e721b28f804e6536461f15c0de640fc1201d02c97c8ca807c383e9730d779795fbc745d0404dab51dffa59f90e28a90335dff7513bd1909294e4b3382cd9
-  data.tar.gz: 9c8db351db13d57153c1b939eccf73cd355dae7b7281faaa55e91a41e5762db854e685cd69c25c4c1d9e06a9379dd41d07cb18d1fe82154e9bbf7070617a779e
+  metadata.gz: 22706463a44f2f7434dc86494806b21633ae171789edb85f4ffd51d828114c1be2ef803ef90170e2857f4bf2640ebe8708fdb56edeede12b71ee8a6e39e23df6
+  data.tar.gz: c4c3d1d45cf692da4719aeab96c2c49559d747a7601cdc463e8d6418e3ecf5a336523f4749990cb67e92cbc4abd90a8afb8aa3da9548176b795513761c0f24e5
data/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+## [[0.23.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.2...v0.23.3)] - 2025-10-11
+
+- Change supported llama.cpp version to b6730.
+- Add `llama_model_is_hybrid?` module function.
+- Add `no_host` accessor to `LlamaModelParams`.
+- Add `LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY` constant.
+
+## [[0.23.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.1...v0.23.2)] - 2025-09-27
+
+- Change supported llama.cpp version to b6580.
+- Add `llama_memory_breakdown_print` module function.
+
 ## [[0.23.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.0...v0.23.1)] - 2025-09-13
 
 - Change supported llama.cpp version to b6440.
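Taken together, the 0.23.2 and 0.23.3 additions surface on the Ruby side roughly as below. This is a minimal sketch, not taken from the gem's docs: the GGUF path is a placeholder, and the loading flow follows the README pattern (`llama_backend_init`, `llama_model_load_from_file`).

```ruby
require 'llama_cpp'

LlamaCpp.llama_backend_init

params = LlamaCpp::LlamaModelParams.new
params.no_host = true # new in 0.23.3: maps to llama_model_params.no_host upstream

model = LlamaCpp.llama_model_load_from_file('path/to/model.gguf', params) # placeholder path
puts LlamaCpp.llama_model_is_hybrid?(model)       # new in 0.23.3
puts LlamaCpp::LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY # new in 0.23.3

LlamaCpp.llama_model_free(model)
LlamaCpp.llama_backend_free
```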
ext/llama_cpp/llama_cpp.c CHANGED
@@ -540,6 +540,17 @@ static VALUE llama_model_params_set_use_extra_bufts(VALUE self, VALUE use_extra_
   return use_extra_bufts;
 }
 
+static VALUE llama_model_params_get_no_host(VALUE self) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  return data->no_host ? Qtrue : Qfalse;
+}
+
+static VALUE llama_model_params_set_no_host(VALUE self, VALUE no_host) {
+  struct llama_model_params* data = get_llama_model_params(self);
+  data->no_host = RTEST(no_host) ? true : false;
+  return no_host;
+}
+
 /* struct llama_context_params */
 static void llama_context_params_free(void *ptr) {
   if (ptr) {
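The setter above goes through `RTEST`, so any truthy Ruby value enables the flag, and the getter normalizes the stored C `bool` back to `true`/`false`. A small sketch of that behavior:

```ruby
params = LlamaCpp::LlamaModelParams.new
params.no_host = 1   # any value passing RTEST counts as true
params.no_host       # => true
params.no_host = nil # only nil and false are falsy in Ruby
params.no_host       # => false
```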
@@ -1784,6 +1795,20 @@ static VALUE rb_llama_model_is_recurrent(VALUE self, VALUE model) {
   return llama_model_is_recurrent(model_wrapper->model) ? Qtrue : Qfalse;
 }
 
+/**
+ * @overload llama_model_is_hybrid?(model)
+ * @param [LlamaModel] model
+ * @return [Boolean]
+ */
+static VALUE rb_llama_model_is_hybrid(VALUE self, VALUE model) {
+  if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
+    rb_raise(rb_eArgError, "model must be a LlamaModel");
+    return Qnil;
+  }
+  llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
+  return llama_model_is_hybrid(model_wrapper->model) ? Qtrue : Qfalse;
+}
+
 /**
  * @overload llama_model_is_diffusion?(model)
  * @param [LlamaModel] model
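Like `llama_model_is_recurrent?` just above it, the new predicate type-checks its argument before unwrapping it, so a non-`LlamaModel` raises `ArgumentError` rather than crashing the process. A sketch, assuming `model` was loaded as in the earlier example:

```ruby
LlamaCpp.llama_model_is_hybrid?(model) # => true for hybrid-memory models, false otherwise
LlamaCpp.llama_model_is_hybrid?(:oops) # raises ArgumentError: model must be a LlamaModel
```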
@@ -1963,7 +1988,7 @@ static VALUE rb_llama_adapter_get_alora_n_invocation_tokens(VALUE self, VALUE ad
     return Qnil;
   }
   llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
-  return UINT2NUM(llama_adapter_get_alora_n_invocation_tokens(adapter_wrapper->adapter));
+  return ULONG2NUM(llama_adapter_get_alora_n_invocation_tokens(adapter_wrapper->adapter));
 }
 
 /* llama_memory_t wrapper */
@@ -3151,6 +3176,22 @@ static VALUE rb_llama_sampler_reset(VALUE self, VALUE sampler) {
   return Qnil;
 }
 
+/**
+ * @overload llama_memory_breakdown_print(context)
+ * @param [LlamaContext] context
+ * @return [NilClass]
+ */
+static VALUE rb_llama_memory_breakdown_print(VALUE self, VALUE ctx) {
+  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
+    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+    return Qnil;
+  }
+  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
+  llama_memory_breakdown_print(context_wrapper->context);
+  RB_GC_GUARD(ctx);
+  return Qnil;
+}
+
 /**
  * @overload llama_sampler_clone(sampler)
  * @param [LlamaSampler] sampler
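On the Ruby side this becomes a one-argument module function that prints llama.cpp's memory breakdown through its logger and returns `nil`. A sketch, assuming a context created the usual way via `llama_init_from_model`:

```ruby
context = LlamaCpp.llama_init_from_model(model, LlamaCpp::LlamaContextParams.new)
LlamaCpp.llama_memory_breakdown_print(context) # logs per-device memory usage, returns nil
```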
@@ -3935,6 +3976,7 @@ void Init_llama_cpp(void) {
 
   rb_define_const(rb_mLlamaCpp, "LLAMA_TOKEN_NULL", INT2NUM(LLAMA_TOKEN_NULL));
   rb_define_const(rb_mLlamaCpp, "LLAMA_STATE_SEQ_FLAGS_SWA_ONLY", INT2NUM(LLAMA_STATE_SEQ_FLAGS_SWA_ONLY));
+  rb_define_const(rb_mLlamaCpp, "LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY", INT2NUM(LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY));
 
   sprintf(tmp, "0x%x", LLAMA_FILE_MAGIC_GGLA);
   rb_define_const(rb_mLlamaCpp, "LLAMA_FILE_MAGIC_GGLA", rb_str_new2(tmp));
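The new constant sits next to `LLAMA_STATE_SEQ_FLAGS_SWA_ONLY` and is intended as a flags value for the sequence-state calls. Which `*_ext` state functions the gem binds is not visible in this diff, so the call below is a hypothetical sketch assuming llama.h's `llama_state_seq_get_size_ext` is exposed under the same name:

```ruby
flags = LlamaCpp::LLAMA_STATE_SEQ_FLAGS_PARTIAL_ONLY
seq_id = 0
# Hypothetical: assumes the gem also binds llama.h's llama_state_seq_get_size_ext.
size = LlamaCpp.llama_state_seq_get_size_ext(context, seq_id, flags)
```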
@@ -4288,6 +4330,17 @@ void Init_llama_cpp(void) {
    * @return [Boolean]
    */
   rb_define_method(rb_cLlamaModelParams, "use_extra_bufts=", RUBY_METHOD_FUNC(llama_model_params_set_use_extra_bufts), 1);
+  /**
+   * Document-method: no_host
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_host", RUBY_METHOD_FUNC(llama_model_params_get_no_host), 0);
+  /**
+   * Document-method: no_host=
+   * @param [Boolean] no_host
+   * @return [Boolean]
+   */
+  rb_define_method(rb_cLlamaModelParams, "no_host=", RUBY_METHOD_FUNC(llama_model_params_set_no_host), 1);
 
   /**
    * Document-class: LlamaCpp::LlamaContextParams
@@ -4895,6 +4948,9 @@ void Init_llama_cpp(void) {
   /* llama_model_is_recurrent */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_is_recurrent?", rb_llama_model_is_recurrent, 1);
 
+  /* llama_model_is_hybrid */
+  rb_define_module_function(rb_mLlamaCpp, "llama_model_is_hybrid?", rb_llama_model_is_hybrid, 1);
+
   /* llama_model_is_diffusion */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_is_diffusion?", rb_llama_model_is_diffusion, 1);
 
@@ -5288,6 +5344,9 @@ void Init_llama_cpp(void) {
   /* llama_perf_sampler_reset */
   rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
 
+  /* llama_memory_breakdown_print */
+  rb_define_module_function(rb_mLlamaCpp, "llama_memory_breakdown_print", rb_llama_memory_breakdown_print, 1);
+
   /* TODO: typedef bool (*llama_opt_param_filter) */
   /* TODO: bool llama_opt_param_filter_all */
   /* TODO: struct llama_opt_params */
lib/llama_cpp/version.rb CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.1'
+  VERSION = '0.23.3'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b6440'
+  LLAMA_CPP_VERSION = 'b6730'
 end
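Both constants are plain strings, so checking the installed binding against its pinned llama.cpp build is a one-liner:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION           # => "0.23.3"
puts LlamaCpp::LLAMA_CPP_VERSION # => "b6730"
```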
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.1
+  version: 0.23.3
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://yoshoku.github.io/llama_cpp.rb/doc/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.3/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths:
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.7.0
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Ruby bindings for the llama.cpp.
 test_files: []