llama_cpp 0.23.0 → 0.23.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: bdc82f63eb7cca5133f24159dd648fa2722896a3b9cee9cafc11022e28646b5d
-  data.tar.gz: 0adad024f89582c57f1e541cb85b4220b171b64c4b12bb2f38d635efed4b6458
+  metadata.gz: 8309a3a939b3f863ab2759f9302137a6d938c504665c5e5dbf2c2db1780648f4
+  data.tar.gz: 0c84a705a3a2a201dd4adeaed79f4262c7f708d714c1f2af83460ce4c5477907
 SHA512:
-  metadata.gz: b2e9ce298ed0f5d2cb684c4362ed4edadd14af7b3c99ccb989308483b58b81eb659c64b8c91216eb27684f17f36170833a9bb3e3dd4c05ceacf9bc3f0603c159
-  data.tar.gz: aee0bcd5b2cecd91baf0473705ba4e43b74325a0a490f3d7a872a47e629248734cc76076dd18f584855db83671ae00896bf8185a046086cfbc9bcc741fc67064
+  metadata.gz: 2ec3524987e7815c7324710d11f6048776715f1c7b6d6d2b14492823b859cc89d5f60aa27fb363cca375ee426ac029f3aa6ae12c9a6066430d03bdd2f4c3dfe0
+  data.tar.gz: 1d461e4773a2d25b44f9490ead1ab31c51e37a4b4d0cb5b104d24d14292e67491f3f3dbfbaf37895531c8fb9cdeec3cd2eca0add6987315ca792e82ee0f9e92d
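These digests can be checked against a locally downloaded gem. A minimal Ruby sketch using only the standard library, assuming `metadata.gz` and `data.tar.gz` have already been extracted from the `.gem` archive (a `.gem` file is a tar containing both members):

```ruby
require 'digest'

# Hash both members of the unpacked .gem archive and compare the output
# against the published SHA256/SHA512 values above.
%w[metadata.gz data.tar.gz].each do |name|
  puts "#{name} SHA256: #{Digest::SHA256.file(name).hexdigest}"
  puts "#{name} SHA512: #{Digest::SHA512.file(name).hexdigest}"
end
```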
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+## [[0.23.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.1...v0.23.2)] - 2025-09-27
+
+- Change supported llama.cpp version to b6580.
+- Add `llama_memory_breakdown_print` module function.
+
+## [[0.23.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.0...v0.23.1)] - 2025-09-13
+
+- Change supported llama.cpp version to b6440.
+- Add `llama_adapter_get_alora_n_invocation_tokens` module function.
+
 ## [[0.23.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.22.1...v0.23.0)] - 2025-09-05
 
 - Change supported llama.cpp version to b6380.
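Taken together, the two patch releases each surface one new binding. A minimal usage sketch follows; only the two new module functions are confirmed by this diff, while the setup calls (`llama_backend_init`, `llama_model_load_from_file`, `llama_init_from_model`, `llama_adapter_lora_init`, the params classes) and the `.gguf` paths are assumptions based on the gem's C-API-mirroring naming:

```ruby
require 'llama_cpp'

LlamaCpp.llama_backend_init

# Hypothetical setup; the .gguf paths are placeholders.
model_params = LlamaCpp::LlamaModelParams.new
model = LlamaCpp.llama_model_load_from_file('model.gguf', model_params)
ctx_params = LlamaCpp::LlamaContextParams.new
context = LlamaCpp.llama_init_from_model(model, ctx_params)

# New in 0.23.2: log a per-device memory breakdown for the context.
LlamaCpp.llama_memory_breakdown_print(context)

# New in 0.23.1: query an adapter's number of aLoRA invocation tokens.
adapter = LlamaCpp.llama_adapter_lora_init(model, 'alora.gguf')
puts LlamaCpp.llama_adapter_get_alora_n_invocation_tokens(adapter)
```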
@@ -1952,6 +1952,20 @@ static VALUE rb_llama_adapter_lora_free(VALUE self, VALUE adapter) {
   return Qnil;
 }
 
+/**
+ * @overload llama_adapter_get_alora_n_invocation_tokens(adapter)
+ * @param [LlamaAdapterLora] adapter
+ * @return [Integer]
+ */
+static VALUE rb_llama_adapter_get_alora_n_invocation_tokens(VALUE self, VALUE adapter) {
+  if (!rb_obj_is_kind_of(adapter, rb_cLlamaAdapterLora)) {
+    rb_raise(rb_eArgError, "adapter must be a LlamaAdapterLora");
+    return Qnil;
+  }
+  llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
+  return ULONG2NUM(llama_adapter_get_alora_n_invocation_tokens(adapter_wrapper->adapter));
+}
+
 /* llama_memory_t wrapper */
 typedef struct {
   llama_memory_t memory;
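The new binding type-checks its argument before unwrapping it, so anything that is not a `LlamaAdapterLora` raises `ArgumentError`. A short sketch of the expected Ruby-side behavior, with `adapter` standing in for a previously loaded adapter object:

```ruby
# `adapter` is assumed to be a LlamaCpp::LlamaAdapterLora obtained earlier.
n = LlamaCpp.llama_adapter_get_alora_n_invocation_tokens(adapter)
n.is_a?(Integer) # => true; the C result is converted via ULONG2NUM

# The kind_of? guard in the C binding rejects anything else:
LlamaCpp.llama_adapter_get_alora_n_invocation_tokens(42)
# => raises ArgumentError, "adapter must be a LlamaAdapterLora"
```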
@@ -3137,6 +3151,22 @@ static VALUE rb_llama_sampler_reset(VALUE self, VALUE sampler) {
   return Qnil;
 }
 
+/**
+ * @overload llama_memory_breakdown_print(context)
+ * @param [LlamaContext] context
+ * @return [NilClass]
+ */
+static VALUE rb_llama_memory_breakdown_print(VALUE self, VALUE ctx) {
+  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
+    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+    return Qnil;
+  }
+  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
+  llama_memory_breakdown_print(context_wrapper->context);
+  RB_GC_GUARD(ctx);
+  return Qnil;
+}
+
 /**
  * @overload llama_sampler_clone(sampler)
  * @param [LlamaSampler] sampler
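Like the other bindings in this file, `llama_memory_breakdown_print` validates its argument and returns `nil`; the breakdown itself is written through llama.cpp's logger rather than returned to Ruby. A sketch, with `context` standing in for a previously created `LlamaContext`:

```ruby
# `context` is assumed to be a LlamaCpp::LlamaContext created earlier.
result = LlamaCpp.llama_memory_breakdown_print(context) # prints via llama.cpp's log
result # => nil

LlamaCpp.llama_memory_breakdown_print('not a context')
# => raises ArgumentError, "ctx must be a LlamaContext"
```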
@@ -4910,6 +4940,11 @@ void Init_llama_cpp(void) {
   /* llama_adapter_lora_free */
   rb_define_module_function(rb_mLlamaCpp, "llama_adapter_lora_free", rb_llama_adapter_lora_free, 1);
 
+  /* llama_adapter_get_alora_n_invocation_tokens */
+  rb_define_module_function(rb_mLlamaCpp, "llama_adapter_get_alora_n_invocation_tokens", rb_llama_adapter_get_alora_n_invocation_tokens, 1);
+
+  /* TODO: llama_adapter_get_alora_invocation_tokens */
+
   /* TODO: llama_apply_adapter_cvec */
 
   /**
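The registration uses `rb_define_module_function` with an argument count of 1, so the binding shows up as an ordinary one-argument module function on `LlamaCpp`:

```ruby
LlamaCpp.respond_to?(:llama_adapter_get_alora_n_invocation_tokens)  # => true
LlamaCpp.method(:llama_adapter_get_alora_n_invocation_tokens).arity # => 1
```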
@@ -5269,6 +5304,9 @@ void Init_llama_cpp(void) {
   /* llama_perf_sampler_reset */
   rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
 
+  /* llama_memory_breakdown_print */
+  rb_define_module_function(rb_mLlamaCpp, "llama_memory_breakdown_print", rb_llama_memory_breakdown_print, 1);
+
   /* TODO: typedef bool (*llama_opt_param_filter) */
   /* TODO: bool llama_opt_param_filter_all */
   /* TODO: struct llama_opt_params */
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.0'
+  VERSION = '0.23.2'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b6380'
+  LLAMA_CPP_VERSION = 'b6580'
 end
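These two constants give a quick runtime check that an installed gem matches this release:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION           # => "0.23.2"
puts LlamaCpp::LLAMA_CPP_VERSION # => "b6580"
```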
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.0
+  version: 0.23.2
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://yoshoku.github.io/llama_cpp.rb/doc/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.2/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths:
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.7.0
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Ruby bindings for the llama.cpp.
 test_files: []