llama_cpp 0.23.11 → 0.24.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 5fc81dd7e098dace7f301394170fa517ec4d214bc41d76dbbf4b79c162ebff85
-  data.tar.gz: 8641cedea81065a2d7ced2e8db028bf8209de42312e5b3431749c19f605d5134
+  metadata.gz: 841ca6c9c2770fcc1eb8b8783466dc91b8c6fed3ee501622876c158ad58202d4
+  data.tar.gz: 64f01d2b2a42f1b86d673252111ce1f97a286e5a4d651d98ab7410c2e8dcc5b4
 SHA512:
-  metadata.gz: 3cb0176fc18bb430ee7d00177a911ebe204f975ef7d2db88a79c696a1e4b3fde2ae74cc9fa34648294a552b4f026238b00572e35c9bb20b892c14e8108286557
-  data.tar.gz: f1431d0adb6348e78b62e96c96fa017dda979c282a5d5c61a164f287a6a85b3650bb83cf1206c40d0eb3387ddd9c6cab2325d6d2c12824e0ca5a7df4d9576602
+  metadata.gz: a8416e5693835cf0e1d19a0caa04570883bcdfb7f76541739eec647162fdc20282357f243388678bb85e992c9566cd408111a1cdd3e7f1d4e5f63df085054ae3
+  data.tar.gz: ddaf029d96c7676164dfbcd24639b805f2431f7eb62b659c05d0e06597a113ad6feb3cb4b8ac7625dd0479ecbf866c87eb0882800fa48983113af0d9f251c4d7
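For readers who want to verify these values locally: a `.gem` file is a tar archive whose `metadata.gz` and `data.tar.gz` members are what checksums.yaml describes. A minimal sketch (assumes the current directory is an unpacked `llama_cpp-0.24.0.gem`, e.g. extracted with `tar xf`):

```ruby
require 'digest'

# Hash the gem's members and compare against the "+" values in
# checksums.yaml above.
%w[metadata.gz data.tar.gz].each do |member|
  puts "#{member}: #{Digest::SHA256.file(member).hexdigest}"
end
```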
data/CHANGELOG.md CHANGED
@@ -1,3 +1,11 @@
+## [[0.24.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.11...v0.24.0)] - 2026-02-22
+
+- Change supported llama.cpp version to b8110.
+- Add `llama_set_adapters_lora` module function to `LlamaCpp`.
+- Remove `llama_set_adapter_lora` module function.
+- Remove `llama_clear_adapter_lora` module function.
+- Remove `llama_rm_adapter_lora` module function.
+
 ## [[0.23.11](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.10...v0.23.11)] - 2026-01-24
 
 - Change supported llama.cpp version to b7790.
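The 0.24.0 entry replaces the three per-adapter functions with a single batch call. A minimal migration sketch at the Ruby level (the `context` and `adapter_*` objects and their setup are assumptions for illustration; only the function names and the Array/Float argument types come from this release):

```ruby
require 'llama_cpp'

# context: a LlamaContext; adapter_a, adapter_b: LlamaAdapterLora instances
# (their construction is omitted and assumed here).

# Before (0.23.x, removed in 0.24.0): adapters were managed one at a time.
#   LlamaCpp.llama_set_adapter_lora(context, adapter_a, 1.0)
#   LlamaCpp.llama_rm_adapter_lora(context, adapter_b)
#   LlamaCpp.llama_clear_adapter_lora(context)

# After (0.24.0): the whole adapter set is passed at once, one Float scale
# per adapter; both arguments must be Arrays of equal length.
LlamaCpp.llama_set_adapters_lora(context, [adapter_a, adapter_b], [1.0, 0.5])

# Presumably, passing empty arrays replaces the set with nothing, which would
# be the closest analogue of the removed llama_clear_adapter_lora (an
# assumption about the underlying llama.cpp semantics, not confirmed here).
LlamaCpp.llama_set_adapters_lora(context, [], [])
```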
data/ext/llama_cpp/llama_cpp.c CHANGED
@@ -1992,70 +1992,62 @@ static VALUE rb_llama_adapter_meta_count(VALUE self, VALUE adapter) {
 }
 
 /**
- * @overload llama_set_adapter_lora(context, adapter, scale)
+ * @overload llama_set_adapters_lora(context, adapters, scales)
  * @param [LlamaContext] context
- * @param [LlamaAdapterLora] adapter
- * @param [Float] scale
+ * @param [Array<LlamaAdapterLora>] adapters
+ * @param [Array<Float>] scales
  * @return [Integer]
  */
-static VALUE rb_llama_set_adapter_lora(VALUE self, VALUE ctx, VALUE adapter, VALUE scale) {
+static VALUE rb_llama_set_adapters_lora(VALUE self, VALUE ctx, VALUE adapters, VALUE scales) {
   if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
     rb_raise(rb_eArgError, "ctx must be a LlamaContext");
     return Qnil;
   }
-  if (!rb_obj_is_kind_of(adapter, rb_cLlamaAdapterLora)) {
-    rb_raise(rb_eArgError, "adapter must be a LlamaAdapterLora");
+  if (!RB_TYPE_P(adapters, T_ARRAY)) {
+    rb_raise(rb_eArgError, "adapters must be an Array");
     return Qnil;
   }
-  if (!RB_FLOAT_TYPE_P(scale)) {
-    rb_raise(rb_eArgError, "scale must be a Float");
+  if (!RB_TYPE_P(scales, T_ARRAY)) {
+    rb_raise(rb_eArgError, "scales must be an Array");
     return Qnil;
   }
-  llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  const int32_t res = llama_set_adapter_lora(context_wrapper->context, adapter_wrapper->adapter, (float)NUM2DBL(scale));
-  RB_GC_GUARD(ctx);
-  RB_GC_GUARD(adapter);
-  return NUM2INT(res);
-}
-
-/**
- * @overload llama_rm_adapter_lora(context, adapter)
- * @param [LlamaContext] context
- * @param [LlamaAdapterLora] adapter
- * @return [Integer]
- */
-static VALUE rb_llama_rm_adapter_lora(VALUE self, VALUE ctx, VALUE adapter) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+  long n_adapters = RARRAY_LEN(adapters);
+  long n_scales = RARRAY_LEN(scales);
+  if (n_adapters != n_scales) {
+    rb_raise(rb_eArgError, "adapters and scales must have the same length");
     return Qnil;
   }
-  if (!rb_obj_is_kind_of(adapter, rb_cLlamaAdapterLora)) {
-    rb_raise(rb_eArgError, "adapter must be a LlamaAdapterLora");
-    return Qnil;
+  for (long i = 0; i < n_adapters; i++) {
+    VALUE adapter = rb_ary_entry(adapters, i);
+    if (!rb_obj_is_kind_of(adapter, rb_cLlamaAdapterLora)) {
+      rb_raise(rb_eArgError, "adapters must be an Array of LlamaAdapterLora");
+      return Qnil;
+    }
   }
-  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
-  const int32_t res = llama_rm_adapter_lora(context_wrapper->context, adapter_wrapper->adapter);
-  RB_GC_GUARD(ctx);
-  RB_GC_GUARD(adapter);
-  return NUM2INT(res);
-}
-
-/**
- * @overload llama_clear_adapter_lora(context)
- * @param [LlamaContext] context
- * @return [NilClass]
- */
-static VALUE rb_llama_clear_adapter_lora(VALUE self, VALUE ctx) {
-  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
-    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
-    return Qnil;
+  for (long i = 0; i < n_scales; i++) {
+    VALUE scale = rb_ary_entry(scales, i);
+    if (!RB_FLOAT_TYPE_P(scale)) {
+      rb_raise(rb_eArgError, "scales must be an Array of Float");
+      return Qnil;
+    }
+  }
+  struct llama_adapter_lora** adapters_ = ALLOCA_N(struct llama_adapter_lora*, n_adapters);
+  for (long i = 0; i < n_adapters; i++) {
+    VALUE adapter = rb_ary_entry(adapters, i);
+    llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
+    adapters_[i] = adapter_wrapper->adapter;
+  }
+  float* scales_ = ALLOCA_N(float, n_scales);
+  for (long i = 0; i < n_scales; i++) {
+    VALUE scale = rb_ary_entry(scales, i);
+    scales_[i] = (float)NUM2DBL(scale);
   }
   llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
-  llama_clear_adapter_lora(context_wrapper->context);
+  const int32_t res = llama_set_adapters_lora(context_wrapper->context, adapters_, n_adapters, scales_);
   RB_GC_GUARD(ctx);
-  return Qnil;
+  RB_GC_GUARD(adapters);
+  RB_GC_GUARD(scales);
+  return NUM2INT(res);
 }
 
 /**
@@ -5159,14 +5151,8 @@ void Init_llama_cpp(void) {
   /* TODO: llama_adapter_meta_key_by_index */
   /* TODO: llama_adapter_meta_val_str_by_index */
 
-  /* llama_set_adapter_lora */
-  rb_define_module_function(rb_mLlamaCpp, "llama_set_adapter_lora", rb_llama_set_adapter_lora, 3);
-
-  /* llama_rm_adapter_lora */
-  rb_define_module_function(rb_mLlamaCpp, "llama_rm_adapter_lora", rb_llama_rm_adapter_lora, 2);
-
-  /* llama_clear_adapter_lora */
-  rb_define_module_function(rb_mLlamaCpp, "llama_clear_adapter_lora", rb_llama_clear_adapter_lora, 1);
+  /* llama_set_adapters_lora */
+  rb_define_module_function(rb_mLlamaCpp, "llama_set_adapters_lora", rb_llama_set_adapters_lora, 3);
 
   /* llama_adapter_lora_free */
   rb_define_module_function(rb_mLlamaCpp, "llama_adapter_lora_free", rb_llama_adapter_lora_free, 1);
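At the Ruby level, the argument checks added in `rb_llama_set_adapters_lora` surface as `ArgumentError`. A short sketch of the failure modes (the `context` and `adapter` objects are assumed to exist; the error messages are taken from the extension code above):

```ruby
# Arrays of different lengths are rejected before llama.cpp is called:
LlamaCpp.llama_set_adapters_lora(context, [adapter], [1.0, 0.5])
# => ArgumentError: adapters and scales must have the same length

# Scales must be Floats; Integers fail the RB_FLOAT_TYPE_P check:
LlamaCpp.llama_set_adapters_lora(context, [adapter], [1])
# => ArgumentError: scales must be an Array of Float
```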
data/lib/llama_cpp/version.rb CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.11'
+  VERSION = '0.24.0'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b7790'
+  LLAMA_CPP_VERSION = 'b8110'
 end
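After upgrading, the two constants changed above can be checked directly:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION            # => 0.24.0
puts LlamaCpp::LLAMA_CPP_VERSION  # => b8110
```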
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.11
+  version: 0.24.0
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.11/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.24.0/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths: