llama_cpp 0.23.1 → 0.23.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/ext/llama_cpp/llama_cpp.c +20 -1
- data/lib/llama_cpp/version.rb +2 -2
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 8309a3a939b3f863ab2759f9302137a6d938c504665c5e5dbf2c2db1780648f4
|
4
|
+
data.tar.gz: 0c84a705a3a2a201dd4adeaed79f4262c7f708d714c1f2af83460ce4c5477907
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 2ec3524987e7815c7324710d11f6048776715f1c7b6d6d2b14492823b859cc89d5f60aa27fb363cca375ee426ac029f3aa6ae12c9a6066430d03bdd2f4c3dfe0
|
7
|
+
data.tar.gz: 1d461e4773a2d25b44f9490ead1ab31c51e37a4b4d0cb5b104d24d14292e67491f3f3dbfbaf37895531c8fb9cdeec3cd2eca0add6987315ca792e82ee0f9e92d
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,8 @@
|
|
1
|
+
## [[0.23.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.1...v0.23.2)] - 2025-09-27
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b6580.
|
4
|
+
- Add `llama_memory_breakdown_print` module function.
|
5
|
+
|
1
6
|
## [[0.23.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.0...v0.23.1)] - 2025-09-13
|
2
7
|
|
3
8
|
- Change supported llama.cpp version to b6440.
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -1963,7 +1963,7 @@ static VALUE rb_llama_adapter_get_alora_n_invocation_tokens(VALUE self, VALUE ad
|
|
1963
1963
|
return Qnil;
|
1964
1964
|
}
|
1965
1965
|
llama_adapter_lora_wrapper* adapter_wrapper = get_llama_adapter_lora_wrapper(adapter);
|
1966
|
-
return
|
1966
|
+
return ULONG2NUM(llama_adapter_get_alora_n_invocation_tokens(adapter_wrapper->adapter));
|
1967
1967
|
}
|
1968
1968
|
|
1969
1969
|
/* llama_memory_t wrapper */
|
@@ -3151,6 +3151,22 @@ static VALUE rb_llama_sampler_reset(VALUE self, VALUE sampler) {
|
|
3151
3151
|
return Qnil;
|
3152
3152
|
}
|
3153
3153
|
|
3154
|
+
/**
|
3155
|
+
* @overload llama_memory_breakdown_print(context)
|
3156
|
+
* @param [LlamaContext] context
|
3157
|
+
* @return [NilClass]
|
3158
|
+
*/
|
3159
|
+
static VALUE rb_llama_memory_breakdown_print(VALUE self, VALUE ctx) {
|
3160
|
+
if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
|
3161
|
+
rb_raise(rb_eArgError, "ctx must be a LlamaContext");
|
3162
|
+
return Qnil;
|
3163
|
+
}
|
3164
|
+
llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
|
3165
|
+
llama_memory_breakdown_print(context_wrapper->context);
|
3166
|
+
RB_GC_GUARD(ctx);
|
3167
|
+
return Qnil;
|
3168
|
+
}
|
3169
|
+
|
3154
3170
|
/**
|
3155
3171
|
* @overload llama_sampler_clone(sampler)
|
3156
3172
|
* @param [LlamaSampler] sampler
|
@@ -5288,6 +5304,9 @@ void Init_llama_cpp(void) {
|
|
5288
5304
|
/* llama_perf_sampler_reset */
|
5289
5305
|
rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
|
5290
5306
|
|
5307
|
+
/* llama_memory_breakdown_print */
|
5308
|
+
rb_define_module_function(rb_mLlamaCpp, "llama_memory_breakdown_print", rb_llama_memory_breakdown_print, 1);
|
5309
|
+
|
5291
5310
|
/* TODO: typedef bool (*llama_opt_param_filter) */
|
5292
5311
|
/* TODO: bool llama_opt_param_filter_all */
|
5293
5312
|
/* TODO: struct llama_opt_params */
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.23.1'
|
6
|
+
VERSION = '0.23.2'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b6440'
|
9
|
+
LLAMA_CPP_VERSION = 'b6580'
|
10
10
|
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.23.1
|
4
|
+
version: 0.23.2
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
@@ -33,7 +33,7 @@ metadata:
|
|
33
33
|
homepage_uri: https://github.com/yoshoku/llama_cpp.rb
|
34
34
|
source_code_uri: https://github.com/yoshoku/llama_cpp.rb
|
35
35
|
changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
|
36
|
-
documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.1/
|
36
|
+
documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.2/
|
37
37
|
rubygems_mfa_required: 'true'
|
38
38
|
rdoc_options: []
|
39
39
|
require_paths:
|
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
49
49
|
- !ruby/object:Gem::Version
|
50
50
|
version: '0'
|
51
51
|
requirements: []
|
52
|
-
rubygems_version: 3.
|
52
|
+
rubygems_version: 3.6.9
|
53
53
|
specification_version: 4
|
54
54
|
summary: Ruby bindings for the llama.cpp.
|
55
55
|
test_files: []
|