llama_cpp 0.23.4 → 0.23.6
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/ext/llama_cpp/llama_cpp.c +34 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fb3a91df1f8212ccadeaa8472342d319f47810ee6d020117efe513de2d80f9f1
+  data.tar.gz: 6d6e63c0807c87ce80ce4fcbfc578e73bfae0edb5aa5db6ef248b1aa1c71acce
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bf2a09e356f8a5572a9e8e7162fc884a95c09542458f6591544cd53bd1716fc4f0b68815fcf8a2b8e8632acda303007f5c34af8f4ceb535fa0c139fbe134c65a
+  data.tar.gz: f7be7d1080f636ff44332098c95e04b2ce59a1a90fc3c5ed3bf179e08360d0717e5f7f5a4a186ed773ad15f1bff59036f30c57cc65b02558750b7aab47a4c8b3
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,13 @@
+## [[0.23.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.5...v0.23.6)] - 2025-11-15
+
+- Change supported llama.cpp version to b7030.
+- Add `llama_model_n_embd_inp` module function to `LlamaCpp`.
+
+## [[0.23.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.4...v0.23.5)] - 2025-11-08
+
+- Change supported llama.cpp version to b6970.
+- Add `llama_n_ctx_seq` module function to `LlamaCpp`.
+
 ## [[0.23.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.3...v0.23.4)] - 2025-11-01
 
 - Change supported llama.cpp version to b6900.
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -1409,6 +1409,20 @@ static VALUE rb_llama_n_ctx(VALUE self, VALUE ctx) {
   return UINT2NUM(llama_n_ctx(context_wrapper->context));
 }
 
+/**
+ * @overload llama_n_ctx_seq(context)
+ * @param [LlamaContext] context
+ * @return [Integer]
+ */
+static VALUE rb_llama_n_ctx_seq(VALUE self, VALUE ctx) {
+  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
+    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+    return Qnil;
+  }
+  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
+  return UINT2NUM(llama_n_ctx_seq(context_wrapper->context));
+}
+
 /**
  * @overload llama_n_batch(context)
  * @param [LlamaContext] context
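The new binding follows the same pattern as rb_llama_n_ctx just above it: check the receiver class, unwrap the context struct, and forward to llama.cpp. From Ruby it is called as a module function. A minimal sketch, assuming the gem's usual C-API-mirroring setup; llama_backend_init, llama_model_load_from_file, llama_init_from_model, and the LlamaModelParams/LlamaContextParams classes are not part of this diff, and 'model.gguf' is a placeholder path:

  require 'llama_cpp'

  LlamaCpp.llama_backend_init

  # Placeholder model path; the loader and params classes are assumed from the
  # gem's C-API-mirroring style rather than taken from this diff.
  model = LlamaCpp.llama_model_load_from_file('model.gguf', LlamaCpp::LlamaModelParams.new)
  context = LlamaCpp.llama_init_from_model(model, LlamaCpp::LlamaContextParams.new)

  puts LlamaCpp.llama_n_ctx(context)     # total context size
  puts LlamaCpp.llama_n_ctx_seq(context) # context size available per sequence (new in 0.23.5)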
@@ -1543,6 +1557,20 @@ static VALUE rb_llama_model_n_embd(VALUE self, VALUE model) {
   return INT2NUM(llama_model_n_embd(model_wrapper->model));
 }
 
+/**
+ * @overload llama_model_n_embd_inp(model)
+ * @param [LlamaModel] model
+ * @return [Integer]
+ */
+static VALUE rb_llama_model_n_embd_inp(VALUE self, VALUE model) {
+  if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
+    rb_raise(rb_eArgError, "model must be a LlamaModel");
+    return Qnil;
+  }
+  llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
+  return INT2NUM(llama_model_n_embd_inp(model_wrapper->model));
+}
+
 /**
  * @overload llama_model_n_layer(model)
  * @param [LlamaModel] model
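llama_model_n_embd_inp mirrors llama_model_n_embd directly above: same guard, same unwrap, with INT2NUM on the result. It reports the model's input embedding width, which llama.cpp can report as different from the output embedding width on some architectures. Continuing the sketch from the previous hunk (same assumptions, reusing model):

  n_embd     = LlamaCpp.llama_model_n_embd(model)
  n_embd_inp = LlamaCpp.llama_model_n_embd_inp(model) # new in 0.23.6
  puts "n_embd=#{n_embd} n_embd_inp=#{n_embd_inp}"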
@@ -4863,6 +4891,9 @@ void Init_llama_cpp(void) {
   /* llama_n_ctx */
   rb_define_module_function(rb_mLlamaCpp, "llama_n_ctx", rb_llama_n_ctx, 1);
 
+  /* llama_n_ctx_seq */
+  rb_define_module_function(rb_mLlamaCpp, "llama_n_ctx_seq", rb_llama_n_ctx_seq, 1);
+
   /* llama_n_batch */
   rb_define_module_function(rb_mLlamaCpp, "llama_n_batch", rb_llama_n_batch, 1);
 
@@ -4893,6 +4924,9 @@ void Init_llama_cpp(void) {
   /* llama_model_n_embd */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_n_embd", rb_llama_model_n_embd, 1);
 
+  /* llama_model_n_embd_inp */
+  rb_define_module_function(rb_mLlamaCpp, "llama_model_n_embd_inp", rb_llama_model_n_embd_inp, 1);
+
   /* llama_model_n_layer */
   rb_define_module_function(rb_mLlamaCpp, "llama_model_n_layer", rb_llama_model_n_layer, 1);
 
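Both functions are registered with rb_define_module_function and an arity of 1, and the guards in their definitions raise an ArgumentError for a wrong receiver type instead of crashing inside the C extension. A quick sketch of that behavior, using only what this diff shows:

  require 'llama_cpp'

  begin
    LlamaCpp.llama_n_ctx_seq('not a context')
  rescue ArgumentError => e
    puts e.message # => "ctx must be a LlamaContext"
  end

  begin
    LlamaCpp.llama_model_n_embd_inp(nil)
  rescue ArgumentError => e
    puts e.message # => "model must be a LlamaModel"
  end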
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.23.4'
+  VERSION = '0.23.6'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b6900'
+  LLAMA_CPP_VERSION = 'b7030'
 end
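Because the accessors above only exist from 0.23.5 and 0.23.6 onward, code that must also run against older gem releases can feature-detect instead of parsing version strings; a small sketch:

  require 'llama_cpp'

  puts LlamaCpp::VERSION           # => "0.23.6"
  puts LlamaCpp::LLAMA_CPP_VERSION # => "b7030"

  # Feature-detect the new accessor rather than comparing version strings.
  puts "llama_n_ctx_seq available: #{LlamaCpp.respond_to?(:llama_n_ctx_seq)}"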
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.23.4
+  version: 0.23.6
 platform: ruby
 authors:
 - yoshoku
@@ -33,7 +33,7 @@ metadata:
   homepage_uri: https://github.com/yoshoku/llama_cpp.rb
   source_code_uri: https://github.com/yoshoku/llama_cpp.rb
   changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
-  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.4/
+  documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.6/
   rubygems_mfa_required: 'true'
 rdoc_options: []
 require_paths: