llama_cpp 0.23.3 → 0.23.5

This diff shows the changes between two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 735200bc9f1e7f6c3ab139f99b75e875bc637e640850953d0e6c577e3c5e6f93
4
- data.tar.gz: fb6f65de3231f69fb877ceb61a1a7c3a5bd3a299f12b987c2e0e0038dcf9b7e4
3
+ metadata.gz: c99f433d260e0a14afb6b9a22dd2be6adabe85ff6ea31a1f633a1fcd9ffe8370
4
+ data.tar.gz: d7373bdc9bcf29bc820f21df169da899166add8efe99a907b866f1febc2d8e28
5
5
  SHA512:
6
- metadata.gz: 22706463a44f2f7434dc86494806b21633ae171789edb85f4ffd51d828114c1be2ef803ef90170e2857f4bf2640ebe8708fdb56edeede12b71ee8a6e39e23df6
7
- data.tar.gz: c4c3d1d45cf692da4719aeab96c2c49559d747a7601cdc463e8d6418e3ecf5a336523f4749990cb67e92cbc4abd90a8afb8aa3da9548176b795513761c0f24e5
6
+ metadata.gz: 12f367cbbc6cdb1fc9aabd1ddfb9003d9905cb6348567115cbe7560cc8450d4416aee18e655a213518deec04c01d2b17d9409c1ef25ecda1edefe05704b63610
7
+ data.tar.gz: d5c943ec5347d0297a605eed4b299a134ef28c40130cf0e028dcdc8846518a5d579da872a431586b7914e5bf23454e598aa3c1919cad4be20356c9947dd28ad8
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
1
+ ## [[0.23.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.4...v0.23.5)] - 2025-11-08
2
+
3
+ - Change supported llama.cpp version to b6970.
4
+ - Add `llama_n_ctx_seq` module function to `LlamaCpp`.
5
+
6
+ ## [[0.23.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.3...v0.23.4)] - 2025-11-01
7
+
8
+ - Change supported llama.cpp version to b6900.
9
+ - Add `LLAMA_ROPE_TYPE_IMROPE` constant.
10
+
1
11
  ## [[0.23.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.2...v0.23.3)] - 2025-10-11
2
12
 
3
13
  - Change supported llama.cpp version to b6730.
data/README.md CHANGED
@@ -6,6 +6,9 @@
6
6
 
7
7
  llama_cpp.rb provides Ruby bindings for the [llama.cpp](https://github.com/ggerganov/llama.cpp).
8
8
 
9
+ Note: [rllama](https://github.com/docusealco/rllama) is another Ruby binding for llama.cpp using FFI.
10
+ It provides a high-level API for easier integration.
11
+
9
12
  ## Installation
10
13
 
11
14
  Install the llama.cpp. If you use homebrew, install it by executing:
@@ -1409,6 +1409,20 @@ static VALUE rb_llama_n_ctx(VALUE self, VALUE ctx) {
1409
1409
  return UINT2NUM(llama_n_ctx(context_wrapper->context));
1410
1410
  }
1411
1411
 
1412
+ /**
1413
+ * @overload llama_n_ctx_seq(context)
1414
+ * @param [LlamaContext] context
1415
+ * @return [Integer]
1416
+ */
1417
+ static VALUE rb_llama_n_ctx_seq(VALUE self, VALUE ctx) {
1418
+ if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
1419
+ rb_raise(rb_eArgError, "ctx must be a LlamaContext");
1420
+ return Qnil;
1421
+ }
1422
+ llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
1423
+ return UINT2NUM(llama_n_ctx_seq(context_wrapper->context));
1424
+ }
1425
+
1412
1426
  /**
1413
1427
  * @overload llama_n_batch(context)
1414
1428
  * @param [LlamaContext] context
@@ -4009,6 +4023,7 @@ void Init_llama_cpp(void) {
4009
4023
  rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NORM", INT2NUM(LLAMA_ROPE_TYPE_NORM));
4010
4024
  rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NEOX", INT2NUM(LLAMA_ROPE_TYPE_NEOX));
4011
4025
  rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_MROPE", INT2NUM(LLAMA_ROPE_TYPE_MROPE));
4026
+ rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_IMROPE", INT2NUM(LLAMA_ROPE_TYPE_IMROPE));
4012
4027
  rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_VISION", INT2NUM(LLAMA_ROPE_TYPE_VISION));
4013
4028
  /* llama_token_type */
4014
4029
  /* Document-const: LlamaCpp::LLAMA_TOKEN_TYPE_UNDEFINED */
@@ -4862,6 +4877,9 @@ void Init_llama_cpp(void) {
4862
4877
  /* llama_n_ctx */
4863
4878
  rb_define_module_function(rb_mLlamaCpp, "llama_n_ctx", rb_llama_n_ctx, 1);
4864
4879
 
4880
+ /* llama_n_ctx_seq */
4881
+ rb_define_module_function(rb_mLlamaCpp, "llama_n_ctx_seq", rb_llama_n_ctx_seq, 1);
4882
+
4865
4883
  /* llama_n_batch */
4866
4884
  rb_define_module_function(rb_mLlamaCpp, "llama_n_batch", rb_llama_n_batch, 1);
4867
4885
 
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LlamaCpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.23.3'
6
+ VERSION = '0.23.5'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b6730'
9
+ LLAMA_CPP_VERSION = 'b6970'
10
10
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.23.3
4
+ version: 0.23.5
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku
@@ -33,7 +33,7 @@ metadata:
33
33
  homepage_uri: https://github.com/yoshoku/llama_cpp.rb
34
34
  source_code_uri: https://github.com/yoshoku/llama_cpp.rb
35
35
  changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
36
- documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.3/
36
+ documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.5/
37
37
  rubygems_mfa_required: 'true'
38
38
  rdoc_options: []
39
39
  require_paths: