llama_cpp 0.24.3 → 0.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: e4ec06dde3c07e65bccca17e900a37042082fc706c8cddd34e447affe1198ba5
4
- data.tar.gz: a4ed3057e3da8fc8be59ab578faf7fdd89773c17a6ee99d03cb25bb5a9af282d
3
+ metadata.gz: 953b205d7cedadd2f1db35fc301c6b94b1db87e0121317f6c154c204e09e9d56
4
+ data.tar.gz: bbbc1eef7f7312e667fd238b1e5ef353861beaa445c348595470879eccd12280
5
5
  SHA512:
6
- metadata.gz: 71803b7ed68c4c9911bf1a91a63e5a5e8d588e297f8ba8d9a28665e06cc367c2eecf6b644033b57ab50358aacc4cb3c6c69f44c54a121dc521fd843e2d4ac727
7
- data.tar.gz: f35964446f5d954531da4f62862007b97123e158211b01817ffba01409569a8f949b30cf2dc8b8915eebffcf90793978fa5d9d25abf3fbacee991ccfbef48ea8
6
+ metadata.gz: a38097de3f8e5a8acf862bf28db94140efc8f5e31d2be8e9fcbbf461ad79c77614a65acb873d25a706380056295b09cc14f94d90661e4c05e81a7c84a7cc461d
7
+ data.tar.gz: 2225bfdc2526274a6c3b60ec7c6ad0d88c62a291c847d511dd43772b7cc4c978cecbf69dd0fa50d10fafc3a915b95c44d8eebfdb14815777ace34318f1b28066
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
1
+ ## [[0.25.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.24.3...v0.25.0)] - 2026-04-25
2
+
3
+ - Change supported llama.cpp version to b8920.
4
+ - Remove `LLAMA_PARAMS_FIT_STATUS_SUCCESS` constant value.
5
+ - Remove `LLAMA_PARAMS_FIT_STATUS_FAILURE` constant value.
6
+ - Remove `LLAMA_PARAMS_FIT_STATUS_ERROR` constant value.
7
+ - Remove `llama_memory_breakdown_print` module function.
8
+ - Add `LLAMA_FTYPE_MOSTLY_Q1_0` constant value.
9
+ - Add `LLAMA_SPLIT_MODE_TENSOR` constant value.
10
+
1
11
  ## [[0.24.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.24.2...v0.24.3)] - 2026-04-06
2
12
 
3
13
  - Change supported llama.cpp version to b8640.
@@ -3375,22 +3375,6 @@ static VALUE rb_llama_sampler_reset(VALUE self, VALUE sampler) {
3375
3375
  return Qnil;
3376
3376
  }
3377
3377
 
3378
- /**
3379
- * @overload llama_memory_breakdown_print(context)
3380
- * @param [LlamaContext] context
3381
- * @return [NilClass]
3382
- */
3383
- static VALUE rb_llama_memory_breakdown_print(VALUE self, VALUE ctx) {
3384
- if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
3385
- rb_raise(rb_eArgError, "ctx must be a LlamaContext");
3386
- return Qnil;
3387
- }
3388
- llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
3389
- llama_memory_breakdown_print(context_wrapper->context);
3390
- RB_GC_GUARD(ctx);
3391
- return Qnil;
3392
- }
3393
-
3394
3378
  /**
3395
3379
  * @overload llama_sampler_clone(sampler)
3396
3380
  * @param [LlamaSampler] sampler
@@ -4292,6 +4276,7 @@ void Init_llama_cpp(void) {
4292
4276
  rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_TQ2_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ2_0));
4293
4277
  rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_MXFP4_MOE", INT2NUM(LLAMA_FTYPE_MOSTLY_MXFP4_MOE));
4294
4278
  rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_NVFP4", INT2NUM(LLAMA_FTYPE_MOSTLY_NVFP4));
4279
+ rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_Q1_0", INT2NUM(LLAMA_FTYPE_MOSTLY_Q1_0));
4295
4280
  rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_GUESSED", INT2NUM(LLAMA_FTYPE_GUESSED));
4296
4281
  /* llama_rope_scaling_type */
4297
4282
  /* Document-const: LlamaCpp::LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED */
@@ -4324,6 +4309,7 @@ void Init_llama_cpp(void) {
4324
4309
  rb_define_const(rb_mLlamaCpp, "LLAMA_SPLIT_MODE_NONE", INT2NUM(LLAMA_SPLIT_MODE_NONE));
4325
4310
  rb_define_const(rb_mLlamaCpp, "LLAMA_SPLIT_MODE_LAYER", INT2NUM(LLAMA_SPLIT_MODE_LAYER));
4326
4311
  rb_define_const(rb_mLlamaCpp, "LLAMA_SPLIT_MODE_ROW", INT2NUM(LLAMA_SPLIT_MODE_ROW));
4312
+ rb_define_const(rb_mLlamaCpp, "LLAMA_SPLIT_MODE_TENSOR", INT2NUM(LLAMA_SPLIT_MODE_TENSOR));
4327
4313
 
4328
4314
  rb_define_module_function(rb_mLlamaCpp, "llama_flash_attn_type_name", rb_llama_flash_attn_type_name, 1);
4329
4315
 
@@ -5157,12 +5143,6 @@ void Init_llama_cpp(void) {
5157
5143
  /* llama_free */
5158
5144
  rb_define_module_function(rb_mLlamaCpp, "llama_free", rb_llama_free, 1);
5159
5145
 
5160
- /* llama_params_fit_status */
5161
- /* Document-const: LlamaCpp::LLAMA_PARAMS_FIT_STATUS_SUCCESS */
5162
- rb_define_const(rb_mLlamaCpp, "LLAMA_PARAMS_FIT_STATUS_SUCCESS", INT2NUM(LLAMA_PARAMS_FIT_STATUS_SUCCESS));
5163
- rb_define_const(rb_mLlamaCpp, "LLAMA_PARAMS_FIT_STATUS_FAILURE", INT2NUM(LLAMA_PARAMS_FIT_STATUS_FAILURE));
5164
- rb_define_const(rb_mLlamaCpp, "LLAMA_PARAMS_FIT_STATUS_ERROR", INT2NUM(LLAMA_PARAMS_FIT_STATUS_ERROR));
5165
-
5166
5146
  /* TODO: llama_params_fit */
5167
5147
 
5168
5148
  /* llama_time_us */
@@ -5699,9 +5679,6 @@ void Init_llama_cpp(void) {
5699
5679
  /* llama_perf_sampler_reset */
5700
5680
  rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
5701
5681
 
5702
- /* llama_memory_breakdown_print */
5703
- rb_define_module_function(rb_mLlamaCpp, "llama_memory_breakdown_print", rb_llama_memory_breakdown_print, 1);
5704
-
5705
5682
  /* TODO: typedef bool (*llama_opt_param_filter) */
5706
5683
  /* TODO: bool llama_opt_param_filter_all */
5707
5684
  /* TODO: struct llama_opt_params */
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LlamaCpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.24.3'
6
+ VERSION = '0.25.0'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b8640'
9
+ LLAMA_CPP_VERSION = 'b8920'
10
10
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.24.3
4
+ version: 0.25.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku
@@ -33,7 +33,7 @@ metadata:
33
33
  homepage_uri: https://github.com/yoshoku/llama_cpp.rb
34
34
  source_code_uri: https://github.com/yoshoku/llama_cpp.rb
35
35
  changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
36
- documentation_uri: https://gemdocs.org/gems/llama_cpp/0.24.3/
36
+ documentation_uri: https://gemdocs.org/gems/llama_cpp/0.25.0/
37
37
  rubygems_mfa_required: 'true'
38
38
  rdoc_options: []
39
39
  require_paths: