llama_cpp 0.23.7 → 0.23.8

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a758c0d22d950187f8512312c4866a37a128e3092ca842d18bf499465957ef62
- data.tar.gz: 7dd5c14b42e608ee03af4f436b0c1eee8ba985dabc7a52a893aaef67068b43c4
+ metadata.gz: 7baa70c4e1f7ee628a347b34d4ed15822aba050c83e6438a7e1c8edf6acef8bb
+ data.tar.gz: 7125d429d103d711741b9d6ce5ac97f8ec3a100a0b5d442b707e30f61ba538d8
  SHA512:
- metadata.gz: 4acc2505785d72b668a394a31430f3abbe059d829e91385d318aaf4fc1bbb39983554b6850b36383a44f1e8874eb903b21c18e222f1487b27aee8e3d8f2e20d7
- data.tar.gz: d08c7c7626b66ae6d9d9c323c26aa200e8b05bdae9974002e9cacf1ca812530ad2904daed73795e61850a00c2f1240dcea7a8df443698c680d06b207126bf8b0
+ metadata.gz: 5c766e9ec655976829d04e956adf898cc444e4938d49070e4a2d4a2dab47a3580448f0f1137c906ee37978f7162da56820a2a03aff23a78394a5526164ef94f2
+ data.tar.gz: d0fb98fa54a3db05481fe6c271e693716be3445c47b082b4981a87563ed8fc08df38e12eff68ee0c8fbee7b66b8e1ecaafed3625cd58cc4bbb8a044966a8a1ad
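The checksum changes are the expected by-product of rebuilding the gem. As a rough way to cross-check them, one might hash the archives embedded in a downloaded `.gem` file; the sketch below is an untested illustration that assumes a local `llama_cpp-0.23.8.gem` in the working directory and uses only the standard `rubygems/package` and `digest` libraries.

```ruby
require 'rubygems/package'
require 'digest'

# A .gem file is a plain tar archive containing metadata.gz, data.tar.gz
# and checksums.yaml.gz; hash the first two and compare against the
# SHA256/SHA512 values recorded in checksums.yaml above.
File.open('llama_cpp-0.23.8.gem', 'rb') do |io|
  reader = Gem::Package::TarReader.new(io)
  reader.each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

    data = entry.read
    puts "#{entry.full_name}:"
    puts "  SHA256: #{Digest::SHA256.hexdigest(data)}"
    puts "  SHA512: #{Digest::SHA512.hexdigest(data)}"
  end
  reader.close
end
```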
data/CHANGELOG.md CHANGED
@@ -1,3 +1,22 @@
+ ## [[0.23.8](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.7...v0.23.8)] - 2025-11-30
+
+ - Change supported llama.cpp version to b7180
+ - Add `llama_model_meta_key_str` module function to `LlamaCpp`.
+ - Add constant values for `enum llama_model_meta_key` to `LlamaCpp`.
+   - `LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_TOP_K`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_TOP_P`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_MIN_P`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_TEMP`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU`
+   - `LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA`
+ - Add `llama_model_meta_count` module function to `LlamaCpp`
+
  ## [[0.23.7](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.6...v0.23.7)] - 2025-11-22

  - Fix to use require for compatibility with distributions installing extensions separately.
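To make the new changelog entries concrete, here is a minimal, untested sketch of how the 0.23.8 additions might be called. The two module functions and the `LLAMA_MODEL_META_KEY_SAMPLING_TEMP` constant come straight from this release; the surrounding setup (`llama_backend_init`, `LlamaCpp::LlamaModelParams`, `llama_model_load_from_file`, `llama_model_free`, `llama_backend_free`) and the model path are assumed to work as in earlier llama_cpp.rb releases.

```ruby
require 'llama_cpp'

LlamaCpp.llama_backend_init

model_params = LlamaCpp::LlamaModelParams.new
model = LlamaCpp.llama_model_load_from_file('path/to/model.gguf', model_params)

# New in 0.23.8: number of metadata key/value pairs stored in the model.
puts LlamaCpp.llama_model_meta_count(model)

# New in 0.23.8: GGUF key string for a llama_model_meta_key enum value.
puts LlamaCpp.llama_model_meta_key_str(LlamaCpp::LLAMA_MODEL_META_KEY_SAMPLING_TEMP)

LlamaCpp.llama_model_free(model)
LlamaCpp.llama_backend_free
```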
@@ -1708,6 +1708,34 @@ static VALUE rb_llama_vocab_n_tokens(VALUE self, VALUE vocab) {
  return n_tokens;
  }

+ /**
+ * @overload llama_model_meta_count(model)
+ * @param [LlamaModel] model
+ * @return [Integer]
+ */
+ static VALUE rb_llama_model_meta_count(VALUE self, VALUE model) {
+ if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
+ rb_raise(rb_eArgError, "model must be a LlamaModel");
+ return Qnil;
+ }
+ llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
+ return INT2NUM(llama_model_meta_count(model_wrapper->model));
+ }
+
+ /**
+ * @overload llama_model_meta_key_str(key)
+ * @param [Integer] key (must be one of Llama::LLAMA_MODEL_META_KEY_* constants)
+ * @return [String]
+ */
+ static VALUE rb_llama_model_meta_key_str(VALUE self, VALUE key) {
+ if (!RB_INTEGER_TYPE_P(key)) {
+ rb_raise(rb_eArgError, "key must be an Integer");
+ return Qnil;
+ }
+ const char* key_str = llama_model_meta_key_str(NUM2INT(key));
+ return rb_utf8_str_new_cstr(key_str);
+ }
+
  /**
  * @overload llama_model_desc(model)
  * @param [LlamaModel] model
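Both new bindings above validate their single argument before calling into llama.cpp, so misuse surfaces as a Ruby `ArgumentError`. A small illustration of that behavior (assuming the gem is installed; the messages match the `rb_raise` calls in the hunk):

```ruby
require 'llama_cpp'

# rb_llama_model_meta_key_str rejects anything that is not an Integer.
begin
  LlamaCpp.llama_model_meta_key_str('sampling.temp')
rescue ArgumentError => e
  puts e.message # => "key must be an Integer"
end

# rb_llama_model_meta_count requires a LlamaModel instance.
begin
  LlamaCpp.llama_model_meta_count(nil)
rescue ArgumentError => e
  puts e.message # => "model must be a LlamaModel"
end
```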
@@ -4198,13 +4226,28 @@ void Init_llama_cpp(void) {
  /* TODO: llama_seq_id** seq_id */
  /* TODO: int8_t* logits */

- /* llama_model_kv_override_type */
+ /* enum llama_model_kv_override_type */
  /* Document-const: LlamaCpp::LLAMA_KV_OVERRIDE_TYPE_INT */
  rb_define_const(rb_mLlamaCpp, "LLAMA_KV_OVERRIDE_TYPE_INT", INT2NUM(LLAMA_KV_OVERRIDE_TYPE_INT));
  rb_define_const(rb_mLlamaCpp, "LLAMA_KV_OVERRIDE_TYPE_FLOAT", INT2NUM(LLAMA_KV_OVERRIDE_TYPE_FLOAT));
  rb_define_const(rb_mLlamaCpp, "LLAMA_KV_OVERRIDE_TYPE_BOOL", INT2NUM(LLAMA_KV_OVERRIDE_TYPE_BOOL));
  rb_define_const(rb_mLlamaCpp, "LLAMA_KV_OVERRIDE_TYPE_STR", INT2NUM(LLAMA_KV_OVERRIDE_TYPE_STR));

+ /* enum llama_model_meta_key */
+ /* Document-const: LlamaCpp::LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE */
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_TOP_K", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_TOP_K));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_TOP_P", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_TOP_P));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_MIN_P", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_MIN_P));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_TEMP", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_TEMP));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU));
+ rb_define_const(rb_mLlamaCpp, "LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA", INT2NUM(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA));
+
  /**
  * Document-class: LlamaCpp::LlamaModelKvOverride
  * "struct llama_model_kv_override" wrapper class
@@ -4954,8 +4997,11 @@ void Init_llama_cpp(void) {
  /* llama_vocab_n_tokens */
  rb_define_module_function(rb_mLlamaCpp, "llama_vocab_n_tokens", rb_llama_vocab_n_tokens, 1);

+ /* llama_model_meta_count */
+ rb_define_module_function(rb_mLlamaCpp, "llama_model_meta_count", rb_llama_model_meta_count, 1);
+ /* llama_model_meta_key_str */
+ rb_define_module_function(rb_mLlamaCpp, "llama_model_meta_key_str", rb_llama_model_meta_key_str, 1);
  /* TODO: llama_model_meta_val_str */
- /* TODO: llama_model_meta_count */
  /* TODO: llama_model_meta_key_by_index */
  /* TODO: llama_model_meta_val_str_by_index */

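Because every member of `enum llama_model_meta_key` is now exposed as a `LlamaCpp` constant and both module functions are registered above, the whole set can be listed with plain Ruby reflection. A small sketch (the constant names are taken from the registrations above; the printed key strings depend on the linked llama.cpp build):

```ruby
require 'llama_cpp'

# Enumerate the new LLAMA_MODEL_META_KEY_* constants and print the GGUF
# key string llama.cpp associates with each enum value.
LlamaCpp.constants.map(&:to_s).grep(/\ALLAMA_MODEL_META_KEY_/).sort.each do |name|
  value = LlamaCpp.const_get(name)
  puts format('%-48s %2d  %s', name, value, LlamaCpp.llama_model_meta_key_str(value))
end
```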
@@ -3,8 +3,8 @@
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
  module LlamaCpp
  # The version of llama_cpp.rb you install.
- VERSION = '0.23.7'
+ VERSION = '0.23.8'

  # The supported version of llama.cpp.
- LLAMA_CPP_VERSION = 'b7120'
+ LLAMA_CPP_VERSION = 'b7180'
  end
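After upgrading, the two constants changed above make it easy to confirm at runtime which gem release and llama.cpp revision are in use:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION           # => "0.23.8"
puts LlamaCpp::LLAMA_CPP_VERSION # => "b7180"
```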
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llama_cpp
  version: !ruby/object:Gem::Version
- version: 0.23.7
+ version: 0.23.8
  platform: ruby
  authors:
  - yoshoku
@@ -33,7 +33,7 @@ metadata:
  homepage_uri: https://github.com/yoshoku/llama_cpp.rb
  source_code_uri: https://github.com/yoshoku/llama_cpp.rb
  changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
- documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.7/
+ documentation_uri: https://gemdocs.org/gems/llama_cpp/0.23.8/
  rubygems_mfa_required: 'true'
  rdoc_options: []
  require_paths: