llama_cpp 0.20.2 → 0.20.4

This diff shows the changes between publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 28d21cfaf9a14c0796cac348176745618950a8a50c6697a97038c164901dbb2b
-  data.tar.gz: e3ad3a9ae3b7de6817ac3095d09139656f87eba715818849cfef8e6860d112fd
+  metadata.gz: 6acca29477a43c9703c7035e53acd69450de7103b2d7f242506c7e2016f1a261
+  data.tar.gz: 77c108b2f1ea33588a9dbe6c0538e289f90ea5be2090adbf8f663fff8b8b0221
 SHA512:
-  metadata.gz: f5c78500983f3363357be99de9968105c4f13ee1d4fd6709473624dcc752fa80d356d7b43422a61392da67f89dd6f906829bfb45b6aa243af997e7b147775c68
-  data.tar.gz: 5c8861198147c411374acd9fea4aa959f8c225516b1858e81640d6b5460ea0239af2fb24d13832f82d0aa647623097a2603904e7943cfa39baefbdfc53c4c149
+  metadata.gz: 69d453a3cf9c23ab3aaa60c6a20d80a7fc75424cb762c631a06712a2134fc7cf6830168241a3d689fd0b7b621804e27b6461415977fb51c096c10fbb2aa0e922
+  data.tar.gz: bc8de61663616ffd40c90e34df71095a7c85e3aa3373fc0c395fe101ded4f38e1670af42aeb11c6380c6932d2939d137d299d377db3ce9dc0510fd56e9d8b7a5
data/CHANGELOG.md CHANGED
@@ -1,3 +1,16 @@
+## [[0.20.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.3...v0.20.4)] - 2025-06-21
+
+- Change supported llama.cpp version to b5720.
+- Add `llama_vocab_get_add_sep` module function.
+
+## [[0.20.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.2...v0.20.3)] - 2025-06-14
+
+
+- Change supported llama.cpp version to b5650
+- Add `data` argument to `llama_memory_clear` module function.
+- Fix llama_memory_t wrapper by removing unnecessary struct keyword and pointer symbol.
+
+
 ## [[0.20.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.20.1...v0.20.2)] - 2025-06-07
 
 - Change supported llama.cpp version to b5600
data/ext/llama_cpp/llama_cpp.c CHANGED
@@ -1905,14 +1905,14 @@ static VALUE rb_llama_adapter_lora_free(VALUE self, VALUE adapter) {
 
 /* llama_memory_t wrapper */
 typedef struct {
-  struct llama_memory_t* memory;
+  llama_memory_t memory;
 } llama_memory_t_wrapper;
 
 static void llama_memory_t_wrapper_free(void *ptr) {
   llama_memory_t_wrapper* memory_wrapper = (llama_memory_t_wrapper*)ptr;
   if (memory_wrapper) {
     if (memory_wrapper->memory != NULL) {
-      llama_memory_clear(memory_wrapper->memory);
+      llama_memory_clear(memory_wrapper->memory, true);
       memory_wrapper->memory = NULL;
     }
   }
@@ -1947,13 +1947,13 @@ static llama_memory_t_wrapper* get_llama_memory_t_wrapper(VALUE self) {
   return data;
 }
 
-static VALUE rb_llama_memory_clear(VALUE self, VALUE memory) {
+static VALUE rb_llama_memory_clear(VALUE self, VALUE memory, VALUE data) {
   if (!rb_obj_is_kind_of(memory, rb_cLlamaMemoryT)) {
     rb_raise(rb_eArgError, "memory must be a LlamaMemoryT");
     return Qnil;
   }
   llama_memory_t_wrapper* memory_wrapper = get_llama_memory_t_wrapper(memory);
-  llama_memory_clear(memory_wrapper->memory);
+  llama_memory_clear(memory_wrapper->memory, RTEST(data) ? true : false);
   RB_GC_GUARD(memory);
   return Qnil;
 }
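For Ruby callers, the visible change is the new required boolean. A minimal usage sketch of the updated call, assuming an existing `LlamaContext` and assuming the gem exposes `llama_get_memory` to fetch the `LlamaMemoryT` handle (both names mirror upstream llama.cpp and do not appear in this diff):

```ruby
require 'llama_cpp'

# Setup elided: `context` is assumed to be an already-created LlamaContext.
memory = LlamaCpp.llama_get_memory(context) # assumed accessor

# New in 0.20.3: the second argument controls whether the memory's data
# buffers are cleared along with the metadata (true) or kept (false).
LlamaCpp.llama_memory_clear(memory, true)
```

Because the bound arity changed from 1 to 2, the old one-argument call raises `ArgumentError` on 0.20.3 and later.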
@@ -2972,6 +2972,22 @@ static VALUE rb_llama_vocab_get_add_eos(VALUE self, VALUE vocab) {
   return flag ? Qtrue : Qfalse;
 }
 
+/**
+ * @overload llama_vocab_get_add_sep(vocab)
+ *   @param [LlamaVocab] vocab
+ *   @return [Boolean]
+ */
+static VALUE rb_llama_vocab_get_add_sep(VALUE self, VALUE vocab) {
+  if (!rb_obj_is_kind_of(vocab, rb_cLlamaVocab)) {
+    rb_raise(rb_eArgError, "vocab must be a LlamaVocab");
+    return Qnil;
+  }
+  llama_vocab_wrapper* vocab_wrapper = get_llama_vocab_wrapper(vocab);
+  const bool flag = llama_vocab_get_add_sep(vocab_wrapper->vocab);
+  RB_GC_GUARD(vocab);
+  return flag ? Qtrue : Qfalse;
+}
+
 /**
  * @overload llama_vocab_fim_pre(vocab)
  *   @param [LlamaVocab] vocab
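The new binding is a simple predicate on the vocabulary. A hedged usage sketch, assuming the vocabulary is obtained through `llama_model_get_vocab` in the same way as the neighboring `llama_vocab_get_add_bos`/`llama_vocab_get_add_eos` accessors (model loading elided):

```ruby
vocab = LlamaCpp.llama_model_get_vocab(model) # assumed accessor

# Returns true when the tokenizer inserts a SEP token between segments
# (e.g. BERT-style vocabularies), mirroring upstream llama_vocab_get_add_sep.
puts 'tokenizer adds SEP tokens' if LlamaCpp.llama_vocab_get_add_sep(vocab)
```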
@@ -5110,7 +5126,7 @@ void Init_llama_cpp(void) {
   rb_define_alloc_func(rb_cLlamaMemoryT, llama_memory_t_wrapper_alloc);
 
   /* llama_memory_clear */
-  rb_define_module_function(rb_mLlamaCpp, "llama_memory_clear", rb_llama_memory_clear, 1);
+  rb_define_module_function(rb_mLlamaCpp, "llama_memory_clear", rb_llama_memory_clear, 2);
 
   /* llama_memory_seq_rm */
   rb_define_module_function(rb_mLlamaCpp, "llama_memory_seq_rm", rb_llama_memory_seq_rm, 4);
@@ -5275,6 +5291,9 @@ void Init_llama_cpp(void) {
   /* llama_vocab_get_add_eos */
   rb_define_module_function(rb_mLlamaCpp, "llama_vocab_get_add_eos", rb_llama_vocab_get_add_eos, 1);
 
+  /* llama_vocab_get_add_sep */
+  rb_define_module_function(rb_mLlamaCpp, "llama_vocab_get_add_sep", rb_llama_vocab_get_add_sep, 1);
+
   /* llama_vocab_fim_pre */
   rb_define_module_function(rb_mLlamaCpp, "llama_vocab_fim_pre", rb_llama_vocab_fim_pre, 1);
 
data/lib/llama_cpp/version.rb CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.20.2'
+  VERSION = '0.20.4'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b5600'
+  LLAMA_CPP_VERSION = 'b5720'
 end
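Both constants from this file are exposed at runtime, so downstream code can check which gem release it is running and which llama.cpp build that release supports:

```ruby
require 'llama_cpp'

LlamaCpp::VERSION           # => '0.20.4'
LlamaCpp::LLAMA_CPP_VERSION # => 'b5720'
```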
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.20.2
+  version: 0.20.4
 platform: ruby
 authors:
 - yoshoku
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.6.7
+rubygems_version: 3.6.9
 specification_version: 4
 summary: Ruby bindings for the llama.cpp.
 test_files: []