llama_cpp 0.17.9 → 0.17.10

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: d8e90419e0ffcd183523d6aa45f2ef54d4a5697d80e0862e6c358e27ba4a6a1e
4
- data.tar.gz: 85128e367d0523e99a5302b9c765c20d6d55b3ed357f1785921f31a5fc2a0cea
3
+ metadata.gz: ec7b41006345cfe57e355d207718ae460a5a2c519d67027daddb7d40ab346754
4
+ data.tar.gz: efcfa7eef5374790cef788f719bede1ab3cd30e5dec79be0ba9844bfbcd433c2
5
5
  SHA512:
6
- metadata.gz: 76218e8970d649e01ebfee4c90727d1d563bf1f456727910a0755e52642e50ddf65160ee4c8613632eab419747f933aaced032f331293bed6e59810539b1fa05
7
- data.tar.gz: b3aa1b46bb1262fd681c677fbcf10bcc43eb507b3fd3db9c1c243b5c61c87ab290b1a38e3eb265109d2a4e5bbcc61bc9aa0a2d84aaf004822af2f529afb436bd
6
+ metadata.gz: c39150e77652060fcfee2c73b1334ff53ed40caf58679efa727a0d0d54bffe036b8795be9751cef8517b5d4b2300fb77fa5753a9c94b803988ec429b999591c2
7
+ data.tar.gz: 6054dee2f39ac2690de2836405ac793bf987538ef0780cffab2700544bc60ceaca8a4b109407c66dcf2dd9f38ffe6b98659d7ca4ddb5c9e93fa85d9884cea26e
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
1
+ ## [[0.17.10](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.9...v0.17.10)] - 2024-09-07
2
+
3
+ - Change supported llama.cpp version to b3676.
4
+ - Add `LLAMA_VOCAB_TYPE_RWKV` constant.
5
+ - Add `LLAMA_FTYPE_MOSTLY_TQ1_0` and `LLAMA_FTYPE_MOSTLY_TQ2_0` constants.
6
+ - Change type of n_threads and n_threads_batch from uint32_t to int32_t in native extension code.
7
+
8
+ Implementation of bindings for llama_attach_threadpool and llama_detach_threadpool has been skipped.
9
+
1
10
  ## [[0.17.9](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.8...v0.17.9)] - 2024-08-31
2
11
 
3
12
  - Change supported llama.cpp version to b3639.
@@ -2512,7 +2512,7 @@ private:
2512
2512
  rb_raise(rb_eArgError, "LLaMA context is not initialized");
2513
2513
  return Qnil;
2514
2514
  }
2515
- llama_set_n_threads(ptr->ctx, NUM2UINT(n_threads), NUM2UINT(n_threads_batch));
2515
+ llama_set_n_threads(ptr->ctx, NUM2INT(n_threads), NUM2INT(n_threads_batch));
2516
2516
  return Qnil;
2517
2517
  }
2518
2518
 
@@ -2558,7 +2558,7 @@ private:
2558
2558
  rb_raise(rb_eRuntimeError, "LLaMA context is not initialized");
2559
2559
  return Qnil;
2560
2560
  }
2561
- return UINT2NUM(llama_n_threads(ptr->ctx));
2561
+ return INT2NUM(llama_n_threads(ptr->ctx));
2562
2562
  }
2563
2563
 
2564
2564
  static VALUE _llama_context_n_threads_batch(VALUE self) {
@@ -2567,7 +2567,7 @@ private:
2567
2567
  rb_raise(rb_eRuntimeError, "LLaMA context is not initialized");
2568
2568
  return Qnil;
2569
2569
  }
2570
- return UINT2NUM(llama_n_threads_batch(ptr->ctx));
2570
+ return INT2NUM(llama_n_threads_batch(ptr->ctx));
2571
2571
  }
2572
2572
 
2573
2573
  static VALUE _llama_context_get_timings(VALUE self) {
@@ -3611,6 +3611,7 @@ extern "C" void Init_llama_cpp(void) {
3611
3611
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_BPE", INT2NUM(LLAMA_VOCAB_TYPE_BPE));
3612
3612
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_WPM", INT2NUM(LLAMA_VOCAB_TYPE_WPM));
3613
3613
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_UGM", INT2NUM(LLAMA_VOCAB_TYPE_UGM));
3614
+ rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_RWKV", INT2NUM(LLAMA_VOCAB_TYPE_RWKV));
3614
3615
 
3615
3616
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_DEFAULT", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEFAULT));
3616
3617
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA3", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA3));
@@ -3690,6 +3691,8 @@ extern "C" void Init_llama_cpp(void) {
3690
3691
  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_4", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_4));
3691
3692
  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_8));
3692
3693
  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_8_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_8_8));
3694
+ rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_TQ1_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ1_0));
3695
+ rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_TQ2_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ2_0));
3693
3696
 
3694
3697
  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_GUESSED", INT2NUM(LLAMA_FTYPE_GUESSED));
3695
3698
 
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LLaMACpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.17.9'
6
+ VERSION = '0.17.10'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b3639'
9
+ LLAMA_CPP_VERSION = 'b3676'
10
10
  end
data/sig/llama_cpp.rbs CHANGED
@@ -16,6 +16,7 @@ module LLaMACpp
16
16
  LLAMA_VOCAB_TYPE_BPE: Integer
17
17
  LLAMA_VOCAB_TYPE_WPM: Integer
18
18
  LLAMA_VOCAB_TYPE_UGM: Integer
19
+ LLAMA_VOCAB_TYPE_RWKV: Integer
19
20
 
20
21
  LLAMA_VOCAB_PRE_TYPE_DEFAULT: Integer
21
22
  LLAMA_VOCAB_PRE_TYPE_LLAMA3: Integer
@@ -87,6 +88,8 @@ module LLaMACpp
87
88
  LLAMA_FTYPE_MOSTLY_Q4_0_4_4: Integer
88
89
  LLAMA_FTYPE_MOSTLY_Q4_0_4_8: Integer
89
90
  LLAMA_FTYPE_MOSTLY_Q4_0_8_8: Integer
91
+ LLAMA_FTYPE_MOSTLY_TQ1_0: Integer
92
+ LLAMA_FTYPE_MOSTLY_TQ2_0: Integer
90
93
  LLAMA_FTYPE_GUESSED: Integer
91
94
 
92
95
  LLAMA_KV_OVERRIDE_TYPE_INT: Integer
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.17.9
4
+ version: 0.17.10
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku