llama_cpp 0.17.6 → 0.17.7

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 430d6b56cbdaeea159e406963dd67dfd3b41630064e1334b4431cb343a5447a4
4
- data.tar.gz: '0873a1f379f4af01a932c264bae5911eebce5b34554eaded582c652e7c687206'
3
+ metadata.gz: ad273fdefc8934e4fdb11f18f20379cd77e89ed9d7e1db84ea4d6d847a676540
4
+ data.tar.gz: ad91a3d8589e66c8ba3fb7e12a354d917fb0aa9596fd75ae773db79eace59cb5
5
5
  SHA512:
6
- metadata.gz: 8976d5cf3c9ed858b974fa3de757496951dc7393946cd0313294eb5debae738fddd8c13a52556c4e09e0a26783e6fe3c22cc2dcdc6f86e2dd11b30fe5194754b
7
- data.tar.gz: 1e46e819eec264bbbcc93dcc0d9f13a8b78614f54e39bc665c1c3f1b3c464c484ecfe1cbbd61ce0c20b524adabf754382b805bd90b274bb58c893d2e3e56e022
6
+ metadata.gz: 733d18dc4a3c7a72a7de1a4df331f5861d0d2321baf7d439c0f581ceec6ad7246b5dda45739b5154b9ec94c61a094915230506292b48de232f01840b29515f1f
7
+ data.tar.gz: 745e2d6f265f72871c258a52af907ecc616bdc2bf1f9dc8fc10d1d81597c930c0a999d4a518fe7f3a6d807df90fee8e559e1ef31e19f03f3d81cfa84d6105b9a
data/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
1
+ ## [[0.17.7](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.6...v0.17.7)] - 2024-08-17
2
+
3
+ - Change supported llama.cpp version to b3590.
4
+ - Add `LLAMA_VOCAB_PRE_TYPE_BLOOM` and `LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH` constants.
5
+
1
6
  ## [[0.17.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.5...v0.17.6)] - 2024-08-09
2
7
 
3
8
  - Change supported llama.cpp version to b3524.
@@ -1553,6 +1553,7 @@ public:
1553
1553
  rb_define_method(rb_cLLaMAModel, "token_is_eog?", RUBY_METHOD_FUNC(_llama_model_token_is_eog), 1);
1554
1554
  rb_define_method(rb_cLLaMAModel, "token_is_control?", RUBY_METHOD_FUNC(_llama_model_token_is_control), 1);
1555
1555
  rb_define_method(rb_cLLaMAModel, "has_encoder?", RUBY_METHOD_FUNC(_llama_model_has_encoder), 0);
1556
+ rb_define_method(rb_cLLaMAModel, "has_decoder?", RUBY_METHOD_FUNC(_llama_model_has_decoder), 0);
1556
1557
  rb_define_method(rb_cLLaMAModel, "decoder_start_token", RUBY_METHOD_FUNC(_llama_model_decoder_start_token), 0);
1557
1558
  rb_define_method(rb_cLLaMAModel, "detokenize", RUBY_METHOD_FUNC(_llama_model_detokenize), -1);
1558
1559
  }
@@ -1903,6 +1904,11 @@ private:
1903
1904
  return llama_model_has_encoder(ptr->model) ? Qtrue : Qfalse;
1904
1905
  }
1905
1906
 
1907
+ static VALUE _llama_model_has_decoder(VALUE self) {
1908
+ LLaMAModelWrapper* ptr = get_llama_model(self);
1909
+ return llama_model_has_decoder(ptr->model) ? Qtrue : Qfalse;
1910
+ }
1911
+
1906
1912
  static VALUE _llama_model_decoder_start_token(VALUE self) {
1907
1913
  LLaMAModelWrapper* ptr = get_llama_model(self);
1908
1914
  return INT2NUM(llama_model_decoder_start_token(ptr->model));
@@ -3623,6 +3629,8 @@ extern "C" void Init_llama_cpp(void) {
3623
3629
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_TEKKEN", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TEKKEN));
3624
3630
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_SMOLLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SMOLLM));
3625
3631
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_CODESHELL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CODESHELL));
3632
+ rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_BLOOM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BLOOM));
3633
+ rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH));
3626
3634
 
3627
3635
  rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_UNDEFINED", INT2NUM(LLAMA_TOKEN_TYPE_UNDEFINED));
3628
3636
  rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_NORMAL", INT2NUM(LLAMA_TOKEN_TYPE_NORMAL));
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LLaMACpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.17.6'
6
+ VERSION = '0.17.7'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b3524'
9
+ LLAMA_CPP_VERSION = 'b3590'
10
10
  end
data/sig/llama_cpp.rbs CHANGED
@@ -40,6 +40,8 @@ module LLaMACpp
40
40
  LLAMA_VOCAB_PRE_TYPE_TEKKEN: Integer
41
41
  LLAMA_VOCAB_PRE_TYPE_SMOLLM: Integer
42
42
  LLAMA_VOCAB_PRE_TYPE_CODESHELL: Integer
43
+ LLAMA_VOCAB_PRE_TYPE_BLOOM: Integer
44
+ LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH: Integer
43
45
 
44
46
  LLAMA_TOKEN_ATTR_UNDEFINED: Integer
45
47
  LLAMA_TOKEN_ATTR_UNKNOWN: Integer
@@ -193,6 +195,7 @@ module LLaMACpp
193
195
  def token_is_eog?: (Integer) -> bool
194
196
  def token_is_control?: (Integer) -> bool
195
197
  def has_encoder?: () -> bool
198
+ def has_decoder?: () -> bool
196
199
  def decoder_start_token: () -> Integer
197
200
  def detokenize: (Array[Integer], ?remove_special: bool, ?unparse_special: bool) -> String
198
201
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.17.6
4
+ version: 0.17.7
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-08-09 00:00:00.000000000 Z
11
+ date: 2024-08-17 00:00:00.000000000 Z
12
12
  dependencies: []
13
13
  description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
14
14
  email: