llama_cpp 0.17.7 → 0.17.8
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -1
- data/ext/llama_cpp/llama_cpp.cpp +7 -0
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +2 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: d4283cb48aef0a09e98f7e065a4e1f7fdd9532a334c7e184149d4b9ff4593927
|
4
|
+
data.tar.gz: 92db5cafd75f507c8748548ecf12697356ba4b8de08a0f6f9fdb9ee790757883
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 9d18d4bbfe834cfb4c16a86bc231421fc038a5560cb7a2f5fb436db89a418029373057da6a2ad2331830b970df0a90453d4a8da9181ce39dadcb6957172f0d1a
|
7
|
+
data.tar.gz: 3976c8a29227e9cd37e91dee00b9a9ad7fcad3716ba3cda9c6f15773de2ec8ee112d00149717ac89c10c0a198f06e046d9be6cc2cbb1ef45e8fac5403ed4dcab
|
data/CHANGELOG.md
CHANGED
@@ -1,4 +1,10 @@
|
|
1
|
-
## [[0.17.7](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.6...v0.17.7)] - 2024-08-17
|
1
|
+
## [[0.17.8](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.7...v0.17.8)] - 2024-08-25
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b3614.
|
4
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_EXAONE` constant.
|
5
|
+
- Add `is_recurrent?` method to `Model`.
|
6
|
+
|
7
|
+
## [[0.17.7](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.6...v0.17.7)] - 2024-08-17
|
2
8
|
|
3
9
|
- Change supported llama.cpp version to b3590.
|
4
10
|
- Add `LLAMA_VOCAB_PRE_TYPE_BLOOM` and `LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH` constants.
|
data/ext/llama_cpp/llama_cpp.cpp
CHANGED
@@ -1555,6 +1555,7 @@ public:
|
|
1555
1555
|
rb_define_method(rb_cLLaMAModel, "has_encoder?", RUBY_METHOD_FUNC(_llama_model_has_encoder), 0);
|
1556
1556
|
rb_define_method(rb_cLLaMAModel, "has_decoder?", RUBY_METHOD_FUNC(_llama_model_has_decoder), 0);
|
1557
1557
|
rb_define_method(rb_cLLaMAModel, "decoder_start_token", RUBY_METHOD_FUNC(_llama_model_decoder_start_token), 0);
|
1558
|
+
rb_define_method(rb_cLLaMAModel, "is_recurrent?", RUBY_METHOD_FUNC(_llama_model_is_recurrent), 0);
|
1558
1559
|
rb_define_method(rb_cLLaMAModel, "detokenize", RUBY_METHOD_FUNC(_llama_model_detokenize), -1);
|
1559
1560
|
}
|
1560
1561
|
|
@@ -1914,6 +1915,11 @@ private:
|
|
1914
1915
|
return INT2NUM(llama_model_decoder_start_token(ptr->model));
|
1915
1916
|
}
|
1916
1917
|
|
1918
|
+
static VALUE _llama_model_is_recurrent(VALUE self) {
|
1919
|
+
LLaMAModelWrapper* ptr = get_llama_model(self);
|
1920
|
+
return llama_model_is_recurrent(ptr->model) ? Qtrue : Qfalse;
|
1921
|
+
}
|
1922
|
+
|
1917
1923
|
static VALUE _llama_model_detokenize(int argc, VALUE* argv, VALUE self) {
|
1918
1924
|
VALUE kw_args = Qnil;
|
1919
1925
|
ID kw_table[2] = { rb_intern("remove_special"), rb_intern("unparse_special") };
|
@@ -3631,6 +3637,7 @@ extern "C" void Init_llama_cpp(void) {
|
|
3631
3637
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_CODESHELL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CODESHELL));
|
3632
3638
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_BLOOM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BLOOM));
|
3633
3639
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH));
|
3640
|
+
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_EXAONE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_EXAONE));
|
3634
3641
|
|
3635
3642
|
rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_UNDEFINED", INT2NUM(LLAMA_TOKEN_TYPE_UNDEFINED));
|
3636
3643
|
rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_NORMAL", INT2NUM(LLAMA_TOKEN_TYPE_NORMAL));
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LLaMACpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.17.7'
|
6
|
+
VERSION = '0.17.8'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b3590'
|
9
|
+
LLAMA_CPP_VERSION = 'b3614'
|
10
10
|
end
|
data/sig/llama_cpp.rbs
CHANGED
@@ -42,6 +42,7 @@ module LLaMACpp
|
|
42
42
|
LLAMA_VOCAB_PRE_TYPE_CODESHELL: Integer
|
43
43
|
LLAMA_VOCAB_PRE_TYPE_BLOOM: Integer
|
44
44
|
LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH: Integer
|
45
|
+
LLAMA_VOCAB_PRE_TYPE_EXAONE: Integer
|
45
46
|
|
46
47
|
LLAMA_TOKEN_ATTR_UNDEFINED: Integer
|
47
48
|
LLAMA_TOKEN_ATTR_UNKNOWN: Integer
|
@@ -197,6 +198,7 @@ module LLaMACpp
|
|
197
198
|
def has_encoder?: () -> bool
|
198
199
|
def has_decoder?: () -> bool
|
199
200
|
def decoder_start_token: () -> Integer
|
201
|
+
def is_recurrent?: () -> bool
|
200
202
|
def detokenize: (Array[Integer], ?remove_special: bool, ?unparse_special: bool) -> String
|
201
203
|
end
|
202
204
|
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.17.7
|
4
|
+
version: 0.17.8
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2024-08-17 00:00:00.000000000 Z
|
11
|
+
date: 2024-08-25 00:00:00.000000000 Z
|
12
12
|
dependencies: []
|
13
13
|
description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
14
14
|
email:
|