llama_cpp 0.17.6 → 0.17.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/ext/llama_cpp/llama_cpp.cpp +8 -0
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +3 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: ad273fdefc8934e4fdb11f18f20379cd77e89ed9d7e1db84ea4d6d847a676540
|
4
|
+
data.tar.gz: ad91a3d8589e66c8ba3fb7e12a354d917fb0aa9596fd75ae773db79eace59cb5
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 733d18dc4a3c7a72a7de1a4df331f5861d0d2321baf7d439c0f581ceec6ad7246b5dda45739b5154b9ec94c61a094915230506292b48de232f01840b29515f1f
|
7
|
+
data.tar.gz: 745e2d6f265f72871c258a52af907ecc616bdc2bf1f9dc8fc10d1d81597c930c0a999d4a518fe7f3a6d807df90fee8e559e1ef31e19f03f3d81cfa84d6105b9a
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,8 @@
|
|
1
|
+
## [[0.17.7](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.6...v0.17.7)] - 2024-08-17
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b3590.
|
4
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_BLOOM` and `LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH` constants
|
5
|
+
|
1
6
|
## [[0.17.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.5...v0.17.6)] - 2024-08-09
|
2
7
|
|
3
8
|
- Change supported llama.cpp version to b3524.
|
data/ext/llama_cpp/llama_cpp.cpp
CHANGED
@@ -1553,6 +1553,7 @@ public:
|
|
1553
1553
|
rb_define_method(rb_cLLaMAModel, "token_is_eog?", RUBY_METHOD_FUNC(_llama_model_token_is_eog), 1);
|
1554
1554
|
rb_define_method(rb_cLLaMAModel, "token_is_control?", RUBY_METHOD_FUNC(_llama_model_token_is_control), 1);
|
1555
1555
|
rb_define_method(rb_cLLaMAModel, "has_encoder?", RUBY_METHOD_FUNC(_llama_model_has_encoder), 0);
|
1556
|
+
rb_define_method(rb_cLLaMAModel, "has_decoder?", RUBY_METHOD_FUNC(_llama_model_has_decoder), 0);
|
1556
1557
|
rb_define_method(rb_cLLaMAModel, "decoder_start_token", RUBY_METHOD_FUNC(_llama_model_decoder_start_token), 0);
|
1557
1558
|
rb_define_method(rb_cLLaMAModel, "detokenize", RUBY_METHOD_FUNC(_llama_model_detokenize), -1);
|
1558
1559
|
}
|
@@ -1903,6 +1904,11 @@ private:
|
|
1903
1904
|
return llama_model_has_encoder(ptr->model) ? Qtrue : Qfalse;
|
1904
1905
|
}
|
1905
1906
|
|
1907
|
+
static VALUE _llama_model_has_decoder(VALUE self) {
|
1908
|
+
LLaMAModelWrapper* ptr = get_llama_model(self);
|
1909
|
+
return llama_model_has_decoder(ptr->model) ? Qtrue : Qfalse;
|
1910
|
+
}
|
1911
|
+
|
1906
1912
|
static VALUE _llama_model_decoder_start_token(VALUE self) {
|
1907
1913
|
LLaMAModelWrapper* ptr = get_llama_model(self);
|
1908
1914
|
return INT2NUM(llama_model_decoder_start_token(ptr->model));
|
@@ -3623,6 +3629,8 @@ extern "C" void Init_llama_cpp(void) {
|
|
3623
3629
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_TEKKEN", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TEKKEN));
|
3624
3630
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_SMOLLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SMOLLM));
|
3625
3631
|
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_CODESHELL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_CODESHELL));
|
3632
|
+
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_BLOOM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BLOOM));
|
3633
|
+
rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH));
|
3626
3634
|
|
3627
3635
|
rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_UNDEFINED", INT2NUM(LLAMA_TOKEN_TYPE_UNDEFINED));
|
3628
3636
|
rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_NORMAL", INT2NUM(LLAMA_TOKEN_TYPE_NORMAL));
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LLaMACpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.17.6'
|
6
|
+
VERSION = '0.17.7'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b3524'
|
9
|
+
LLAMA_CPP_VERSION = 'b3590'
|
10
10
|
end
|
data/sig/llama_cpp.rbs
CHANGED
@@ -40,6 +40,8 @@ module LLaMACpp
|
|
40
40
|
LLAMA_VOCAB_PRE_TYPE_TEKKEN: Integer
|
41
41
|
LLAMA_VOCAB_PRE_TYPE_SMOLLM: Integer
|
42
42
|
LLAMA_VOCAB_PRE_TYPE_CODESHELL: Integer
|
43
|
+
LLAMA_VOCAB_PRE_TYPE_BLOOM: Integer
|
44
|
+
LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH: Integer
|
43
45
|
|
44
46
|
LLAMA_TOKEN_ATTR_UNDEFINED: Integer
|
45
47
|
LLAMA_TOKEN_ATTR_UNKNOWN: Integer
|
@@ -193,6 +195,7 @@ module LLaMACpp
|
|
193
195
|
def token_is_eog?: (Integer) -> bool
|
194
196
|
def token_is_control?: (Integer) -> bool
|
195
197
|
def has_encoder?: () -> bool
|
198
|
+
def has_decoder?: () -> bool
|
196
199
|
def decoder_start_token: () -> Integer
|
197
200
|
def detokenize: (Array[Integer], ?remove_special: bool, ?unparse_special: bool) -> String
|
198
201
|
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.17.6
|
4
|
+
version: 0.17.7
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2024-08-09 00:00:00.000000000 Z
|
11
|
+
date: 2024-08-17 00:00:00.000000000 Z
|
12
12
|
dependencies: []
|
13
13
|
description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
14
14
|
email:
|