llama_cpp 0.17.2 → 0.17.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/ext/llama_cpp/llama_cpp.cpp +3 -1
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +4 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b4a52ad29464deda885f25d070e74dd9f2d05dc635535556cd7286aa9950f08c
+  data.tar.gz: e51513df4e7d36b2dce2356daf8ee500b035f4ef4dcb33933bc35f7fa61e1063
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e4c092b79017ea8b393bd364dc29cc418f259915781e2be6fc7181029ba4446863845d915196aa49dbb4e0fb25d3d534373917be0b93ef897c28f0e79f687a98
+  data.tar.gz: 71bab30d25455e31bcb25277fd91dc56bede4013454cf6b9ace3833694c04712d7a667f903d754117c907e39684e90344cdc4b4f67394fa2f09dc5c264ad5401
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,11 @@
+## [[0.17.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.2...v0.17.3)] - 2024-07-21
+
+- Change supported llama.cpp version to b3405.
+- Remove `LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16` constant.
+- Add model file type constants: `LLAMA_FTYPE_MOSTLY_Q4_0_4_4`, `LLAMA_FTYPE_MOSTLY_Q4_0_4_8`, and `LLAMA_FTYPE_MOSTLY_Q4_0_8_8`.
+
+Implementation of bindings for llama_lora_adapter_init, llama_lora_adapter_set, llama_lora_adapter_remove, and llama_lora_adapter_free has been skipped.
+
 ## [[0.17.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.1...v0.17.2)] - 2024-07-14
 
 - Change supported llama.cpp version to b3358.
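The three new file-type constants are defined directly on the LLaMACpp module (see the rb_define_const calls in the llama_cpp.cpp diff below) and are plain Integers that mirror the corresponding llama_ftype enum values in llama.cpp. A minimal Ruby sketch that only confirms they are available after upgrading; how they are passed to a quantization call is not shown in this diff and is left out here:

    require 'llama_cpp'

    # New Q4_0 variants added in 0.17.3; each constant is an Integer taken
    # from llama.cpp's llama_ftype enum (build b3405).
    %w[
      LLAMA_FTYPE_MOSTLY_Q4_0_4_4
      LLAMA_FTYPE_MOSTLY_Q4_0_4_8
      LLAMA_FTYPE_MOSTLY_Q4_0_8_8
    ].each do |name|
      puts format('%s = %d', name, LLaMACpp.const_get(name))
    end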
data/ext/llama_cpp/llama_cpp.cpp
CHANGED
@@ -3645,7 +3645,6 @@ extern "C" void Init_llama_cpp(void) {
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_F16", INT2NUM(LLAMA_FTYPE_MOSTLY_F16));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_1", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_1));
-  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q8_0", INT2NUM(LLAMA_FTYPE_MOSTLY_Q8_0));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q5_0", INT2NUM(LLAMA_FTYPE_MOSTLY_Q5_0));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q5_1", INT2NUM(LLAMA_FTYPE_MOSTLY_Q5_1));
@@ -3670,6 +3669,9 @@ extern "C" void Init_llama_cpp(void) {
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_IQ4_XS", INT2NUM(LLAMA_FTYPE_MOSTLY_IQ4_XS));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_IQ1_M", INT2NUM(LLAMA_FTYPE_MOSTLY_IQ1_M));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_BF16", INT2NUM(LLAMA_FTYPE_MOSTLY_BF16));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_4", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_4));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_8));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_8_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_8_8));
 
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_GUESSED", INT2NUM(LLAMA_FTYPE_GUESSED));
 
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LLaMACpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.17.2'
+  VERSION = '0.17.3'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b3358'
+  LLAMA_CPP_VERSION = 'b3405'
 end
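Because VERSION and LLAMA_CPP_VERSION are plain module constants, a consuming project can check at runtime which gem release and llama.cpp build it is linked against. A small sketch; the version guard below is only an illustrative way to gate use of the constants introduced in 0.17.3:

    require 'llama_cpp'

    # Report the installed binding version and the llama.cpp build it targets.
    puts "llama_cpp.rb #{LLaMACpp::VERSION} (llama.cpp #{LLaMACpp::LLAMA_CPP_VERSION})"

    # Warn before relying on the LLAMA_FTYPE_MOSTLY_Q4_0_* constants.
    if Gem::Version.new(LLaMACpp::VERSION) < Gem::Version.new('0.17.3')
      warn 'LLAMA_FTYPE_MOSTLY_Q4_0_4_4/4_8/8_8 require llama_cpp.rb >= 0.17.3'
    end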
data/sig/llama_cpp.rbs
CHANGED
@@ -54,7 +54,6 @@ module LLaMACpp
   LLAMA_FTYPE_MOSTLY_F16: Integer
   LLAMA_FTYPE_MOSTLY_Q4_0: Integer
   LLAMA_FTYPE_MOSTLY_Q4_1: Integer
-  LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: Integer
   LLAMA_FTYPE_MOSTLY_Q8_0: Integer
   LLAMA_FTYPE_MOSTLY_Q5_0: Integer
   LLAMA_FTYPE_MOSTLY_Q5_1: Integer
@@ -79,6 +78,10 @@ module LLaMACpp
   LLAMA_FTYPE_MOSTLY_IQ4_XS: Integer
   LLAMA_FTYPE_MOSTLY_IQ1_M: Integer
   LLAMA_FTYPE_MOSTLY_BF16: Integer
+  LLAMA_FTYPE_MOSTLY_Q4_0_4_4: Integer
+  LLAMA_FTYPE_MOSTLY_Q4_0_4_8: Integer
+  LLAMA_FTYPE_MOSTLY_Q4_0_8_8: Integer
+  LLAMA_FTYPE_GUESSED: Integer
 
   LLAMA_KV_OVERRIDE_TYPE_INT: Integer
   LLAMA_KV_OVERRIDE_TYPE_FLOAT: Integer
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.17.2
+  version: 0.17.3
 platform: ruby
 authors:
 - yoshoku
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-07-14 00:00:00.000000000 Z
+date: 2024-07-21 00:00:00.000000000 Z
 dependencies: []
 description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
 email: