llama_cpp 0.17.8 → 0.17.10
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -0
- data/ext/llama_cpp/llama_cpp.cpp +6 -3
- data/lib/llama_cpp/version.rb +2 -2
- data/sig/llama_cpp.rbs +3 -0
- metadata +2 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ec7b41006345cfe57e355d207718ae460a5a2c519d67027daddb7d40ab346754
+  data.tar.gz: efcfa7eef5374790cef788f719bede1ab3cd30e5dec79be0ba9844bfbcd433c2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c39150e77652060fcfee2c73b1334ff53ed40caf58679efa727a0d0d54bffe036b8795be9751cef8517b5d4b2300fb77fa5753a9c94b803988ec429b999591c2
+  data.tar.gz: 6054dee2f39ac2690de2836405ac793bf987538ef0780cffab2700544bc60ceaca8a4b109407c66dcf2dd9f38ffe6b98659d7ca4ddb5c9e93fa85d9884cea26e
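Only the digest values change here, as they do on every release. To verify a downloaded gem against the published SHA256 digests, here is a minimal Ruby sketch (the local file name `llama_cpp-0.17.10.gem` is an assumption for illustration):

```ruby
require 'digest'
require 'rubygems/package'

# Published SHA256 digests from the new checksums.yaml above.
EXPECTED = {
  'metadata.gz' => 'ec7b41006345cfe57e355d207718ae460a5a2c519d67027daddb7d40ab346754',
  'data.tar.gz' => 'efcfa7eef5374790cef788f719bede1ab3cd30e5dec79be0ba9844bfbcd433c2'
}.freeze

# A .gem file is a tar archive whose entries include metadata.gz and data.tar.gz.
File.open('llama_cpp-0.17.10.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      expected = EXPECTED[entry.full_name]
      next unless expected
      actual = Digest::SHA256.hexdigest(entry.read)
      puts "#{entry.full_name}: #{actual == expected ? 'OK' : 'MISMATCH'}"
    end
  end
end
```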
data/CHANGELOG.md
CHANGED

@@ -1,3 +1,17 @@
+## [[0.17.10](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.9...v0.17.10)] - 2024-09-07
+
+- Change supported llama.cpp version to b3676.
+- Add `LLAMA_VOCAB_TYPE_RWKV` constant.
+- Add `LLAMA_FTYPE_MOSTLY_TQ1_0` and `LLAMA_FTYPE_MOSTLY_TQ2_0` constants.
+- Change type of n_threads and n_threads_batch from uint32_t to int32_t in the native extension code.
+
+Implementation bindings for llama_attach_threadpool and llama_detach_threadpool have been skipped.
+
+## [[0.17.9](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.8...v0.17.9)] - 2024-08-31
+
+- Change supported llama.cpp version to b3639.
+- There are no changes in the API.
+
 ## [[0.17.8](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.7...v0.17.8)] - 2024-08-25
 
 - Change supported llama.cpp version to b3614.
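The constants added in 0.17.10 are plain Integer values on the `LLaMACpp` module, so using them needs nothing beyond requiring the gem; a minimal sketch:

```ruby
require 'llama_cpp'

# Vocabulary type for RWKV-family models, new in 0.17.10.
p LLaMACpp::LLAMA_VOCAB_TYPE_RWKV

# Ternary quantization file types, also new in 0.17.10.
p LLaMACpp::LLAMA_FTYPE_MOSTLY_TQ1_0
p LLaMACpp::LLAMA_FTYPE_MOSTLY_TQ2_0
```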
data/ext/llama_cpp/llama_cpp.cpp
CHANGED

@@ -2512,7 +2512,7 @@ private:
       rb_raise(rb_eArgError, "LLaMA context is not initialized");
       return Qnil;
     }
-    llama_set_n_threads(ptr->ctx, NUM2UINT(n_threads), NUM2UINT(n_threads_batch));
+    llama_set_n_threads(ptr->ctx, NUM2INT(n_threads), NUM2INT(n_threads_batch));
     return Qnil;
   }
 
@@ -2558,7 +2558,7 @@ private:
       rb_raise(rb_eRuntimeError, "LLaMA context is not initialized");
       return Qnil;
     }
-    return UINT2NUM(llama_n_threads(ptr->ctx));
+    return INT2NUM(llama_n_threads(ptr->ctx));
   }
 
   static VALUE _llama_context_n_threads_batch(VALUE self) {
@@ -2567,7 +2567,7 @@ private:
       rb_raise(rb_eRuntimeError, "LLaMA context is not initialized");
      return Qnil;
     }
-    return UINT2NUM(llama_n_threads_batch(ptr->ctx));
+    return INT2NUM(llama_n_threads_batch(ptr->ctx));
   }
 
   static VALUE _llama_context_get_timings(VALUE self) {
@@ -3611,6 +3611,7 @@ extern "C" void Init_llama_cpp(void) {
   rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_BPE", INT2NUM(LLAMA_VOCAB_TYPE_BPE));
   rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_WPM", INT2NUM(LLAMA_VOCAB_TYPE_WPM));
   rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_UGM", INT2NUM(LLAMA_VOCAB_TYPE_UGM));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_TYPE_RWKV", INT2NUM(LLAMA_VOCAB_TYPE_RWKV));
 
   rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_DEFAULT", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEFAULT));
   rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA3", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA3));
@@ -3690,6 +3691,8 @@ extern "C" void Init_llama_cpp(void) {
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_4", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_4));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_4_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_4_8));
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_Q4_0_8_8", INT2NUM(LLAMA_FTYPE_MOSTLY_Q4_0_8_8));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_TQ1_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ1_0));
+  rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_MOSTLY_TQ2_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ2_0));
 
   rb_define_const(rb_mLLaMACpp, "LLAMA_FTYPE_GUESSED", INT2NUM(LLAMA_FTYPE_GUESSED));
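With the converters switched from the unsigned macros to `NUM2INT`/`INT2NUM`, thread counts now round-trip through the bindings as ordinary signed Integers. A usage sketch follows; the Ruby-side constructor and method signatures are inferred from the C function names above and are assumptions, not something this diff confirms:

```ruby
require 'llama_cpp'

# Assumed setup: 'model.gguf' is a local GGUF model file, and the constructor
# keyword arguments shown here are illustrative only.
model = LLaMACpp::Model.new(model_path: 'model.gguf', params: LLaMACpp::ModelParams.new)
context = LLaMACpp::Context.new(model: model, params: LLaMACpp::ContextParams.new)

context.set_n_threads(n_threads: 4, n_threads_batch: 8)
puts context.n_threads        # read back via INT2NUM as of 0.17.10
puts context.n_threads_batch
```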
data/lib/llama_cpp/version.rb
CHANGED

@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LLaMACpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.17.8'
+  VERSION = '0.17.10'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = 'b3614'
+  LLAMA_CPP_VERSION = 'b3676'
 end
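Both constants are readable at runtime, which makes version-guarded code straightforward; for example:

```ruby
require 'llama_cpp'

puts LLaMACpp::VERSION            # => "0.17.10"
puts LLaMACpp::LLAMA_CPP_VERSION  # => "b3676"

# Reference the RWKV vocab constant only on gem versions that define it.
if Gem::Version.new(LLaMACpp::VERSION) >= Gem::Version.new('0.17.10')
  p LLaMACpp::LLAMA_VOCAB_TYPE_RWKV
end
```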
data/sig/llama_cpp.rbs
CHANGED

@@ -16,6 +16,7 @@ module LLaMACpp
   LLAMA_VOCAB_TYPE_BPE: Integer
   LLAMA_VOCAB_TYPE_WPM: Integer
   LLAMA_VOCAB_TYPE_UGM: Integer
+  LLAMA_VOCAB_TYPE_RWKV: Integer
 
   LLAMA_VOCAB_PRE_TYPE_DEFAULT: Integer
   LLAMA_VOCAB_PRE_TYPE_LLAMA3: Integer
@@ -87,6 +88,8 @@ module LLaMACpp
   LLAMA_FTYPE_MOSTLY_Q4_0_4_4: Integer
   LLAMA_FTYPE_MOSTLY_Q4_0_4_8: Integer
   LLAMA_FTYPE_MOSTLY_Q4_0_8_8: Integer
+  LLAMA_FTYPE_MOSTLY_TQ1_0: Integer
+  LLAMA_FTYPE_MOSTLY_TQ2_0: Integer
   LLAMA_FTYPE_GUESSED: Integer
 
   LLAMA_KV_OVERRIDE_TYPE_INT: Integer
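These RBS entries pin the new constants to Integer for static checkers such as Steep; the runtime equivalent of what they declare is a quick sanity check:

```ruby
require 'llama_cpp'

# Each constant added in 0.17.10 is declared as Integer in the signatures above.
[LLaMACpp::LLAMA_VOCAB_TYPE_RWKV,
 LLaMACpp::LLAMA_FTYPE_MOSTLY_TQ1_0,
 LLaMACpp::LLAMA_FTYPE_MOSTLY_TQ2_0].each do |const|
  raise TypeError, "expected Integer, got #{const.class}" unless const.is_a?(Integer)
end
```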
metadata
CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.17.8
+  version: 0.17.10
 platform: ruby
 authors:
 - yoshoku
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-08-25 00:00:00.000000000 Z
+date: 2024-09-07 00:00:00.000000000 Z
 dependencies: []
 description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
 email:
|