llama_cpp 0.19.3 → 0.19.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -1
- data/ext/llama_cpp/llama_cpp.c +2 -22
- data/lib/llama_cpp/version.rb +2 -2
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 42542ad4104e7926eb1418e671e127caf96a3ff748b0ab1d3697aeaae56de939
|
4
|
+
data.tar.gz: 6fd5680b283f050992c49b70dc2e8bc64ef18afc1ceefe7d944902def30d8ab0
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: df888e40ecd7a9917663dda356b5b29537c32643a8b998e7e707d229cee91b544a0a5c62b9a613073bb030819f9d12b82c87efa4035e0371bba33cd719489b8a
|
7
|
+
data.tar.gz: a7c1655973159b888a1d89a4b209e5805620b5be6a65d38d167cf9812affc38ab13371648778a5ac16fddd45b5b002bead63f0ec23796b23c729c0bd48b24fa9
|
data/CHANGELOG.md
CHANGED
@@ -1,4 +1,14 @@
|
|
1
|
-
## [[0.19.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.2...v0.19.3)] - 2025-04-12
|
1
|
+
## [[0.19.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.4...v0.19.5)] - 2025-05-10
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b5320
|
4
|
+
- Remove deprecated logits_all accessor in LlamaContextParams.
|
5
|
+
|
6
|
+
## [[0.19.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.3...v0.19.4)] - 2025-04-26
|
7
|
+
|
8
|
+
- Change supported llama.cpp version to b5180
|
9
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_PIXTRAL` constant.
|
10
|
+
|
11
|
+
## [[0.19.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.2...v0.19.3)] - 2025-04-12
|
2
12
|
|
3
13
|
- Change supported llama.cpp version to b5120
|
4
14
|
- Add `LLAMA_VOCAB_PRE_TYPE_LLAMA4` constant.
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -772,17 +772,6 @@ static VALUE llama_context_params_set_type_v(VALUE self, VALUE type_v) {
|
|
772
772
|
return type_v;
|
773
773
|
}
|
774
774
|
|
775
|
-
static VALUE llama_context_params_get_logits_all(VALUE self) {
|
776
|
-
struct llama_context_params* data = get_llama_context_params(self);
|
777
|
-
return data->logits_all ? Qtrue : Qfalse;
|
778
|
-
}
|
779
|
-
|
780
|
-
static VALUE llama_context_params_set_logits_all(VALUE self, VALUE logits_all) {
|
781
|
-
struct llama_context_params* data = get_llama_context_params(self);
|
782
|
-
data->logits_all = RTEST(logits_all) ? true : false;
|
783
|
-
return logits_all;
|
784
|
-
}
|
785
|
-
|
786
775
|
static VALUE llama_context_params_get_embeddings(VALUE self) {
|
787
776
|
struct llama_context_params* data = get_llama_context_params(self);
|
788
777
|
return data->embeddings ? Qtrue : Qfalse;
|
@@ -4031,6 +4020,7 @@ void Init_llama_cpp(void) {
|
|
4031
4020
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_TRILLION", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TRILLION));
|
4032
4021
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BAILINGMOE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BAILINGMOE));
|
4033
4022
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA4", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA4));
|
4023
|
+
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_PIXTRAL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PIXTRAL));
|
4034
4024
|
/* llama_rope_type */
|
4035
4025
|
/* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
|
4036
4026
|
rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
|
@@ -4557,17 +4547,6 @@ void Init_llama_cpp(void) {
|
|
4557
4547
|
* @return [Integer]
|
4558
4548
|
*/
|
4559
4549
|
rb_define_method(rb_cLlamaContextParams, "type_v=", RUBY_METHOD_FUNC(llama_context_params_set_type_v), 1);
|
4560
|
-
/**
|
4561
|
-
* Document-method: logits_all
|
4562
|
-
* @return [Boolean]
|
4563
|
-
*/
|
4564
|
-
rb_define_method(rb_cLlamaContextParams, "logits_all", RUBY_METHOD_FUNC(llama_context_params_get_logits_all), 0);
|
4565
|
-
/**
|
4566
|
-
* Document-method: logits_all=
|
4567
|
-
* @param [Boolean] logits_all
|
4568
|
-
* @return [Boolean]
|
4569
|
-
*/
|
4570
|
-
rb_define_method(rb_cLlamaContextParams, "logits_all=", RUBY_METHOD_FUNC(llama_context_params_set_logits_all), 1);
|
4571
4550
|
/**
|
4572
4551
|
* Document-method: embeddings
|
4573
4552
|
* @return [Boolean]
|
@@ -4722,6 +4701,7 @@ void Init_llama_cpp(void) {
|
|
4722
4701
|
rb_define_method(rb_cLlamaModelQuantizeParams, "keep_split=", RUBY_METHOD_FUNC(llama_model_quantize_params_set_keep_split), 1);
|
4723
4702
|
/* TODO: void* imatrix */
|
4724
4703
|
/* TODO: void* kv_overrides */
|
4704
|
+
/* TODO: void* tensor_types */
|
4725
4705
|
|
4726
4706
|
/**
|
4727
4707
|
* Document-class: LlamaCpp::LlamaLogitBias
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.19.3'
|
6
|
+
VERSION = '0.19.5'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b5120'
|
9
|
+
LLAMA_CPP_VERSION = 'b5320'
|
10
10
|
end
|
metadata
CHANGED
@@ -1,13 +1,13 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llama_cpp
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.19.3
|
4
|
+
version: 0.19.5
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yoshoku
|
8
8
|
bindir: exe
|
9
9
|
cert_chain: []
|
10
|
-
date:
|
10
|
+
date: 1980-01-02 00:00:00.000000000 Z
|
11
11
|
dependencies: []
|
12
12
|
description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
13
13
|
email:
|
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
49
49
|
- !ruby/object:Gem::Version
|
50
50
|
version: '0'
|
51
51
|
requirements: []
|
52
|
-
rubygems_version: 3.6.
|
52
|
+
rubygems_version: 3.6.7
|
53
53
|
specification_version: 4
|
54
54
|
summary: Ruby bindings for the llama.cpp.
|
55
55
|
test_files: []
|