llama_cpp 0.19.0 → 0.19.1
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/ext/llama_cpp/llama_cpp.c +22 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +3 -3
checksums.yaml
CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bb93f6aa74a3f022cb15dde5df9b4e6deb4c228a5b0fbfc01ff2ebed9446f152
+  data.tar.gz: 0b94a7d328ed8ac1bdc637a9b79ff490f5e5af1df3c5893e88c91dcf4a73ed07
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 97f0be1659fabb158b328294b3d7b5df9ea7774f8d0063246ea5d006f39f6d9799d80236a712d6026b6d9df4acd5b20ced809d93145379dadc13a61e0813a689
+  data.tar.gz: 5072945b4b0152fe5dfa41807253a511274020fb5149988586e1932c65f0ce020876f200cf97b291ba2d804cac03aea731c5d57adb6ab1ef8d448a210104836b
```
data/CHANGELOG.md
CHANGED

```diff
@@ -1,4 +1,10 @@
 
+## [[0.19.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.0...v0.19.1)] - 2025-03-29
+
+- Change supported llama.cpp version to b4980
+- Add `LLAMA_VOCAB_PRE_TYPE_SUPERBPE` constant.
+- Add `llama_set_warmup` module function.
+
 ## [[0.19.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.18.2...v0.19.0)] - 2025-03-16
 
 **Breaking Changes**
```
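Both additions are visible from Ruby once the gem is built. A minimal sketch of the new constant (assuming only that the gem is installed; the printed value comes from the bundled llama.cpp headers):

```ruby
require 'llama_cpp'

# New in 0.19.1: the SUPERBPE pre-tokenizer type is exposed as an
# integer constant alongside the other LLAMA_VOCAB_PRE_TYPE_* values.
puts LlamaCpp::LLAMA_VOCAB_PRE_TYPE_SUPERBPE
```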
data/ext/llama_cpp/llama_cpp.c
CHANGED

```diff
@@ -2518,6 +2518,24 @@ static VALUE rb_llama_set_causal_attn(VALUE self, VALUE ctx, VALUE causal_attn)
   return Qnil;
 }
 
+/**
+ * @overload llama_set_warmup(context, warmup)
+ * @param [LlamaContext] context
+ * @param [Boolean] warmup
+ * @return [NilClass]
+ */
+static VALUE rb_llama_set_warmup(VALUE self, VALUE ctx, VALUE warmup) {
+  if (!rb_obj_is_kind_of(ctx, rb_cLlamaContext)) {
+    rb_raise(rb_eArgError, "ctx must be a LlamaContext");
+    return Qnil;
+  }
+  llama_context_wrapper* context_wrapper = get_llama_context_wrapper(ctx);
+  const bool warmup_ = RTEST(warmup) ? true : false;
+  llama_set_warmup(context_wrapper->context, warmup_);
+  RB_GC_GUARD(ctx);
+  return Qnil;
+}
+
 /**
  * @overload llama_synchronize(context)
  * @param [LlamaContext] context
@@ -3965,6 +3983,7 @@ void Init_llama_cpp(void) {
   rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_MINERVA", INT2NUM(LLAMA_VOCAB_PRE_TYPE_MINERVA));
   rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM));
   rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT4O", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT4O));
+  rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SUPERBPE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SUPERBPE));
   /* llama_rope_type */
   /* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
   rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
@@ -4995,6 +5014,9 @@ void Init_llama_cpp(void) {
   /* llama_set_causal_attn */
   rb_define_module_function(rb_mLlamaCpp, "llama_set_causal_attn", rb_llama_set_causal_attn, 2);
 
+  /* llama_set_warmup */
+  rb_define_module_function(rb_mLlamaCpp, "llama_set_warmup", rb_llama_set_warmup, 2);
+
   /* llama_set_abort_callback */
 
   /* llama_synchronize */
```
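The new binding mirrors llama.cpp's `llama_set_warmup`, which flags the context so the next decode can be treated as a cache-priming pass rather than real inference. A sketch of how it might be called from Ruby follows; the model path and the surrounding setup calls are assumptions based on the gem's llama.cpp-mirroring API, not part of this diff — only `LlamaCpp.llama_set_warmup(context, warmup)` itself is confirmed by the code above:

```ruby
require 'llama_cpp'

# Assumed setup following the gem's llama.cpp-style API; the model
# path is a placeholder.
LlamaCpp.llama_backend_init
model_params = LlamaCpp::LlamaModelParams.new
model = LlamaCpp.llama_model_load_from_file('path/to/model.gguf', model_params)
context_params = LlamaCpp::LlamaContextParams.new
context = LlamaCpp.llama_init_from_model(model, context_params)

# New in 0.19.1: any truthy value enables warmup mode (the C binding
# converts the argument with RTEST before calling llama_set_warmup).
LlamaCpp.llama_set_warmup(context, true)
# ... run a throwaway decode here to prime the runtime ...
LlamaCpp.llama_set_warmup(context, false)

LlamaCpp.llama_free(context)
LlamaCpp.llama_model_free(model)
LlamaCpp.llama_backend_free
```

Passing anything other than a `LlamaContext` as the first argument raises an `ArgumentError`, per the type check in the binding.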
data/lib/llama_cpp/version.rb
CHANGED

```diff
@@ -3,8 +3,8 @@
 # llama_cpp.rb provides Ruby bindings for the llama.cpp.
 module LlamaCpp
   # The version of llama_cpp.rb you install.
-  VERSION = '0.19.0'
+  VERSION = '0.19.1'
 
   # The supported version of llama.cpp.
-  LLAMA_CPP_VERSION = '
+  LLAMA_CPP_VERSION = 'b4980'
 end
```
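Both constants are plain strings, so the bump can be verified at runtime:

```ruby
require 'llama_cpp'

puts LlamaCpp::VERSION            # => "0.19.1"
puts LlamaCpp::LLAMA_CPP_VERSION  # => "b4980"
```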
metadata
CHANGED

```diff
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: llama_cpp
 version: !ruby/object:Gem::Version
-  version: 0.19.0
+  version: 0.19.1
 platform: ruby
 authors:
 - yoshoku
 bindir: exe
 cert_chain: []
-date: 2025-03-16 00:00:00.000000000 Z
+date: 2025-03-29 00:00:00.000000000 Z
 dependencies: []
 description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
 email:
@@ -49,7 +49,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.6
 specification_version: 4
 summary: Ruby bindings for the llama.cpp.
 test_files: []
```