llama_cpp 0.24.0 → 0.24.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/ext/llama_cpp/llama_cpp.c +24 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 1130bd5d4bd478e4aed2e67d836fe66aa0bf166dac85e28557e05814b75d48b2
|
|
4
|
+
data.tar.gz: fb276cff62ba89f3726b526c7efea7d6b76ff4164b3885cd70c07c36ae2a4ec7
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: f45b85cc4dfebd8a0afb0592a1ddee159656a749033cc5abf395f88c19742a705263018b72acb234142d2821b3b5ba2e1a09ff1884347b0a73bea5b1a6b0c3bc
|
|
7
|
+
data.tar.gz: 9b000d84f97eaa7e4f6b775ce03899a60aae832440fb53b28115d576441df330d42bd3761984fe2765c3b9fed60121caeaed7c697288140a58fb193467e9d082
|
data/CHANGELOG.md
CHANGED
|
@@ -1,3 +1,13 @@
|
|
|
1
|
+
## [[0.24.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.24.1...v0.24.2)] - 2026-03-15
|
|
2
|
+
|
|
3
|
+
- Change supported llama.cpp version to b8340.
|
|
4
|
+
- Add `LLAMA_FTYPE_MOSTLY_NVFP4` constant value.
|
|
5
|
+
|
|
6
|
+
## [[0.24.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.24.0...v0.24.1)] - 2026-03-01
|
|
7
|
+
|
|
8
|
+
- Change supported llama.cpp version to b8170.
|
|
9
|
+
- Add `dry_run` accessor to `LlamaModelQuantizeParams`.
|
|
10
|
+
|
|
1
11
|
## [[0.24.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.23.11...v0.24.0)] - 2026-02-22
|
|
2
12
|
|
|
3
13
|
- Change supported llama.cpp version to b8110.
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
|
@@ -1024,6 +1024,17 @@ static VALUE llama_model_quantize_params_set_keep_split(VALUE self, VALUE keep_s
|
|
|
1024
1024
|
return keep_split;
|
|
1025
1025
|
}
|
|
1026
1026
|
|
|
1027
|
+
static VALUE llama_model_quantize_params_get_dry_run(VALUE self) {
|
|
1028
|
+
llama_model_quantize_params* data = get_llama_model_quantize_params(self);
|
|
1029
|
+
return data->dry_run ? Qtrue : Qfalse;
|
|
1030
|
+
}
|
|
1031
|
+
|
|
1032
|
+
static VALUE llama_model_quantize_params_set_dry_run(VALUE self, VALUE dry_run) {
|
|
1033
|
+
llama_model_quantize_params* data = get_llama_model_quantize_params(self);
|
|
1034
|
+
data->dry_run = RTEST(dry_run) ? true : false;
|
|
1035
|
+
return dry_run;
|
|
1036
|
+
}
|
|
1037
|
+
|
|
1027
1038
|
/* llama_logit_bias */
|
|
1028
1039
|
static void llama_logit_bias_free(void *ptr) {
|
|
1029
1040
|
if (ptr) {
|
|
@@ -4184,6 +4195,7 @@ void Init_llama_cpp(void) {
|
|
|
4184
4195
|
rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_TQ1_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ1_0));
|
|
4185
4196
|
rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_TQ2_0", INT2NUM(LLAMA_FTYPE_MOSTLY_TQ2_0));
|
|
4186
4197
|
rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_MXFP4_MOE", INT2NUM(LLAMA_FTYPE_MOSTLY_MXFP4_MOE));
|
|
4198
|
+
rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_MOSTLY_NVFP4", INT2NUM(LLAMA_FTYPE_MOSTLY_NVFP4));
|
|
4187
4199
|
rb_define_const(rb_mLlamaCpp, "LLAMA_FTYPE_GUESSED", INT2NUM(LLAMA_FTYPE_GUESSED));
|
|
4188
4200
|
/* llama_rope_scaling_type */
|
|
4189
4201
|
/* Document-const: LlamaCpp::LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED */
|
|
@@ -4904,6 +4916,17 @@ void Init_llama_cpp(void) {
|
|
|
4904
4916
|
* @return [Boolean]
|
|
4905
4917
|
*/
|
|
4906
4918
|
rb_define_method(rb_cLlamaModelQuantizeParams, "keep_split=", RUBY_METHOD_FUNC(llama_model_quantize_params_set_keep_split), 1);
|
|
4919
|
+
/**
|
|
4920
|
+
* Document-method: dry_run
|
|
4921
|
+
* @return [Boolean]
|
|
4922
|
+
*/
|
|
4923
|
+
rb_define_method(rb_cLlamaModelQuantizeParams, "dry_run", RUBY_METHOD_FUNC(llama_model_quantize_params_get_dry_run), 0);
|
|
4924
|
+
/**
|
|
4925
|
+
* Document-method: dry_run=
|
|
4926
|
+
* @param [Boolean] dry_run
|
|
4927
|
+
* @return [Boolean]
|
|
4928
|
+
*/
|
|
4929
|
+
rb_define_method(rb_cLlamaModelQuantizeParams, "dry_run=", RUBY_METHOD_FUNC(llama_model_quantize_params_set_dry_run), 1);
|
|
4907
4930
|
/* TODO: void* imatrix */
|
|
4908
4931
|
/* TODO: void* kv_overrides */
|
|
4909
4932
|
/* TODO: void* tensor_types */
|
|
@@ -4979,6 +5002,7 @@ void Init_llama_cpp(void) {
|
|
|
4979
5002
|
|
|
4980
5003
|
/* TODO: llama_attach_threadpool */
|
|
4981
5004
|
/* TODO: llama_detach_threadpool */
|
|
5005
|
+
/* TODO: llama_model_init_from_user */
|
|
4982
5006
|
|
|
4983
5007
|
/* llama_model_load_from_file */
|
|
4984
5008
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_load_from_file", rb_llama_model_load_from_file, 2);
|
data/lib/llama_cpp/version.rb
CHANGED
|
@@ -3,8 +3,8 @@
|
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
|
4
4
|
module LlamaCpp
|
|
5
5
|
# The version of llama_cpp.rb you install.
|
|
6
|
-
VERSION = '0.24.0'
|
|
6
|
+
VERSION = '0.24.2'
|
|
7
7
|
|
|
8
8
|
# The supported version of llama.cpp.
|
|
9
|
-
LLAMA_CPP_VERSION = 'b8110'
|
|
9
|
+
LLAMA_CPP_VERSION = 'b8340'
|
|
10
10
|
end
|
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: llama_cpp
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.24.0
|
|
4
|
+
version: 0.24.2
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- yoshoku
|
|
@@ -33,7 +33,7 @@ metadata:
|
|
|
33
33
|
homepage_uri: https://github.com/yoshoku/llama_cpp.rb
|
|
34
34
|
source_code_uri: https://github.com/yoshoku/llama_cpp.rb
|
|
35
35
|
changelog_uri: https://github.com/yoshoku/llama_cpp.rb/blob/main/CHANGELOG.md
|
|
36
|
-
documentation_uri: https://gemdocs.org/gems/llama_cpp/0.24.0/
|
|
36
|
+
documentation_uri: https://gemdocs.org/gems/llama_cpp/0.24.2/
|
|
37
37
|
rubygems_mfa_required: 'true'
|
|
38
38
|
rdoc_options: []
|
|
39
39
|
require_paths:
|