llama_cpp 0.19.5 → 0.19.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -0
- data/ext/llama_cpp/llama_cpp.c +55 -0
- data/lib/llama_cpp/version.rb +2 -2
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 220f180ef84d2828f0b568c58679e31f85e11481a2c16c261a0d0f7dbb03d22a
|
4
|
+
data.tar.gz: 167fe1082d5bb11fd301d3f0cd3de878e4c7fe4d94ba8fff262f8b076e3df5da
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 4c0b2c285be1c53c3e004179fe5ca06af5408ebe70ccf08eb756c2dbd22ab8dcc6c435e756d8bfe095c3080290c045e2a66ca8c061878e444e3a569beaaa4346
|
7
|
+
data.tar.gz: f2d6835bd5cf91881d699f040535a942fe336d197b829508ab76c977e45a813c2a5b9f4640e26fa41b0ebb14eaf8ede70acdebb21124f1198dad1432a026a5de
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,10 @@
|
|
1
|
+
## [[0.19.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.5...v0.19.6)] - 2025-05-17
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b5410
|
4
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_SEED_CODER` constant.
|
5
|
+
- Add `op_offload` accessor to `LlamaContextParams`.
|
6
|
+
- Add `llama_model_save_to_file` module function.
|
7
|
+
|
1
8
|
## [[0.19.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.4...v0.19.5)] - 2025-05-10
|
2
9
|
|
3
10
|
- Change supported llama.cpp version to b5320
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -816,6 +816,17 @@ static VALUE llama_context_params_set_no_perf(VALUE self, VALUE no_perf) {
|
|
816
816
|
return no_perf;
|
817
817
|
}
|
818
818
|
|
819
|
+
static VALUE llama_context_params_get_op_offload(VALUE self) {
|
820
|
+
struct llama_context_params* data = get_llama_context_params(self);
|
821
|
+
return data->op_offload ? Qtrue : Qfalse;
|
822
|
+
}
|
823
|
+
|
824
|
+
static VALUE llama_context_params_set_op_offload(VALUE self, VALUE op_offload) {
|
825
|
+
struct llama_context_params* data = get_llama_context_params(self);
|
826
|
+
data->op_offload = RTEST(op_offload) ? true : false;
|
827
|
+
return op_offload;
|
828
|
+
}
|
829
|
+
|
819
830
|
/* llama_model_quantize_params */
|
820
831
|
static void llama_model_quantize_params_free(void *ptr) {
|
821
832
|
if (ptr) {
|
@@ -1220,6 +1231,29 @@ static VALUE rb_llama_model_load_from_splits(VALUE self, VALUE paths, VALUE para
|
|
1220
1231
|
return TypedData_Wrap_Struct(rb_cLlamaModel, &llama_model_wrapper_data_type, model_wrapper);
|
1221
1232
|
}
|
1222
1233
|
|
1234
|
+
/**
|
1235
|
+
* @overload llama_model_save_to_file(model, path_model)
|
1236
|
+
* @param [LlamaModel] model
|
1237
|
+
* @param [String] path_model
|
1238
|
+
* @return [NilClass]
|
1239
|
+
*/
|
1240
|
+
static VALUE rb_llama_model_save_to_file(VALUE self, VALUE model, VALUE path_model) {
|
1241
|
+
if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
|
1242
|
+
rb_raise(rb_eArgError, "model must be a LlamaModel");
|
1243
|
+
return Qnil;
|
1244
|
+
}
|
1245
|
+
if (!RB_TYPE_P(path_model, T_STRING)) {
|
1246
|
+
rb_raise(rb_eArgError, "path_model must be a String");
|
1247
|
+
return Qnil;
|
1248
|
+
}
|
1249
|
+
llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
|
1250
|
+
const char* path_model_ = StringValueCStr(path_model);
|
1251
|
+
llama_model_save_to_file(model_wrapper->model, path_model_);
|
1252
|
+
RB_GC_GUARD(model);
|
1253
|
+
RB_GC_GUARD(path_model);
|
1254
|
+
return Qnil;
|
1255
|
+
}
|
1256
|
+
|
1223
1257
|
/**
|
1224
1258
|
* @overload llama_init_from_model(model, params)
|
1225
1259
|
* @param [LlamaModel] model
|
@@ -4021,6 +4055,7 @@ void Init_llama_cpp(void) {
|
|
4021
4055
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BAILINGMOE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BAILINGMOE));
|
4022
4056
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA4", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA4));
|
4023
4057
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_PIXTRAL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PIXTRAL));
|
4058
|
+
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SEED_CODER", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SEED_CODER));
|
4024
4059
|
/* llama_rope_type */
|
4025
4060
|
/* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
|
4026
4061
|
rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
|
@@ -4591,6 +4626,17 @@ void Init_llama_cpp(void) {
|
|
4591
4626
|
* @return [Boolean]
|
4592
4627
|
*/
|
4593
4628
|
rb_define_method(rb_cLlamaContextParams, "no_perf=", RUBY_METHOD_FUNC(llama_context_params_set_no_perf), 1);
|
4629
|
+
/**
|
4630
|
+
* Document-method: op_offload
|
4631
|
+
* @return [Boolean]
|
4632
|
+
*/
|
4633
|
+
rb_define_method(rb_cLlamaContextParams, "op_offload", RUBY_METHOD_FUNC(llama_context_params_get_op_offload), 0);
|
4634
|
+
/**
|
4635
|
+
* Document-method: op_offload=
|
4636
|
+
* @param [Boolean] op_offload
|
4637
|
+
* @return [Boolean]
|
4638
|
+
*/
|
4639
|
+
rb_define_method(rb_cLlamaContextParams, "op_offload=", RUBY_METHOD_FUNC(llama_context_params_set_op_offload), 1);
|
4594
4640
|
/* TODO: ggml_abort_callback abort_callback */
|
4595
4641
|
/* TODO: void* abort_callback_data */
|
4596
4642
|
|
@@ -4780,6 +4826,9 @@ void Init_llama_cpp(void) {
|
|
4780
4826
|
/* llama_model_load_from_splits */
|
4781
4827
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_load_from_splits", rb_llama_model_load_from_splits, 2);
|
4782
4828
|
|
4829
|
+
/* llama_model_save_to_file */
|
4830
|
+
rb_define_module_function(rb_mLlamaCpp, "llama_model_save_to_file", rb_llama_model_save_to_file, 2);
|
4831
|
+
|
4783
4832
|
/* llama_model_free */
|
4784
4833
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_free", rb_llama_model_free, 1);
|
4785
4834
|
|
@@ -5316,4 +5365,10 @@ void Init_llama_cpp(void) {
|
|
5316
5365
|
|
5317
5366
|
/* llama_perf_sampler_reset */
|
5318
5367
|
rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
|
5368
|
+
|
5369
|
+
/* TODO: typedef bool (*llama_opt_param_filter) */
|
5370
|
+
/* TODO: bool llama_opt_param_filter_all */
|
5371
|
+
/* TODO: struct llama_opt_params */
|
5372
|
+
/* TODO: void llama_opt_init */
|
5373
|
+
/* TODO: void llama_opt_epoch */
|
5319
5374
|
}
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.19.5'
|
6
|
+
VERSION = '0.19.6'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b5320'
|
9
|
+
LLAMA_CPP_VERSION = 'b5410'
|
10
10
|
end
|