llama_cpp 0.19.4 → 0.19.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/ext/llama_cpp/llama_cpp.c +55 -22
- data/lib/llama_cpp/version.rb +2 -2
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 220f180ef84d2828f0b568c58679e31f85e11481a2c16c261a0d0f7dbb03d22a
|
4
|
+
data.tar.gz: 167fe1082d5bb11fd301d3f0cd3de878e4c7fe4d94ba8fff262f8b076e3df5da
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 4c0b2c285be1c53c3e004179fe5ca06af5408ebe70ccf08eb756c2dbd22ab8dcc6c435e756d8bfe095c3080290c045e2a66ca8c061878e444e3a569beaaa4346
|
7
|
+
data.tar.gz: f2d6835bd5cf91881d699f040535a942fe336d197b829508ab76c977e45a813c2a5b9f4640e26fa41b0ebb14eaf8ede70acdebb21124f1198dad1432a026a5de
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,15 @@
|
|
1
|
+
## [[0.19.6](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.5...v0.19.6)] - 2025-05-17
|
2
|
+
|
3
|
+
- Change supported llama.cpp version to b5410
|
4
|
+
- Add `LLAMA_VOCAB_PRE_TYPE_SEED_CODER` constant.
|
5
|
+
- Add `op_offload` accessor to `LlamaContextParams`.
|
6
|
+
- Add `llama_model_save_to_file` module function.
|
7
|
+
|
8
|
+
## [[0.19.5](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.4...v0.19.5)] - 2025-05-10
|
9
|
+
|
10
|
+
- Change supported llama.cpp version to b5320
|
11
|
+
- Remove deprecated logits_all accessor in LlamaContextParams.
|
12
|
+
|
1
13
|
## [[0.19.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.3...v0.19.4)] - 2025-04-26
|
2
14
|
|
3
15
|
- Change supported llama.cpp version to b5180
|
data/ext/llama_cpp/llama_cpp.c
CHANGED
@@ -772,17 +772,6 @@ static VALUE llama_context_params_set_type_v(VALUE self, VALUE type_v) {
|
|
772
772
|
return type_v;
|
773
773
|
}
|
774
774
|
|
775
|
-
static VALUE llama_context_params_get_logits_all(VALUE self) {
|
776
|
-
struct llama_context_params* data = get_llama_context_params(self);
|
777
|
-
return data->logits_all ? Qtrue : Qfalse;
|
778
|
-
}
|
779
|
-
|
780
|
-
static VALUE llama_context_params_set_logits_all(VALUE self, VALUE logits_all) {
|
781
|
-
struct llama_context_params* data = get_llama_context_params(self);
|
782
|
-
data->logits_all = RTEST(logits_all) ? true : false;
|
783
|
-
return logits_all;
|
784
|
-
}
|
785
|
-
|
786
775
|
static VALUE llama_context_params_get_embeddings(VALUE self) {
|
787
776
|
struct llama_context_params* data = get_llama_context_params(self);
|
788
777
|
return data->embeddings ? Qtrue : Qfalse;
|
@@ -827,6 +816,17 @@ static VALUE llama_context_params_set_no_perf(VALUE self, VALUE no_perf) {
|
|
827
816
|
return no_perf;
|
828
817
|
}
|
829
818
|
|
819
|
+
static VALUE llama_context_params_get_op_offload(VALUE self) {
|
820
|
+
struct llama_context_params* data = get_llama_context_params(self);
|
821
|
+
return data->op_offload ? Qtrue : Qfalse;
|
822
|
+
}
|
823
|
+
|
824
|
+
static VALUE llama_context_params_set_op_offload(VALUE self, VALUE op_offload) {
|
825
|
+
struct llama_context_params* data = get_llama_context_params(self);
|
826
|
+
data->op_offload = RTEST(op_offload) ? true : false;
|
827
|
+
return op_offload;
|
828
|
+
}
|
829
|
+
|
830
830
|
/* llama_model_quantize_params */
|
831
831
|
static void llama_model_quantize_params_free(void *ptr) {
|
832
832
|
if (ptr) {
|
@@ -1231,6 +1231,29 @@ static VALUE rb_llama_model_load_from_splits(VALUE self, VALUE paths, VALUE para
|
|
1231
1231
|
return TypedData_Wrap_Struct(rb_cLlamaModel, &llama_model_wrapper_data_type, model_wrapper);
|
1232
1232
|
}
|
1233
1233
|
|
1234
|
+
/**
|
1235
|
+
* @overload llama_model_save_to_file(model, path_model)
|
1236
|
+
* @param [LlamaModel] model
|
1237
|
+
* @param [String] path_model
|
1238
|
+
* @return [NilClass]
|
1239
|
+
*/
|
1240
|
+
static VALUE rb_llama_model_save_to_file(VALUE self, VALUE model, VALUE path_model) {
|
1241
|
+
if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
|
1242
|
+
rb_raise(rb_eArgError, "model must be a LlamaModel");
|
1243
|
+
return Qnil;
|
1244
|
+
}
|
1245
|
+
if (!RB_TYPE_P(path_model, T_STRING)) {
|
1246
|
+
rb_raise(rb_eArgError, "path_model must be a String");
|
1247
|
+
return Qnil;
|
1248
|
+
}
|
1249
|
+
llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
|
1250
|
+
const char* path_model_ = StringValueCStr(path_model);
|
1251
|
+
llama_model_save_to_file(model_wrapper->model, path_model_);
|
1252
|
+
RB_GC_GUARD(model);
|
1253
|
+
RB_GC_GUARD(path_model);
|
1254
|
+
return Qnil;
|
1255
|
+
}
|
1256
|
+
|
1234
1257
|
/**
|
1235
1258
|
* @overload llama_init_from_model(model, params)
|
1236
1259
|
* @param [LlamaModel] model
|
@@ -4032,6 +4055,7 @@ void Init_llama_cpp(void) {
|
|
4032
4055
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BAILINGMOE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BAILINGMOE));
|
4033
4056
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_LLAMA4", INT2NUM(LLAMA_VOCAB_PRE_TYPE_LLAMA4));
|
4034
4057
|
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_PIXTRAL", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PIXTRAL));
|
4058
|
+
rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SEED_CODER", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SEED_CODER));
|
4035
4059
|
/* llama_rope_type */
|
4036
4060
|
/* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
|
4037
4061
|
rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
|
@@ -4558,17 +4582,6 @@ void Init_llama_cpp(void) {
|
|
4558
4582
|
* @return [Integer]
|
4559
4583
|
*/
|
4560
4584
|
rb_define_method(rb_cLlamaContextParams, "type_v=", RUBY_METHOD_FUNC(llama_context_params_set_type_v), 1);
|
4561
|
-
/**
|
4562
|
-
* Document-method: logits_all
|
4563
|
-
* @return [Boolean]
|
4564
|
-
*/
|
4565
|
-
rb_define_method(rb_cLlamaContextParams, "logits_all", RUBY_METHOD_FUNC(llama_context_params_get_logits_all), 0);
|
4566
|
-
/**
|
4567
|
-
* Document-method: logits_all=
|
4568
|
-
* @param [Boolean] logits_all
|
4569
|
-
* @return [Boolean]
|
4570
|
-
*/
|
4571
|
-
rb_define_method(rb_cLlamaContextParams, "logits_all=", RUBY_METHOD_FUNC(llama_context_params_set_logits_all), 1);
|
4572
4585
|
/**
|
4573
4586
|
* Document-method: embeddings
|
4574
4587
|
* @return [Boolean]
|
@@ -4613,6 +4626,17 @@ void Init_llama_cpp(void) {
|
|
4613
4626
|
* @return [Boolean]
|
4614
4627
|
*/
|
4615
4628
|
rb_define_method(rb_cLlamaContextParams, "no_perf=", RUBY_METHOD_FUNC(llama_context_params_set_no_perf), 1);
|
4629
|
+
/**
|
4630
|
+
* Document-method: op_offload
|
4631
|
+
* @return [Boolean]
|
4632
|
+
*/
|
4633
|
+
rb_define_method(rb_cLlamaContextParams, "op_offload", RUBY_METHOD_FUNC(llama_context_params_get_op_offload), 0);
|
4634
|
+
/**
|
4635
|
+
* Document-method: op_offload=
|
4636
|
+
* @param [Boolean] op_offload
|
4637
|
+
* @return [Boolean]
|
4638
|
+
*/
|
4639
|
+
rb_define_method(rb_cLlamaContextParams, "op_offload=", RUBY_METHOD_FUNC(llama_context_params_set_op_offload), 1);
|
4616
4640
|
/* TODO: ggml_abort_callback abort_callback */
|
4617
4641
|
/* TODO: void* abort_callback_data */
|
4618
4642
|
|
@@ -4802,6 +4826,9 @@ void Init_llama_cpp(void) {
|
|
4802
4826
|
/* llama_model_load_from_splits */
|
4803
4827
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_load_from_splits", rb_llama_model_load_from_splits, 2);
|
4804
4828
|
|
4829
|
+
/* llama_model_save_to_file */
|
4830
|
+
rb_define_module_function(rb_mLlamaCpp, "llama_model_save_to_file", rb_llama_model_save_to_file, 2);
|
4831
|
+
|
4805
4832
|
/* llama_model_free */
|
4806
4833
|
rb_define_module_function(rb_mLlamaCpp, "llama_model_free", rb_llama_model_free, 1);
|
4807
4834
|
|
@@ -5338,4 +5365,10 @@ void Init_llama_cpp(void) {
|
|
5338
5365
|
|
5339
5366
|
/* llama_perf_sampler_reset */
|
5340
5367
|
rb_define_module_function(rb_mLlamaCpp, "llama_perf_sampler_reset", rb_llama_perf_sampler_reset, 1);
|
5368
|
+
|
5369
|
+
/* TODO: typedef bool (*llama_opt_param_filter) */
|
5370
|
+
/* TODO: bool llama_opt_param_filter_all */
|
5371
|
+
/* TODO: struct llama_opt_params */
|
5372
|
+
/* TODO: void llama_opt_init */
|
5373
|
+
/* TODO: void llama_opt_epoch */
|
5341
5374
|
}
|
data/lib/llama_cpp/version.rb
CHANGED
@@ -3,8 +3,8 @@
|
|
3
3
|
# llama_cpp.rb provides Ruby bindings for the llama.cpp.
|
4
4
|
module LlamaCpp
|
5
5
|
# The version of llama_cpp.rb you install.
|
6
|
-
VERSION = '0.19.4'
|
6
|
+
VERSION = '0.19.6'
|
7
7
|
|
8
8
|
# The supported version of llama.cpp.
|
9
|
-
LLAMA_CPP_VERSION = 'b5180'
|
9
|
+
LLAMA_CPP_VERSION = 'b5410'
|
10
10
|
end
|