llama_cpp 0.19.1 → 0.19.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: bb93f6aa74a3f022cb15dde5df9b4e6deb4c228a5b0fbfc01ff2ebed9446f152
4
- data.tar.gz: 0b94a7d328ed8ac1bdc637a9b79ff490f5e5af1df3c5893e88c91dcf4a73ed07
3
+ metadata.gz: b9d8c3d491c1da2a69c57d881b9a712078868e9d11f9fdbda3cc9510a4d19c77
4
+ data.tar.gz: 4e4db26e6f428f9d9eac6ba2097a4453baa61515eff0249a12a1f57b51d89eb3
5
5
  SHA512:
6
- metadata.gz: 97f0be1659fabb158b328294b3d7b5df9ea7774f8d0063246ea5d006f39f6d9799d80236a712d6026b6d9df4acd5b20ced809d93145379dadc13a61e0813a689
7
- data.tar.gz: 5072945b4b0152fe5dfa41807253a511274020fb5149988586e1932c65f0ce020876f200cf97b291ba2d804cac03aea731c5d57adb6ab1ef8d448a210104836b
6
+ metadata.gz: dfa8562de2134396f10e8b085c4935a58bdfbc37948d29e668592788739fcdbef565bc14faed2e3d7580df2d99592b20c36964fad0b66c33e707efab13477d6e
7
+ data.tar.gz: ee7d052d9daa1acaf3ae7ecc9b9e87df30d4743781ca145553c32d62b6e7b849b85d50644da8b8e10829b9396f4389989f9252319f8abb272ed102170bbdf39e
data/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
1
1
 
2
- ## [[0.19.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.0...v0.19.1)] - 2025-03-29
2
+ ## [[0.19.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.1...v0.19.2)] - 2025-04-05
3
+
4
+ - Change supported llama.cpp version to b5050
5
+ - Add `LLAMA_VOCAB_PRE_TYPE_TRILLION` constant.
6
+ - Add `LLAMA_VOCAB_PRE_TYPE_BAILINGMOE` constant.
7
+ - Add `LlamaModelTensorBuftOverride` class.
8
+
9
+ ## [[0.19.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.19.0...v0.19.1)] - 2025-03-29
3
10
 
4
11
  - Change supported llama.cpp version to b4980
5
12
  - Add `LLAMA_VOCAB_PRE_TYPE_SUPERBPE` constant.
@@ -4,6 +4,7 @@ VALUE rb_mLlamaCpp;
4
4
  VALUE rb_cLlamaVocab;
5
5
  VALUE rb_cLlamaModel;
6
6
  VALUE rb_cLlamaContext;
7
+ VALUE rb_cLlamaModelTensorBuftOverride;
7
8
  VALUE rb_cLlamaModelParams;
8
9
  VALUE rb_cLlamaContextParams;
9
10
  VALUE rb_cLlamaModelQuantizeParams;
@@ -361,6 +362,49 @@ static VALUE llama_model_kv_override_get_val_str(VALUE self) {
361
362
  return rb_utf8_str_new_cstr(data->val_str);
362
363
  }
363
364
 
365
+ /* struct llama_model_tensor_buft_override */
366
+ static void llama_model_tensor_buft_override_free(void *ptr) {
367
+ if (ptr) {
368
+ ruby_xfree(ptr);
369
+ }
370
+ }
371
+
372
+ static size_t llama_model_tensor_buft_override_size(const void *ptr) {
373
+ return sizeof(*((struct llama_model_tensor_buft_override*)ptr));
374
+ }
375
+
376
+ static rb_data_type_t llama_model_tensor_buft_override_type = {
377
+ "LlamaModelTensorBuftOverride",
378
+ { NULL,
379
+ llama_model_tensor_buft_override_free,
380
+ llama_model_tensor_buft_override_size },
381
+ NULL,
382
+ NULL,
383
+ RUBY_TYPED_FREE_IMMEDIATELY
384
+ };
385
+
386
+ static VALUE llama_model_tensor_buft_override_alloc(VALUE self) {
387
+ struct llama_model_tensor_buft_override* data = (struct llama_model_tensor_buft_override*)ruby_xmalloc(sizeof(struct llama_model_tensor_buft_override));
388
+ return TypedData_Wrap_Struct(self, &llama_model_tensor_buft_override_type, data);
389
+ }
390
+
391
+ static struct llama_model_tensor_buft_override* get_llama_model_tensor_buft_override(VALUE self) {
392
+ struct llama_model_tensor_buft_override* data = NULL;
393
+ TypedData_Get_Struct(self, struct llama_model_tensor_buft_override, &llama_model_tensor_buft_override_type, data);
394
+ return data;
395
+ }
396
+
397
+ static VALUE llama_model_tensor_buft_override_get_pattern(VALUE self) {
398
+ struct llama_model_tensor_buft_override* data = get_llama_model_tensor_buft_override(self);
399
+ const char* pattern = data->pattern;
400
+ return rb_utf8_str_new_cstr(pattern);
401
+ }
402
+
403
+ // static VALUE llama_model_tensor_buft_override_get_buft(VALUE self) {
404
+ // struct llama_model_tensor_buft_override* data = get_llama_model_tensor_buft_override(self);
405
+ // return TypedData_Wrap_Struct(rb_cGgmlBackendBufferTypeT, &llama_model_wrapper_data_type, data->buft);
406
+ // }
407
+
364
408
  /* struct llama_model_params */
365
409
  static void llama_model_params_free(void *ptr) {
366
410
  if (ptr) {
@@ -3984,6 +4028,8 @@ void Init_llama_cpp(void) {
3984
4028
  rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM", INT2NUM(LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM));
3985
4029
  rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_GPT4O", INT2NUM(LLAMA_VOCAB_PRE_TYPE_GPT4O));
3986
4030
  rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_SUPERBPE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SUPERBPE));
4031
+ rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_TRILLION", INT2NUM(LLAMA_VOCAB_PRE_TYPE_TRILLION));
4032
+ rb_define_const(rb_mLlamaCpp, "LLAMA_VOCAB_PRE_TYPE_BAILINGMOE", INT2NUM(LLAMA_VOCAB_PRE_TYPE_BAILINGMOE));
3987
4033
  /* llama_rope_type */
3988
4034
  /* Document-const: LlamaCpp::LLAMA_ROPE_TYPE_NONE */
3989
4035
  rb_define_const(rb_mLlamaCpp, "LLAMA_ROPE_TYPE_NONE", INT2NUM(LLAMA_ROPE_TYPE_NONE));
@@ -4186,6 +4232,18 @@ void Init_llama_cpp(void) {
4186
4232
  */
4187
4233
  rb_define_method(rb_cLlamaModelKvOverride, "val_str", RUBY_METHOD_FUNC(llama_model_kv_override_get_val_str), 0);
4188
4234
 
4235
+ /**
4236
+ * Document-class: LlamaCpp::LlamaModelTensorBuftOverride
4237
+ * "struct llama_model_tensor_buft_override" wrapper class
4238
+ */
4239
+ rb_cLlamaModelTensorBuftOverride = rb_define_class_under(rb_mLlamaCpp, "LlamaModelTensorBuftOverride", rb_cObject);
4240
+ rb_define_alloc_func(rb_cLlamaModelTensorBuftOverride, llama_model_tensor_buft_override_alloc);
4241
+ /**
4242
+ * Document-method: pattern
4243
+ * @return [String]
4244
+ */
4245
+ rb_define_method(rb_cLlamaModelTensorBuftOverride, "pattern", RUBY_METHOD_FUNC(llama_model_tensor_buft_override_get_pattern), 0);
4246
+ /* TODO: ggml_backend_buffer_type_t buft */
4189
4247
 
4190
4248
  /**
4191
4249
  * Document-class: LlamaCpp::LlamaModelParams
@@ -4194,6 +4252,7 @@ void Init_llama_cpp(void) {
4194
4252
  rb_cLlamaModelParams = rb_define_class_under(rb_mLlamaCpp, "LlamaModelParams", rb_cObject);
4195
4253
  rb_define_alloc_func(rb_cLlamaModelParams, llama_model_params_alloc);
4196
4254
  /* TODO: ggml_backend_dev_t* devices */
4255
+ /* TODO: const struct llama_model_tensor_buft_override * tensor_buft_overrides */
4197
4256
  /**
4198
4257
  * Document-method: n_gpu_layers
4199
4258
  * @return [Integer]
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LlamaCpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.19.1'
6
+ VERSION = '0.19.2'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b4980'
9
+ LLAMA_CPP_VERSION = 'b5050'
10
10
  end
metadata CHANGED
@@ -1,13 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.19.1
4
+ version: 0.19.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku
8
8
  bindir: exe
9
9
  cert_chain: []
10
- date: 2025-03-29 00:00:00.000000000 Z
10
+ date: 2025-04-05 00:00:00.000000000 Z
11
11
  dependencies: []
12
12
  description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
13
13
  email: