llama_cpp 0.10.0 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 7f406c15621a7c247adaacf1d588ddf278225e6846466afd1184c00f1ee61768
-  data.tar.gz: df73657c75a80cb44f41d34a3c1054676cf59a5d7d56cb1c2ce8a94264002293
+  metadata.gz: 9bd5dbea8695fdb41ea6d97e372c2cea452ee8ed070e26bd558a720d6c24fe27
+  data.tar.gz: 8d7bfd02445df81644eebb3a6db49bb1ddf241a344fef76a0b949f1c12d0639d
 SHA512:
-  metadata.gz: acd08d5099f14bf2bd4c8f9bf016253f0e316179b79d72fbe7066b0d645ca31e9bab427fcc53d93874f8df74cb1746731e2cd21864bfecdecff91f9778919b42
-  data.tar.gz: 5014a1bd545be90c56bebd48119a198cf7276513cb6c5f00d8322aa6eaa9a27442bc51bf06953a11c2fc04145f797c630cefee17b36589fe38f9226003416a09
+  metadata.gz: 116423f4581f605ee379bcd690299e152087c03665be6171bc137205f0824be5d5e0ce6d3c0b548fc6d28193c601679c3cc27a101b6f5b58968c69388a70cbfc
+  data.tar.gz: fd162b1c4e26732573d32ba7439fc44dd2ff09a9024c64ff5adce997e154a934d9556f0ff3cfd6b83dd7dcbfb8366cc6ce394b8324f39ef349d4cb5834ea43f1
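To verify a downloaded copy against these published values, here is a minimal sketch using Ruby's standard Digest library. It assumes you have unpacked the .gem archive (a plain tar containing metadata.gz, data.tar.gz, and checksums.yaml.gz) into the current directory, and it checks only the SHA256 shown above:

```ruby
require 'digest'

# Published SHA256 for 0.10.1's data.tar.gz, copied from checksums.yaml above.
EXPECTED = '8d7bfd02445df81644eebb3a6db49bb1ddf241a344fef76a0b949f1c12d0639d'

# Assumption: data.tar.gz was extracted from the .gem archive into the
# current directory (e.g. via `tar -xf llama_cpp-0.10.1.gem`).
actual = Digest::SHA256.file('data.tar.gz').hexdigest

puts(actual == EXPECTED ? 'checksum OK' : 'checksum MISMATCH')
```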
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+## [[0.10.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.10.0...v0.10.1)] - 2023-12-16
+
+- Bump bundled llama.cpp from b1620 to b1641.
+- Add attribute reader for `params` to `Model`.
+- Add `Batch` class; it had not been published because the author forgot to call `rb_define_class`.
+
 ## [[0.10.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.9.5...v0.10.0)] - 2023-12-09
 
 - Bump bundled llama.cpp from b1593 to b1620.
data/ext/llama_cpp/llama_cpp.cpp CHANGED
@@ -1333,6 +1333,7 @@ public:
   static void define_class(VALUE outer) {
     rb_cLLaMAModel = rb_define_class_under(outer, "Model", rb_cObject);
     rb_define_alloc_func(rb_cLLaMAModel, llama_model_alloc);
+    rb_define_attr(rb_cLLaMAModel, "params", 1, 0);
     rb_define_method(rb_cLLaMAModel, "initialize", RUBY_METHOD_FUNC(_llama_model_initialize), -1);
     rb_define_method(rb_cLLaMAModel, "empty?", RUBY_METHOD_FUNC(_llama_model_empty), 0);
     rb_define_method(rb_cLLaMAModel, "free", RUBY_METHOD_FUNC(_llama_model_free), 0);
@@ -3071,6 +3072,7 @@ static VALUE rb_llama_max_devices(VALUE self) {
 extern "C" void Init_llama_cpp(void) {
   rb_mLLaMACpp = rb_define_module("LLaMACpp");
 
+  RbLLaMABatch::define_class(rb_mLLaMACpp);
   RbLLaMATokenData::define_class(rb_mLLaMACpp);
   RbLLaMATokenDataArray::define_class(rb_mLLaMACpp);
   RbLLaMAModel::define_class(rb_mLLaMACpp);
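Because `RbLLaMABatch::define_class` was never called in 0.10.0, the compiled `Batch` bindings existed but were unreachable from Ruby. A quick check using only standard Ruby reflection (no assumptions about `Batch`'s own constructor):

```ruby
require 'llama_cpp'

# In 0.10.0 this returned false: the C++ wrapper was compiled in, but the
# class was never registered under the LLaMACpp module. From 0.10.1 it is.
puts LLaMACpp.const_defined?(:Batch)  # => true
```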
data/ext/llama_cpp/src/ggml-alloc.h CHANGED
@@ -43,7 +43,7 @@ GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph
 // ggml-backend v2 API
 //
 
-// Seperate tensor and graph allocator objects
+// Separate tensor and graph allocator objects
 // This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
 // The original API is kept as a wrapper around the new API