llama_cpp 0.9.2 → 0.9.4

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 66c53ea31dd93cc684d6bbc5331bb7e9f12abe2a23e6e16b8f8a3407e62961a0
4
- data.tar.gz: 723d4f1d879c314d1733c84411e39d470f619a22be6a17d589406e831d8ea97b
3
+ metadata.gz: 0fe656f26d7680d1b96c6949d40f4f615209c1c752b45ef145ac0f68b4af1d26
4
+ data.tar.gz: fb4d3c5b54a854edeeaf070b5497ba6656a5cff59b6b911b638551462004efb3
5
5
  SHA512:
6
- metadata.gz: bee0ffe56796ec8bf6240178246c7c95c38ec7cec2bd29f61c1cd85e1230291751c13da850c330fca644089ee2ff524a767b132b5bc6658e95205114e7399ba4
7
- data.tar.gz: 382d05658c0a0d8df1c03dcaf93c8861bff3326e1d1e0c0cb3b0638f38cc3de5d36990b1f4df6d0bf3ce19337e9507cd5a2d196d893d8baf56d9b38a49738bc2
6
+ metadata.gz: 6dc8bc34fcb2635e5fa99c31f134dca12af4c48a0c3f1effbbf209e6e3156f1f95bf133ed33c2eabc6e9f7988d668dcbdb0545a3807b38969680618ba8774848
7
+ data.tar.gz: 591d9ed44ed3b3a40424d3903659ad868afff727a2cfaffefd6222ba54f8a51fbfbab109ceea22a9a6bd3ca4661fb3947ca8f3f179ac2d0ad8cf8ba917b30ffe
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
1
+ ## [[0.9.4](https://github.com/yoshoku/llama_cpp.rb/compare/v0.9.3...v0.9.4)] - 2023-11-25
2
+
3
+ - Bump bundled llama.cpp from b1523 to b1555.
4
+
5
+ ## [[0.9.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.9.2...v0.9.3)] - 2023-11-18
6
+
7
+ - Bump bundled llama.cpp from b1500 to b1523.
8
+ - Add `add_bos_token?` method to Model.
9
+ - Add `add_eos_token?` method to Model.
10
+
1
11
  ## [[0.9.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.9.1...v0.9.2)] - 2023-11-11
2
12
 
3
13
  - Bump bundled llama.cpp from b1472 to b1500.
@@ -6,7 +16,7 @@
6
16
 
7
17
  - Bump bundled llama.cpp from b1429 to b1472
8
18
  - Rename `kv_cache_tokens_rm` method to `kv_cache_clear` in Context.
9
- - Add `sample_min_p method` to Context.
19
+ - Add `sample_min_p` method to Context.
10
20
  - Add `rope_scaling_type`, `rope_freq_base`, `rope_freq_scale`, `yarn_ext_factor`, `yarn_attn_factor`, `yarn_beta_fast`, `yarn_beta_slow`, and `yarn_orig_ctx` to ContextParams.
11
21
  - Add `pure` to ModelQuantizeParams.
12
22
  - Add constants for RoPE scaling type.
@@ -1252,6 +1252,8 @@ public:
1252
1252
  rb_define_method(rb_cLLaMAModel, "token_bos", RUBY_METHOD_FUNC(_llama_model_token_bos), 0);
1253
1253
  rb_define_method(rb_cLLaMAModel, "token_eos", RUBY_METHOD_FUNC(_llama_model_token_eos), 0);
1254
1254
  rb_define_method(rb_cLLaMAModel, "token_nl", RUBY_METHOD_FUNC(_llama_model_token_nl), 0);
1255
+ rb_define_method(rb_cLLaMAModel, "add_bos_token?", RUBY_METHOD_FUNC(_llama_model_add_bos_token), 0);
1256
+ rb_define_method(rb_cLLaMAModel, "add_eos_token?", RUBY_METHOD_FUNC(_llama_model_add_eos_token), 0);
1255
1257
  rb_define_method(rb_cLLaMAModel, "token_prefix", RUBY_METHOD_FUNC(_llama_model_token_prefix), 0);
1256
1258
  rb_define_method(rb_cLLaMAModel, "token_middle", RUBY_METHOD_FUNC(_llama_model_token_middle), 0);
1257
1259
  rb_define_method(rb_cLLaMAModel, "token_suffix", RUBY_METHOD_FUNC(_llama_model_token_suffix), 0);
@@ -1541,6 +1543,16 @@ private:
1541
1543
  return INT2NUM(llama_token_nl(ptr->model));
1542
1544
  }
1543
1545
 
1546
+ static VALUE _llama_model_add_bos_token(VALUE self) {
1547
+ LLaMAModelWrapper* ptr = get_llama_model(self);
1548
+ return llama_add_bos_token(ptr->model) ? Qtrue : Qfalse;
1549
+ }
1550
+
1551
+ static VALUE _llama_model_add_eos_token(VALUE self) {
1552
+ LLaMAModelWrapper* ptr = get_llama_model(self);
1553
+ return llama_add_eos_token(ptr->model) ? Qtrue : Qfalse;
1554
+ }
1555
+
1544
1556
  static VALUE _llama_model_token_prefix(VALUE self) {
1545
1557
  LLaMAModelWrapper* ptr = get_llama_model(self);
1546
1558
  return INT2NUM(llama_token_prefix(ptr->model));