llama_cpp 0.17.0 → 0.17.1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: a63238d7d4a852e4a57667ba3e144364db201a691b9460c62fc8aa783677593d
4
- data.tar.gz: 7a879c04eebc5a308ae3f937f35972b11c5d15edd5000885416e3c57cfe21648
3
+ metadata.gz: 2b2fec35458bc9b745aa4e2526b2c50ca52201e8f29f608d84993b1eddff5a2f
4
+ data.tar.gz: 00f8d95bec17dcb422eb833623d5eca5028598e2a212dd71a248ad5f63434165
5
5
  SHA512:
6
- metadata.gz: a76006fc44d8a7b4295c4d10bcee87a2f161868b9c119ddfae1c2aecd0a5d7989bd33134dc64d8f1994b41732a64e2ca91472a8245ee58e3fb4fdcb01a1b24f2
7
- data.tar.gz: 63160f285f7fdb89e6d03e9cb83b064acbe8869ae384f9b3d32f0a822d7fc63354cf0fb6b6da39758140d885493baff716d31c42a956e3437c47adaf74172783
6
+ metadata.gz: fd7e98833df714d4c355820995e79964b74f31e0a4dc516360191a9c8c290108a5bf3d90b1ae704f1920ebb3db0152c2de17e2a8ec955fdc3ae1e979abae66ae
7
+ data.tar.gz: c1e32582670b1069187a1c2f8277296b0878c5dc613dca3c733378689086dc10fca2e1ee7d8e6aec6d9db95ebdc2bdfbb5b5c108808b5e489735d1ba19c52cd4
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ ## [[0.17.1](https://github.com/yoshoku/llama_cpp.rb/compare/v0.17.0...v0.17.1)] - 2024-07-06
2
+
3
+ - Update usage section on README.
4
+ - Change supported llama.cpp version to b3291.
5
+ - Add `LLAMA_VOCAB_PRE_TYPE_JAIS` constant.
6
+
1
7
  ## [[0.17.0](https://github.com/yoshoku/llama_cpp.rb/compare/v0.16.2...v0.17.0)] - 2024-06-29
2
8
 
3
9
  **Breaking Changes**
@@ -12,7 +18,7 @@ $ brew install llama.cpp
12
18
  $ gem install llama_cpp -- --with-opt-dir=/opt/homebrew
13
19
  ```
14
20
 
15
- - Change supported llama.cpp version to b3265
21
+ - Change supported llama.cpp version to b3265.
16
22
  - Add `LLAMA_VOCAB_TYPE_UGM` and `LLAMA_VOCAB_PRE_TYPE_VIKING` constants.
17
23
  - Add `token_pad` method to `Model`.
18
24
 
data/README.md CHANGED
@@ -32,7 +32,10 @@ $ gem install llama_cpp -- --with-opt-dir=/opt/homebrew
32
32
  ## Usage
33
33
 
34
34
  Prepare the quantized model by referring to [the usage section on the llama.cpp README](https://github.com/ggerganov/llama.cpp#usage).
35
- For example, preparing the quatization model based on [open_llama_7b](https://huggingface.co/openlm-research/open_llama_7b) is as follows:
35
+ For example, you could prepare the quantization model based on
36
+ [open_llama_7b](https://huggingface.co/openlm-research/open_llama_7b)
37
+ or, perhaps more useful in the context of Ruby, a smaller model such as
38
+ [tiny_llama_1b](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0):
36
39
 
37
40
  ```sh
38
41
  $ cd ~/
@@ -44,9 +47,9 @@ $ python3 -m pip install -r requirements.txt
44
47
  $ cd models
45
48
  $ git clone https://huggingface.co/openlm-research/open_llama_7b
46
49
  $ cd ../
47
- $ python3 convert.py models/open_llama_7b
50
+ $ python3 convert-hf-to-gguf.py models/open_llama_7b
48
51
  $ make
49
- $ ./quantize ./models/open_llama_7b/ggml-model-f16.gguf ./models/open_llama_7b/ggml-model-q4_0.bin q4_0
52
+ $ ./llama-quantize ./models/open_llama_7b/ggml-model-f16.gguf ./models/open_llama_7b/ggml-model-q4_0.bin q4_0
50
53
  ```
51
54
 
52
55
  An example of Ruby code that generates sentences with the quantization model is as follows:
@@ -3514,6 +3514,7 @@ extern "C" void Init_llama_cpp(void) {
3514
3514
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_SMAUG", INT2NUM(LLAMA_VOCAB_PRE_TYPE_SMAUG));
3515
3515
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_PORO", INT2NUM(LLAMA_VOCAB_PRE_TYPE_PORO));
3516
3516
  rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_VIKING", INT2NUM(LLAMA_VOCAB_PRE_TYPE_VIKING));
3517
+ rb_define_const(rb_mLLaMACpp, "LLAMA_VOCAB_PRE_TYPE_JAIS", INT2NUM(LLAMA_VOCAB_PRE_TYPE_JAIS));
3517
3518
 
3518
3519
  rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_UNDEFINED", INT2NUM(LLAMA_TOKEN_TYPE_UNDEFINED));
3519
3520
  rb_define_const(rb_mLLaMACpp, "LLAMA_TOKEN_TYPE_NORMAL", INT2NUM(LLAMA_TOKEN_TYPE_NORMAL));
@@ -3,8 +3,8 @@
3
3
  # llama_cpp.rb provides Ruby bindings for the llama.cpp.
4
4
  module LLaMACpp
5
5
  # The version of llama_cpp.rb you install.
6
- VERSION = '0.17.0'
6
+ VERSION = '0.17.1'
7
7
 
8
8
  # The supported version of llama.cpp.
9
- LLAMA_CPP_VERSION = 'b3265'
9
+ LLAMA_CPP_VERSION = 'b3291'
10
10
  end
data/sig/llama_cpp.rbs CHANGED
@@ -34,6 +34,7 @@ module LLaMACpp
34
34
  LLAMA_VOCAB_PRE_TYPE_SMAUG: Integer
35
35
  LLAMA_VOCAB_PRE_TYPE_PORO: Integer
36
36
  LLAMA_VOCAB_PRE_TYPE_VIKING: Integer
37
+ LLAMA_VOCAB_PRE_TYPE_JAIS: Integer
37
38
 
38
39
  LLAMA_TOKEN_ATTR_UNDEFINED: Integer
39
40
  LLAMA_TOKEN_ATTR_UNKNOWN: Integer
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llama_cpp
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.17.0
4
+ version: 0.17.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - yoshoku
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-06-29 00:00:00.000000000 Z
11
+ date: 2024-07-06 00:00:00.000000000 Z
12
12
  dependencies: []
13
13
  description: llama_cpp.rb provides Ruby bindings for the llama.cpp.
14
14
  email: