llama_cpp 0.3.2 → 0.3.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: f1fcd28849baae5e90c466665aff4fe5da1d848193ebcf74c3fe333c5674191c
-   data.tar.gz: fcb0c64528d24c5cfad677f17bfd6e1e817a4b8279317ca5b2113302735598b9
+   metadata.gz: cf337091019bb773e47cf206ff2ff30ed0bef963094494e6493455cad7c59840
+   data.tar.gz: fdbae8e08a6b87d49c5658d5c1857f20bf8efdf5a5371906630dccf4eb0f1159
  SHA512:
-   metadata.gz: c70b5f919feb7a585efbe21b3360254c2f5789504cd73fecee12fd686483c77eeb763ed91a8e7434d5852208555a78f168b358d0895f15b1ea7e774d36d6910a
-   data.tar.gz: f554ad58fc9d68c39b80995b7f424468386b32a5847dbdefbceb1cba53ff7182da35be8599523d82a6daa8fee23667d07e06faedc4c727d52e8fc594d0bc7d3f
+   metadata.gz: f0fee68294960c5ab9f56ebfe7256a00f9330e55f4954f2b016e07cbc023570298fa8f8b578f3e187fe9183b869769085311931122f93a033c6c21158b4e9485
+   data.tar.gz: 7eec8c98ae9ec1a56fa4bdb4e83a2dc2bdea407fc037af8d1b8f09a30c0d1246333d410707f4d66f3f473bf73574757cf12e56a86a0cb47074501f63f65f0c02
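These checksums can be reproduced locally; a minimal sketch, assuming the .gem archive (a plain tar containing `metadata.gz` and `data.tar.gz`) has already been unpacked with `tar -xf llama_cpp-0.3.3.gem`:

```ruby
require 'digest'

# Print the SHA256 of each member file; compare against checksums.yaml above.
%w[metadata.gz data.tar.gz].each do |f|
  puts "#{f}: #{Digest::SHA256.file(f).hexdigest}"
end
```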
data/CHANGELOG.md CHANGED
@@ -1,3 +1,16 @@
+ ## [[0.3.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.3.2...v0.3.3)] - 2023-07-15
+
+ - Bump bundled llama.cpp from master-481f793 to master-32c5411.
+ - Add MPI config options:
+   ```
+   $ gem install llama_cpp -- --with-mpi
+   ```
+ - Add `backend_free` module function to `LLaMACpp`. This method should be called once at the end of the program when the MPI option is enabled.
+ - Add `sample_classifier_free_guidance` method to `Context`.
+
+ **Breaking Changes**
+ - Rename `init_backend` method to `backend_init`. This method is called internally at `require 'llama_cpp'`.
+
  ## [[0.3.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.3.1...v0.3.2)] - 2023-07-08
 
  - Bump bundled llama.cpp from master-b8c8dda to master-481f793.
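The API changes listed above reduce to a small lifecycle pattern; a minimal sketch based only on the changelog entries (no model-loading details implied):

```ruby
require 'llama_cpp' # as of 0.3.3, backend_init is invoked here automatically

# ... load a model and sample as usual ...

# With the MPI build option enabled, release the backend once at program exit.
LLaMACpp.backend_free
```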
data/ext/llama_cpp/extconf.rb CHANGED
@@ -7,6 +7,7 @@ abort 'libstdc++ is not found.' unless have_library('stdc++')
 
  $srcs = %w[ggml.c llama.cpp llama_cpp.cpp]
  $srcs << 'ggml-opencl.cpp' if with_config('clblast')
+ $srcs << 'ggml-mpi.c' if with_config('mpi')
  $CFLAGS << ' -w -DNDEBUG'
  $CXXFLAGS << ' -std=c++11 -DNDEBUG'
  $INCFLAGS << ' -I$(srcdir)/src'
@@ -76,6 +77,14 @@ if with_config('clblast')
  end
  end
 
+ if with_config('mpi')
+   abort 'libmpi is not found.' unless have_library('mpi')
+   abort 'mpi.h is not found.' unless have_header('mpi.h')
+
+   $CFLAGS << ' -DGGML_USE_MPI -Wno-cast-qual'
+   $CXXFLAGS << ' -DGGML_USE_MPI -Wno-cast-qual'
+ end
+
  UNAME_M = RbConfig::CONFIG['build_cpu'] || RbConfig::CONFIG['host_cpu'] || RbConfig::CONFIG['target_cpu']
 
  # rubocop:disable Layout/LineLength
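For context, the `--with-mpi` flag reaches extconf.rb through mkmf's `with_config`: anything passed after `--` at install time is visible there. A standalone sketch of the pattern used in the hunk above (`example_ext` and its source list are hypothetical):

```ruby
require 'mkmf'

# Flags given as `gem install llama_cpp -- --with-mpi` make
# with_config('mpi') truthy inside extconf.rb.
$srcs = %w[example_ext.c] # hypothetical source list

if with_config('mpi')
  # Fail the build early when the MPI toolchain is missing.
  abort 'libmpi is not found.' unless have_library('mpi')
  abort 'mpi.h is not found.' unless have_header('mpi.h')

  $srcs << 'ggml-mpi.c'        # compile the MPI backend in
  $CFLAGS << ' -DGGML_USE_MPI' # enable the MPI code paths
end

create_makefile('example_ext') # hypothetical extension name
```

Guarding the `have_library`/`have_header` checks behind `with_config` keeps the default build free of an MPI dependency while failing fast for users who opt in.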