llama_cpp 0.3.2 → 0.3.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +13 -0
- data/ext/llama_cpp/extconf.rb +9 -0
- data/ext/llama_cpp/llama_cpp.cpp +165 -112
- data/ext/llama_cpp/src/ggml-cuda.cu +217 -76
- data/ext/llama_cpp/src/ggml-metal.h +5 -1
- data/ext/llama_cpp/src/ggml-metal.m +16 -5
- data/ext/llama_cpp/src/ggml-metal.metal +56 -47
- data/ext/llama_cpp/src/ggml-mpi.c +216 -0
- data/ext/llama_cpp/src/ggml-mpi.h +39 -0
- data/ext/llama_cpp/src/ggml.c +1082 -774
- data/ext/llama_cpp/src/ggml.h +64 -18
- data/ext/llama_cpp/src/llama.cpp +179 -51
- data/ext/llama_cpp/src/llama.h +15 -1
- data/lib/llama_cpp/version.rb +2 -2
- data/lib/llama_cpp.rb +1 -1
- data/sig/llama_cpp.rbs +3 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cf337091019bb773e47cf206ff2ff30ed0bef963094494e6493455cad7c59840
+  data.tar.gz: fdbae8e08a6b87d49c5658d5c1857f20bf8efdf5a5371906630dccf4eb0f1159
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f0fee68294960c5ab9f56ebfe7256a00f9330e55f4954f2b016e07cbc023570298fa8f8b578f3e187fe9183b869769085311931122f93a033c6c21158b4e9485
+  data.tar.gz: 7eec8c98ae9ec1a56fa4bdb4e83a2dc2bdea407fc037af8d1b8f09a30c0d1246333d410707f4d66f3f473bf73574757cf12e56a86a0cb47074501f63f65f0c02
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,16 @@
+## [[0.3.3](https://github.com/yoshoku/llama_cpp.rb/compare/v0.3.2...v0.3.3)] - 2023-07-15
+
+- Bump bundled llama.cpp from master-481f793 to master-32c5411.
+- Add MPI config options:
+  ```
+  $ gem install llama_cpp -- --with-mpi
+  ```
+- Add `backend_free` module function to `LLaMACpp`. This method should be called once at the end of the program when the MPI option is enabled.
+- Add `sample_classifier_free_guidance` method to `Context`.
+
+**Breaking Changes**
+- Rename `init_backend` method to `backend_init`. This method is called internally at `require 'llama_cpp'`.
+
 ## [[0.3.2](https://github.com/yoshoku/llama_cpp.rb/compare/v0.3.1...v0.3.2)] - 2023-07-08
 
 - Bump bundled llama.cpp from master-b8c8dda to master-481f793.
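Taken together, the `backend_init`/`backend_free` entries above describe the backend lifecycle for this release. A minimal usage sketch, assuming an MPI-enabled build; only `LLaMACpp.backend_init` and `LLaMACpp.backend_free` are confirmed by this changelog, everything else is placeholder:

```ruby
# backend_init is called internally by the require below
# (0.3.3 renamed it from init_backend), so no explicit
# initialization call is needed.
require 'llama_cpp'

# ... build a model/context and generate as usual ...

# With the MPI option enabled, backend_free must be called once
# at the end of the program; at_exit is one way to guarantee that.
at_exit { LLaMACpp.backend_free }
```

The new `Context#sample_classifier_free_guidance` mirrors the classifier-free guidance sampler that landed in upstream llama.cpp shortly before master-32c5411. The call below is a hypothetical sketch: the `candidates` argument and the `guidance:` and `scale:` keywords are assumptions based on the upstream C API of that period, not confirmed by this diff.

```ruby
# Hypothetical: ctx and guidance_ctx are two Context objects, the
# second evaluated on the negative (guidance) prompt; candidates is
# the token-data array for the current sampling step. As in the
# upstream C function, the call rescales the candidate logits in
# place rather than returning a token.
ctx.sample_classifier_free_guidance(candidates, guidance: guidance_ctx, scale: 1.5)
```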
data/ext/llama_cpp/extconf.rb
CHANGED
@@ -7,6 +7,7 @@ abort 'libstdc++ is not found.' unless have_library('stdc++')
 
 $srcs = %w[ggml.c llama.cpp llama_cpp.cpp]
 $srcs << 'ggml-opencl.cpp' if with_config('clblast')
+$srcs << 'ggml-mpi.c' if with_config('mpi')
 $CFLAGS << ' -w -DNDEBUG'
 $CXXFLAGS << ' -std=c++11 -DNDEBUG'
 $INCFLAGS << ' -I$(srcdir)/src'
@@ -76,6 +77,14 @@ if with_config('clblast')
   end
 end
 
+if with_config('mpi')
+  abort 'libmpi is not found.' unless have_library('mpi')
+  abort 'mpi.h is not found.' unless have_header('mpi.h')
+
+  $CFLAGS << ' -DGGML_USE_MPI -Wno-cast-qual'
+  $CXXFLAGS << ' -DGGML_USE_MPI -Wno-cast-qual'
+end
+
 UNAME_M = RbConfig::CONFIG['build_cpu'] || RbConfig::CONFIG['host_cpu'] || RbConfig::CONFIG['target_cpu']
 
 # rubocop:disable Layout/LineLength
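For context on how the changelog's `gem install llama_cpp -- --with-mpi` reaches this file: `with_config('mpi')` is plain mkmf, returning a truthy value when the extension is configured with `--with-mpi`. A standalone sketch of the same pattern; the gem name is a placeholder, not part of llama_cpp:

```ruby
# extconf.rb — minimal mkmf sketch of the with_config pattern above.
# with_config('mpi') is truthy under either of:
#   gem install some_gem -- --with-mpi
#   ruby extconf.rb --with-mpi
require 'mkmf'

if with_config('mpi')
  abort 'libmpi is not found.' unless have_library('mpi')
  $CFLAGS << ' -DGGML_USE_MPI'
end

create_makefile('some_gem/some_gem')
```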