whisper.rn 0.5.0-rc.9 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/build.gradle +2 -1
- package/android/gradle.properties +1 -1
- package/cpp/ggml-alloc.c +265 -141
- package/cpp/ggml-backend-impl.h +4 -1
- package/cpp/ggml-backend-reg.cpp +30 -13
- package/cpp/ggml-backend.cpp +221 -38
- package/cpp/ggml-backend.h +17 -1
- package/cpp/ggml-common.h +17 -0
- package/cpp/ggml-cpu/amx/amx.cpp +4 -2
- package/cpp/ggml-cpu/arch/arm/quants.c +132 -596
- package/cpp/ggml-cpu/arch/arm/repack.cpp +14 -286
- package/cpp/ggml-cpu/arch/x86/quants.c +184 -675
- package/cpp/ggml-cpu/arch/x86/repack.cpp +4679 -1657
- package/cpp/ggml-cpu/arch-fallback.h +32 -2
- package/cpp/ggml-cpu/common.h +14 -0
- package/cpp/ggml-cpu/ggml-cpu-impl.h +13 -6
- package/cpp/ggml-cpu/ggml-cpu.c +70 -42
- package/cpp/ggml-cpu/ggml-cpu.cpp +35 -28
- package/cpp/ggml-cpu/ops.cpp +1587 -1177
- package/cpp/ggml-cpu/ops.h +5 -8
- package/cpp/ggml-cpu/quants.c +35 -0
- package/cpp/ggml-cpu/quants.h +8 -0
- package/cpp/ggml-cpu/repack.cpp +458 -47
- package/cpp/ggml-cpu/repack.h +22 -0
- package/cpp/ggml-cpu/simd-mappings.h +89 -60
- package/cpp/ggml-cpu/traits.cpp +2 -2
- package/cpp/ggml-cpu/traits.h +1 -1
- package/cpp/ggml-cpu/vec.cpp +170 -26
- package/cpp/ggml-cpu/vec.h +506 -63
- package/cpp/ggml-cpu.h +1 -1
- package/cpp/ggml-impl.h +119 -9
- package/cpp/ggml-metal/ggml-metal-common.cpp +446 -0
- package/cpp/ggml-metal/ggml-metal-common.h +52 -0
- package/cpp/ggml-metal/ggml-metal-context.h +33 -0
- package/cpp/ggml-metal/ggml-metal-context.m +600 -0
- package/cpp/ggml-metal/ggml-metal-device.cpp +1376 -0
- package/cpp/ggml-metal/ggml-metal-device.h +226 -0
- package/cpp/ggml-metal/ggml-metal-device.m +1312 -0
- package/cpp/ggml-metal/ggml-metal-impl.h +722 -0
- package/cpp/ggml-metal/ggml-metal-ops.cpp +3158 -0
- package/cpp/ggml-metal/ggml-metal-ops.h +82 -0
- package/cpp/ggml-metal/ggml-metal.cpp +718 -0
- package/cpp/ggml-metal/ggml-whisper-sim.metallib +0 -0
- package/cpp/ggml-metal/ggml-whisper.metallib +0 -0
- package/cpp/ggml-metal-impl.h +90 -51
- package/cpp/ggml-metal.h +1 -6
- package/cpp/ggml-opt.cpp +97 -41
- package/cpp/ggml-opt.h +25 -6
- package/cpp/ggml-quants.c +111 -16
- package/cpp/ggml-quants.h +6 -0
- package/cpp/ggml.c +486 -98
- package/cpp/ggml.h +221 -16
- package/cpp/gguf.cpp +8 -1
- package/cpp/jsi/RNWhisperJSI.cpp +25 -6
- package/cpp/jsi/ThreadPool.h +3 -3
- package/cpp/whisper.cpp +100 -76
- package/cpp/whisper.h +1 -0
- package/ios/CMakeLists.txt +6 -1
- package/ios/RNWhisper.mm +6 -6
- package/ios/RNWhisperContext.mm +2 -0
- package/ios/RNWhisperVadContext.mm +16 -13
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +4 -1
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend.h +17 -1
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-common.h +17 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpu.h +1 -1
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-impl.h +119 -9
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +90 -51
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal.h +1 -6
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-opt.h +25 -6
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-quants.h +6 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml.h +221 -16
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper.h +1 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +4 -1
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +17 -1
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +17 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +1 -1
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +119 -9
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +90 -51
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +1 -6
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +25 -6
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +6 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +221 -16
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +1 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +4 -1
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend.h +17 -1
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-common.h +17 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpu.h +1 -1
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-impl.h +119 -9
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +90 -51
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal.h +1 -6
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-opt.h +25 -6
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-quants.h +6 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml.h +221 -16
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper.h +1 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +4 -1
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +17 -1
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +17 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +1 -1
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +119 -9
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +90 -51
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +1 -6
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +25 -6
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +6 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +221 -16
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +1 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
- package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js +13 -0
- package/lib/commonjs/realtime-transcription/RealtimeTranscriber.js.map +1 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/realtime-transcription/RealtimeTranscriber.js +13 -0
- package/lib/module/realtime-transcription/RealtimeTranscriber.js.map +1 -1
- package/lib/module/version.json +1 -1
- package/lib/typescript/realtime-transcription/RealtimeTranscriber.d.ts.map +1 -1
- package/lib/typescript/realtime-transcription/types.d.ts +6 -0
- package/lib/typescript/realtime-transcription/types.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/realtime-transcription/RealtimeTranscriber.ts +17 -0
- package/src/realtime-transcription/types.ts +6 -0
- package/src/version.json +1 -1
- package/whisper-rn.podspec +8 -9
- package/cpp/ggml-metal.m +0 -6284
- package/cpp/ggml-whisper-sim.metallib +0 -0
- package/cpp/ggml-whisper.metallib +0 -0
package/cpp/ggml-backend.cpp
CHANGED
@@ -19,9 +19,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <string>
-#include <vector>
 #include <algorithm>
+#include <vector>

 #ifdef __APPLE__
 #include <sys/types.h>
@@ -32,6 +31,7 @@
 // backend buffer type

 const char * wsp_ggml_backend_buft_name(wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(buft);
     return buft->iface.get_name(buft);
 }

@@ -41,14 +41,17 @@ wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer(wsp_ggml_backend_bu
         return wsp_ggml_backend_buffer_init(buft, {}, NULL, 0);
     }

+    WSP_GGML_ASSERT(buft);
     return buft->iface.alloc_buffer(buft, size);
 }

 size_t wsp_ggml_backend_buft_get_alignment(wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(buft);
     return buft->iface.get_alignment(buft);
 }

 size_t wsp_ggml_backend_buft_get_max_size(wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(buft);
     // get_max_size is optional, defaults to SIZE_MAX
     if (buft->iface.get_max_size) {
         return buft->iface.get_max_size(buft);
@@ -57,6 +60,7 @@ size_t wsp_ggml_backend_buft_get_max_size(wsp_ggml_backend_buffer_type_t buft) {
 }

 size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor) {
+    WSP_GGML_ASSERT(buft);
     // get_alloc_size is optional, defaults to wsp_ggml_nbytes
     if (buft->iface.get_alloc_size) {
         size_t size = buft->iface.get_alloc_size(buft, tensor);
@@ -67,6 +71,7 @@ size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft,
 }

 bool wsp_ggml_backend_buft_is_host(wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(buft);
     if (buft->iface.is_host) {
         return buft->iface.is_host(buft);
     }
@@ -74,6 +79,7 @@ bool wsp_ggml_backend_buft_is_host(wsp_ggml_backend_buffer_type_t buft) {
 }

 wsp_ggml_backend_dev_t wsp_ggml_backend_buft_get_device(wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(buft);
     return buft->device;
 }

@@ -111,10 +117,12 @@ void wsp_ggml_backend_buffer_free(wsp_ggml_backend_buffer_t buffer) {
 }

 size_t wsp_ggml_backend_buffer_get_size(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     return buffer->size;
 }

 void * wsp_ggml_backend_buffer_get_base(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     // get_base is optional if the buffer is zero-sized
     if (buffer->size == 0) {
         return NULL;
@@ -128,6 +136,7 @@ void * wsp_ggml_backend_buffer_get_base(wsp_ggml_backend_buffer_t buffer) {
 }

 enum wsp_ggml_status wsp_ggml_backend_buffer_init_tensor(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor) {
+    WSP_GGML_ASSERT(buffer);
     // init_tensor is optional
     if (buffer->iface.init_tensor) {
         return buffer->iface.init_tensor(buffer, tensor);
@@ -136,6 +145,7 @@ enum wsp_ggml_status wsp_ggml_backend_buffer_init_tensor(wsp_ggml_backend_buffer
 }

 void wsp_ggml_backend_buffer_clear(wsp_ggml_backend_buffer_t buffer, uint8_t value) {
+    WSP_GGML_ASSERT(buffer);
     // clear is optional if the buffer is zero-sized
     if (buffer->size == 0) {
         return;
@@ -161,6 +171,7 @@ bool wsp_ggml_backend_buffer_is_host(wsp_ggml_backend_buffer_t buffer) {
 }

 void wsp_ggml_backend_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage) {
+    WSP_GGML_ASSERT(buffer);
     buffer->usage = usage;

     // FIXME: add a generic callback to the buffer interface
@@ -170,14 +181,17 @@ void wsp_ggml_backend_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum ws
 }

 enum wsp_ggml_backend_buffer_usage wsp_ggml_backend_buffer_get_usage(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     return buffer->usage;
 }

 wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     return buffer->buft;
 }

 void wsp_ggml_backend_buffer_reset(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     if (buffer->iface.reset) {
         buffer->iface.reset(buffer);
     }
@@ -216,6 +230,7 @@ void wsp_ggml_backend_free(wsp_ggml_backend_t backend) {
 }

 wsp_ggml_backend_buffer_type_t wsp_ggml_backend_get_default_buffer_type(wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(backend);
     return wsp_ggml_backend_dev_buffer_type(backend->device);
 }

@@ -232,6 +247,8 @@ size_t wsp_ggml_backend_get_max_size(wsp_ggml_backend_t backend) {
 }

 void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(backend);
+    WSP_GGML_ASSERT(tensor);
     WSP_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
     WSP_GGML_ASSERT(offset + size <= wsp_ggml_nbytes(tensor) && "tensor write out of bounds");

@@ -243,6 +260,8 @@ void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_gg
 }

 void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(backend);
+    WSP_GGML_ASSERT(tensor);
     WSP_GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
     WSP_GGML_ASSERT(offset + size <= wsp_ggml_nbytes(tensor) && "tensor read out of bounds");

@@ -284,6 +303,7 @@ void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * d
 }

 void wsp_ggml_backend_tensor_memset(struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(tensor);
     wsp_ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

     if (size == 0) {
@@ -299,6 +319,7 @@ void wsp_ggml_backend_tensor_memset(struct wsp_ggml_tensor * tensor, uint8_t val
 }

 void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(backend);
     if (backend->iface.synchronize == NULL) {
         return;
     }
@@ -307,18 +328,21 @@ void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend) {
 }

 wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph) {
+    WSP_GGML_ASSERT(backend);
     WSP_GGML_ASSERT(backend->iface.graph_plan_create != NULL);

     return backend->iface.graph_plan_create(backend, cgraph);
 }

 void wsp_ggml_backend_graph_plan_free(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan) {
+    WSP_GGML_ASSERT(backend);
     WSP_GGML_ASSERT(backend->iface.graph_plan_free != NULL);

     backend->iface.graph_plan_free(backend, plan);
 }

 enum wsp_ggml_status wsp_ggml_backend_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan) {
+    WSP_GGML_ASSERT(backend);
     WSP_GGML_ASSERT(backend->iface.graph_plan_compute != NULL);

     return backend->iface.graph_plan_compute(backend, plan);
@@ -331,42 +355,32 @@ enum wsp_ggml_status wsp_ggml_backend_graph_compute(wsp_ggml_backend_t backend,
 }

 enum wsp_ggml_status wsp_ggml_backend_graph_compute_async(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph) {
+    WSP_GGML_ASSERT(backend);
     return backend->iface.graph_compute(backend, cgraph);
 }

 bool wsp_ggml_backend_supports_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op) {
+    WSP_GGML_ASSERT(backend);
     return wsp_ggml_backend_dev_supports_op(backend->device, op);
 }

 bool wsp_ggml_backend_supports_buft(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(backend);
     return wsp_ggml_backend_dev_supports_buft(backend->device, buft);
 }

 bool wsp_ggml_backend_offload_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op) {
+    WSP_GGML_ASSERT(backend);
     return wsp_ggml_backend_dev_offload_op(backend->device, op);
 }

 wsp_ggml_backend_dev_t wsp_ggml_backend_get_device(wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(backend);
     return backend->device;
 }

 // backend copy

-static bool wsp_ggml_are_same_layout(const struct wsp_ggml_tensor * a, const struct wsp_ggml_tensor * b) {
-    if (a->type != b->type) {
-        return false;
-    }
-    for (int i = 0; i < WSP_GGML_MAX_DIMS; i++) {
-        if (a->ne[i] != b->ne[i]) {
-            return false;
-        }
-        if (a->nb[i] != b->nb[i]) {
-            return false;
-        }
-    }
-    return true;
-}
-
 void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst) {
     WSP_GGML_ASSERT(wsp_ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

@@ -397,6 +411,7 @@ void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend_src, wsp_ggml
         return;
     }

+    WSP_GGML_ASSERT(backend_dst);
     if (backend_dst->iface.cpy_tensor_async != NULL) {
         if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
             return;
@@ -428,38 +443,52 @@ void wsp_ggml_backend_event_free(wsp_ggml_backend_event_t event) {
 }

 void wsp_ggml_backend_event_record(wsp_ggml_backend_event_t event, wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(backend);
     WSP_GGML_ASSERT(backend->iface.event_record != NULL);

     backend->iface.event_record(backend, event);
 }

 void wsp_ggml_backend_event_synchronize(wsp_ggml_backend_event_t event) {
+    WSP_GGML_ASSERT(event);
     WSP_GGML_ASSERT(event->device->iface.event_synchronize);

     event->device->iface.event_synchronize(event->device, event);
 }

 void wsp_ggml_backend_event_wait(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event) {
+    WSP_GGML_ASSERT(backend);
     WSP_GGML_ASSERT(backend->iface.event_wait != NULL);

     backend->iface.event_wait(backend, event);
 }

+static void wsp_ggml_backend_graph_optimize(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph) {
+    WSP_GGML_ASSERT(backend);
+    if (backend->iface.graph_optimize != NULL) {
+        backend->iface.graph_optimize(backend, cgraph);
+    }
+}
+
 // Backend device

 const char * wsp_ggml_backend_dev_name(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     return device->iface.get_name(device);
 }

 const char * wsp_ggml_backend_dev_description(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     return device->iface.get_description(device);
 }

 void wsp_ggml_backend_dev_memory(wsp_ggml_backend_dev_t device, size_t * free, size_t * total) {
+    WSP_GGML_ASSERT(device);
     device->iface.get_memory(device, free, total);
 }

 enum wsp_ggml_backend_dev_type wsp_ggml_backend_dev_type(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     return device->iface.get_type(device);
 }

@@ -469,18 +498,22 @@ void wsp_ggml_backend_dev_get_props(wsp_ggml_backend_dev_t device, struct wsp_gg
 }

 wsp_ggml_backend_reg_t wsp_ggml_backend_dev_backend_reg(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     return device->reg;
 }

 wsp_ggml_backend_t wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_t device, const char * params) {
+    WSP_GGML_ASSERT(device);
     return device->iface.init_backend(device, params);
 }

 wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_buffer_type(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     return device->iface.get_buffer_type(device);
 }

 wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_host_buffer_type(wsp_ggml_backend_dev_t device) {
+    WSP_GGML_ASSERT(device);
     if (device->iface.get_host_buffer_type == NULL) {
         return NULL;
     }
@@ -489,18 +522,22 @@ wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_host_buffer_type(wsp_ggml_ba
 }

 wsp_ggml_backend_buffer_t wsp_ggml_backend_dev_buffer_from_host_ptr(wsp_ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
+    WSP_GGML_ASSERT(device);
     return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
 }

 bool wsp_ggml_backend_dev_supports_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op) {
+    WSP_GGML_ASSERT(device);
     return device->iface.supports_op(device, op);
 }

 bool wsp_ggml_backend_dev_supports_buft(wsp_ggml_backend_dev_t device, wsp_ggml_backend_buffer_type_t buft) {
+    WSP_GGML_ASSERT(device);
     return device->iface.supports_buft(device, buft);
 }

 bool wsp_ggml_backend_dev_offload_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op) {
+    WSP_GGML_ASSERT(device);
     if (device->iface.offload_op != NULL) {
         return device->iface.offload_op(device, op);
     }
@@ -511,18 +548,22 @@ bool wsp_ggml_backend_dev_offload_op(wsp_ggml_backend_dev_t device, const struct
 // Backend (reg)

 const char * wsp_ggml_backend_reg_name(wsp_ggml_backend_reg_t reg) {
+    WSP_GGML_ASSERT(reg);
     return reg->iface.get_name(reg);
 }

 size_t wsp_ggml_backend_reg_dev_count(wsp_ggml_backend_reg_t reg) {
+    WSP_GGML_ASSERT(reg);
     return reg->iface.get_device_count(reg);
 }

 wsp_ggml_backend_dev_t wsp_ggml_backend_reg_dev_get(wsp_ggml_backend_reg_t reg, size_t index) {
+    WSP_GGML_ASSERT(reg);
     return reg->iface.get_device(reg, index);
 }

 void * wsp_ggml_backend_reg_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name) {
+    WSP_GGML_ASSERT(reg);
     if (!reg->iface.get_proc_address) {
         return NULL;
     }
@@ -537,6 +578,7 @@ struct wsp_ggml_backend_multi_buffer_context {
 };

 static void wsp_ggml_backend_multi_buffer_free_buffer(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     wsp_ggml_backend_multi_buffer_context * ctx = (wsp_ggml_backend_multi_buffer_context *) buffer->context;
     for (size_t i = 0; i < ctx->n_buffers; i++) {
         wsp_ggml_backend_buffer_free(ctx->buffers[i]);
@@ -547,6 +589,7 @@ static void wsp_ggml_backend_multi_buffer_free_buffer(wsp_ggml_backend_buffer_t
 }

 static void wsp_ggml_backend_multi_buffer_clear(wsp_ggml_backend_buffer_t buffer, uint8_t value) {
+    WSP_GGML_ASSERT(buffer);
     wsp_ggml_backend_multi_buffer_context * ctx = (wsp_ggml_backend_multi_buffer_context *) buffer->context;
     for (size_t i = 0; i < ctx->n_buffers; i++) {
         wsp_ggml_backend_buffer_clear(ctx->buffers[i], value);
@@ -582,10 +625,12 @@ wsp_ggml_backend_buffer_t wsp_ggml_backend_multi_buffer_alloc_buffer(wsp_ggml_ba
 }

 bool wsp_ggml_backend_buffer_is_multi_buffer(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     return buffer->iface.free_buffer == wsp_ggml_backend_multi_buffer_free_buffer;
 }

 void wsp_ggml_backend_multi_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage) {
+    WSP_GGML_ASSERT(buffer);
     WSP_GGML_ASSERT(wsp_ggml_backend_buffer_is_multi_buffer(buffer));
     wsp_ggml_backend_multi_buffer_context * ctx = (wsp_ggml_backend_multi_buffer_context *) buffer->context;
     for (size_t i = 0; i < ctx->n_buffers; i++) {
@@ -613,7 +658,7 @@ static bool wsp_ggml_is_view_op(enum wsp_ggml_op op) {
 #endif

 #ifndef WSP_GGML_SCHED_MAX_SPLIT_INPUTS
-#define WSP_GGML_SCHED_MAX_SPLIT_INPUTS
+#define WSP_GGML_SCHED_MAX_SPLIT_INPUTS 30
 #endif

 #ifndef WSP_GGML_SCHED_MAX_COPIES
@@ -662,6 +707,7 @@ struct wsp_ggml_backend_sched {
     // pipeline parallelism support
     int n_copies;
     int cur_copy;
+    int next_copy;
     wsp_ggml_backend_event_t events[WSP_GGML_SCHED_MAX_BACKENDS][WSP_GGML_SCHED_MAX_COPIES];
     struct wsp_ggml_tensor * graph_inputs[WSP_GGML_SCHED_MAX_SPLIT_INPUTS];
     int n_graph_inputs;
@@ -863,7 +909,7 @@ static void wsp_ggml_backend_sched_set_if_supported(wsp_ggml_backend_sched_t sch
 }

 // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
-static void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph) {
+void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph) {
     // reset splits
     sched->n_splits = 0;
     sched->n_graph_inputs = 0;
@@ -1085,6 +1131,11 @@ static void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, s
             }
         }
     }
+        // if the node is still unassigned, assign it to the first backend that supports it
+        for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) {
+            wsp_ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id);
+        }
+        WSP_GGML_ASSERT(*cur_backend_id != -1);
 }

     // pass 5: split graph, find tensors that need to be copied
@@ -1112,7 +1163,7 @@ static void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, s

         const int node_backend_id = tensor_backend_id(node);

-
+        WSP_GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback

         // check if we should start a new split based on the sources of the current node
         bool need_new_split = false;
@@ -1170,7 +1221,7 @@ static void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, s

                 size_t src_id = hash_id(src);
                 const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
-
+                WSP_GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now

                 if (src->flags & WSP_GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
                     if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
@@ -1254,6 +1305,10 @@ static void wsp_ggml_backend_sched_split_graph(wsp_ggml_backend_sched_t sched, s
         struct wsp_ggml_backend_sched_split * split = &sched->splits[i];
         split->graph = wsp_ggml_graph_view(graph, split->i_start, split->i_end);

+        // Optimize this split of the graph. This needs to happen before we make graph_copy,
+        // so they are in sync.
+        wsp_ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph);
+
         // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
         for (int j = 0; j < split->n_inputs; j++) {
             assert(graph_copy->size > (graph_copy->n_nodes + 1));
@@ -1359,17 +1414,22 @@ static bool wsp_ggml_backend_sched_alloc_splits(wsp_ggml_backend_sched_t sched)
 }

 static enum wsp_ggml_status wsp_ggml_backend_sched_compute_splits(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     struct wsp_ggml_backend_sched_split * splits = sched->splits;

-
-
+    wsp_ggml_tensor * prev_ids_tensor = nullptr;
+    std::vector<int32_t> ids;
+    std::vector<wsp_ggml_bitset_t> used_ids;
+
+    for (int split_id = 0; split_id < sched->n_splits; split_id++) {
+        struct wsp_ggml_backend_sched_split * split = &splits[split_id];
         int split_backend_id = split->backend_id;
         wsp_ggml_backend_t split_backend = sched->backends[split_backend_id];

         // copy the input tensors to the split backend
-        for (int
-        wsp_ggml_backend_t input_backend = wsp_ggml_backend_sched_get_tensor_backend(sched, split->inputs[
-        struct wsp_ggml_tensor * input = split->inputs[
+        for (int input_id = 0; input_id < split->n_inputs; input_id++) {
+            wsp_ggml_backend_t input_backend = wsp_ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
+            struct wsp_ggml_tensor * input = split->inputs[input_id];
             struct wsp_ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);

             if (input->flags & WSP_GGML_TENSOR_FLAG_INPUT) {
@@ -1387,16 +1447,104 @@ static enum wsp_ggml_status wsp_ggml_backend_sched_compute_splits(wsp_ggml_backe
             } else {
                 wsp_ggml_backend_synchronize(split_backend);
             }
-
-            //
-
+
+            // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
+            wsp_ggml_tensor * node = split->graph.nodes[0];
+            if (split->graph.n_nodes > 0 &&
+                wsp_ggml_backend_buffer_get_usage(input->buffer) == WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
+                wsp_ggml_backend_buffer_is_host(input->buffer) && (
+                    (node->src[0] == input_cpy && node->op == WSP_GGML_OP_MUL_MAT_ID)
+                    //|| (node->src[1] == input_cpy && node->op == WSP_GGML_OP_ADD_ID) /* WSP_GGML_OP_ADD_ID weights are small and not worth splitting */
+                )) {
+
+                const int64_t n_expert = node->op == WSP_GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
+                const size_t expert_size = node->op == WSP_GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
+
                 wsp_ggml_backend_synchronize(input_backend);
-
-
-
-
+
+                // get the ids
+                wsp_ggml_tensor * ids_tensor = node->src[2];
+                wsp_ggml_backend_t ids_backend = split_backend;
+
+                // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend
+                // in that case, we use the original ids tensor
+                for (int i = input_id + 1; i < split->n_inputs; i++) {
+                    if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) {
+                        ids_tensor = split->inputs[i];
+                        ids_backend = wsp_ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]);
+                        break;
+                    }
+                }
+
+                if (ids_tensor != prev_ids_tensor) {
+                    ids.resize(wsp_ggml_nbytes(ids_tensor) / sizeof(int32_t));
+                    wsp_ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, wsp_ggml_nbytes(ids_tensor));
+                    wsp_ggml_backend_synchronize(ids_backend);
+
+                    // find the used experts
+                    used_ids.clear();
+                    used_ids.resize(wsp_ggml_bitset_size(n_expert));
+                    for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
+                        for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
+                            int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
+                            WSP_GGML_ASSERT(id >= 0 && id < n_expert);
+                            wsp_ggml_bitset_set(used_ids.data(), id);
+                        }
+                    }
+
+                    prev_ids_tensor = ids_tensor;
+                }
+
+                // group consecutive experts and copy them together
+                auto copy_experts = [&](int32_t first_id, int32_t last_id) {
+                    const size_t expert_offset = first_id * expert_size;
+                    const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
+                    const size_t padding = std::min<size_t>(expert_size, 512);
+                    const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
+
+                    wsp_ggml_backend_tensor_set_async(split_backend,
+                        input_cpy,
+                        (const uint8_t *)input->data + expert_offset, expert_offset,
+                        // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
+                        // this is necessary for MMQ in the CUDA backend
+                        expert_size_copy + padding_end);
+                };
+
+                int id = 0;
+                while (!wsp_ggml_bitset_get(used_ids.data(), id)) {
+                    id++;
+                }
+                int32_t first_id = id;
+                int32_t last_id = first_id;
+
+                for (++id; id < n_expert; ++id) {
+                    if (!wsp_ggml_bitset_get(used_ids.data(), id)) {
+                        continue;
+                    }
+
+                    if (id == last_id + 1) {
+                        last_id = id;
+                        continue;
+                    }
+
+                    copy_experts(first_id, last_id);
+
+                    first_id = id;
+                    last_id = id;
+                }
+                copy_experts(first_id, last_id);
+            } else {
+                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
+                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
+                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+                    wsp_ggml_backend_synchronize(input_backend);
+                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                        wsp_ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                    } else {
+                        wsp_ggml_backend_synchronize(split_backend);
+                    }
+                    wsp_ggml_backend_tensor_copy(input, input_cpy);
                 }
-            wsp_ggml_backend_tensor_copy(input, input_cpy);
             }
         }
     }
@@ -1448,8 +1596,6 @@ static enum wsp_ggml_status wsp_ggml_backend_sched_compute_splits(wsp_ggml_backe
         }
     }

-    sched->cur_copy = (sched->cur_copy + 1) % sched->n_copies;
-
     return WSP_GGML_STATUS_SUCCESS;
 }

@@ -1537,6 +1683,7 @@ void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched) {
 }

 void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     // reset state for the next run
     if (!sched->is_reset) {
         wsp_ggml_hash_set_reset(&sched->hash_set);
@@ -1548,12 +1695,15 @@ void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched) {
 }

 bool wsp_ggml_backend_sched_reserve(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph) {
+    WSP_GGML_ASSERT(sched);
     WSP_GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);

-    wsp_ggml_backend_sched_split_graph(sched, measure_graph);
+    wsp_ggml_backend_sched_reset(sched);

     wsp_ggml_backend_sched_synchronize(sched);

+    wsp_ggml_backend_sched_split_graph(sched, measure_graph);
+
     if (!wsp_ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
         return false;
     }
@@ -1564,7 +1714,12 @@
 }

 bool wsp_ggml_backend_sched_alloc_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph) {
+    WSP_GGML_ASSERT(sched);
     WSP_GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);
+    WSP_GGML_ASSERT(!sched->is_alloc);
+
+    sched->cur_copy = sched->next_copy;
+    sched->next_copy = (sched->next_copy + 1) % sched->n_copies;

     wsp_ggml_backend_sched_split_graph(sched, graph);

@@ -1584,6 +1739,7 @@ enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched
 }

 enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute_async(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph) {
+    WSP_GGML_ASSERT(sched);
     if (!sched->is_reset && !sched->is_alloc) {
         wsp_ggml_backend_sched_reset(sched);
     }
@@ -1598,6 +1754,7 @@ enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute_async(wsp_ggml_backend
 }

 void wsp_ggml_backend_sched_synchronize(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     for (int i = 0; i < sched->n_backends; i++) {
         wsp_ggml_backend_synchronize(sched->backends[i]);
     }
@@ -1605,33 +1762,47 @@ void wsp_ggml_backend_sched_synchronize(wsp_ggml_backend_sched_t sched) {
         // if the graph is not already allocated, always use copy 0 after a synchronization
         // this ensures that during generation the same copy is used every time,
         // which avoids changes in the graph that could cause CUDA or other graphs to be disabled
-        sched->cur_copy = 0;
+        sched->next_copy = 0;
     }
 }

 void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data) {
+    WSP_GGML_ASSERT(sched);
     sched->callback_eval = callback;
     sched->callback_eval_user_data = user_data;
 }

 int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     return sched->n_splits;
 }

 int wsp_ggml_backend_sched_get_n_copies(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     return sched->n_copies;
 }

 int wsp_ggml_backend_sched_get_n_backends(wsp_ggml_backend_sched_t sched) {
+    WSP_GGML_ASSERT(sched);
     return sched->n_backends;
 }

 wsp_ggml_backend_t wsp_ggml_backend_sched_get_backend(wsp_ggml_backend_sched_t sched, int i) {
+    WSP_GGML_ASSERT(sched);
     WSP_GGML_ASSERT(i >= 0 && i < sched->n_backends);
     return sched->backends[i];
 }

+wsp_ggml_backend_buffer_type_t wsp_ggml_backend_sched_get_buffer_type(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(sched);
+    int backend_index = wsp_ggml_backend_sched_backend_id(sched, backend);
+    WSP_GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
+
+    return sched->bufts[backend_index];
+}
+
 size_t wsp_ggml_backend_sched_get_buffer_size(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(sched);
     int backend_index = wsp_ggml_backend_sched_backend_id(sched, backend);
     WSP_GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);

@@ -1639,6 +1810,7 @@ size_t wsp_ggml_backend_sched_get_buffer_size(wsp_ggml_backend_sched_t sched, ws
 }

 void wsp_ggml_backend_sched_set_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend) {
+    WSP_GGML_ASSERT(sched);
     int backend_index = wsp_ggml_backend_sched_backend_id(sched, backend);
     WSP_GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
     tensor_backend_id(node) = backend_index;
@@ -1647,6 +1819,7 @@ void wsp_ggml_backend_sched_set_tensor_backend(wsp_ggml_backend_sched_t sched, s
 }

 wsp_ggml_backend_t wsp_ggml_backend_sched_get_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node) {
+    WSP_GGML_ASSERT(sched);
     int backend_index = tensor_backend_id(node);
     if (backend_index == -1) {
         return NULL;
@@ -1657,6 +1830,7 @@ wsp_ggml_backend_t wsp_ggml_backend_sched_get_tensor_backend(wsp_ggml_backend_sc
 // utils

 enum wsp_ggml_status wsp_ggml_backend_view_init(struct wsp_ggml_tensor * tensor) {
+    WSP_GGML_ASSERT(tensor);
     WSP_GGML_ASSERT(tensor->buffer == NULL);
     WSP_GGML_ASSERT(tensor->view_src != NULL);
     WSP_GGML_ASSERT(tensor->view_src->buffer != NULL);
@@ -1668,6 +1842,7 @@ enum wsp_ggml_status wsp_ggml_backend_view_init(struct wsp_ggml_tensor * tensor)
 }

 enum wsp_ggml_status wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr) {
+    WSP_GGML_ASSERT(tensor);
     WSP_GGML_ASSERT(tensor->buffer == NULL);
     WSP_GGML_ASSERT(tensor->data == NULL);
     WSP_GGML_ASSERT(tensor->view_src == NULL);
@@ -1741,6 +1916,7 @@ static void graph_copy_init_tensor(struct wsp_ggml_hash_set * hash_set, struct w
 }

 struct wsp_ggml_backend_graph_copy wsp_ggml_backend_graph_copy(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph) {
+    WSP_GGML_ASSERT(graph);
     struct wsp_ggml_hash_set hash_set = wsp_ggml_hash_set_new(graph->visited_hash_set.size);
     struct wsp_ggml_tensor ** node_copies = (wsp_ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
     bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));
@@ -1885,6 +2061,7 @@ bool wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggm
 // CPU backend - buffer

 static void * wsp_ggml_backend_cpu_buffer_get_base(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     uintptr_t data = (uintptr_t)buffer->context;

     // align the buffer
@@ -1896,28 +2073,33 @@ static void * wsp_ggml_backend_cpu_buffer_get_base(wsp_ggml_backend_buffer_t buf
 }

 static void wsp_ggml_backend_cpu_buffer_free_buffer(wsp_ggml_backend_buffer_t buffer) {
+    WSP_GGML_ASSERT(buffer);
     wsp_ggml_aligned_free(buffer->context, buffer->size);
 }

 static void wsp_ggml_backend_cpu_buffer_memset_tensor(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(tensor);
     memset((char *)tensor->data + offset, value, size);

     WSP_GGML_UNUSED(buffer);
 }

 static void wsp_ggml_backend_cpu_buffer_set_tensor(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(tensor);
     memcpy((char *)tensor->data + offset, data, size);

     WSP_GGML_UNUSED(buffer);
 }

 static void wsp_ggml_backend_cpu_buffer_get_tensor(wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+    WSP_GGML_ASSERT(tensor);
     memcpy(data, (const char *)tensor->data + offset, size);

     WSP_GGML_UNUSED(buffer);
 }

 static bool wsp_ggml_backend_cpu_buffer_cpy_tensor(wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst) {
+    WSP_GGML_ASSERT(src);
     if (wsp_ggml_backend_buffer_is_host(src->buffer)) {
         memcpy(dst->data, src->data, wsp_ggml_nbytes(src));
         return true;
@@ -1928,6 +2110,7 @@ static bool wsp_ggml_backend_cpu_buffer_cpy_tensor(wsp_ggml_backend_buffer_t buf
 }

 static void wsp_ggml_backend_cpu_buffer_clear(wsp_ggml_backend_buffer_t buffer, uint8_t value) {
+    WSP_GGML_ASSERT(buffer);
     memset(buffer->context, value, buffer->size);
 }

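The most substantial change in this file is the MoE weight-copy optimization in wsp_ggml_backend_sched_compute_splits: when a host-resident expert-weight tensor feeds a WSP_GGML_OP_MUL_MAT_ID node, the scheduler now reads the ids tensor, marks the used experts in a bitset, and copies only runs of consecutive used experts instead of the whole tensor. Below is a minimal standalone sketch of just the run-grouping step; the std::vector<bool> container and the copy_experts callback are illustrative stand-ins (hypothetical, not package API) for the wsp_ggml_bitset_t helpers and the wsp_ggml_backend_tensor_set_async call in the real code.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch of the expert run-grouping in wsp_ggml_backend_sched_compute_splits:
// walk the set of used expert ids and issue one copy per consecutive run,
// instead of one copy for the whole weight tensor. `used` stands in for the
// bitset of used ids; `copy_experts` stands in for the async tensor copy.
static void copy_used_expert_runs(const std::vector<bool> & used, int64_t n_expert,
                                  void (*copy_experts)(int32_t first_id, int32_t last_id)) {
    int32_t id = 0;
    while (id < n_expert && !used[id]) {
        id++; // skip to the first used expert
    }
    if (id == n_expert) {
        return; // no expert used (the real code asserts every id is in range, so this cannot happen there)
    }
    int32_t first_id = id;
    int32_t last_id  = id;
    for (++id; id < n_expert; ++id) {
        if (!used[id]) {
            continue;
        }
        if (id == last_id + 1) { // extend the current run
            last_id = id;
            continue;
        }
        copy_experts(first_id, last_id); // flush the finished run
        first_id = id;
        last_id  = id;
    }
    copy_experts(first_id, last_id); // flush the final run
}

int main() {
    // experts 1..3 and 6 are used: expect one copy for [1,3] and one for [6,6]
    std::vector<bool> used = {false, true, true, true, false, false, true, false};
    copy_used_expert_runs(used, (int64_t) used.size(),
        [](int32_t first, int32_t last) { std::printf("copy experts %d..%d\n", first, last); });
    return 0;
}
```

In the diff above, each run becomes a single asynchronous copy of (last_id - first_id + 1) * expert_size bytes, and every run that does not end at the final expert gets a small padding tail appended so the padding of the last copied expert contains no NaNs (needed for MMQ in the CUDA backend).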