whisper.rn 0.4.0-rc.1 → 0.4.0-rc.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/README.md +6 -6
  2. package/android/build.gradle +4 -0
  3. package/android/src/main/CMakeLists.txt +21 -1
  4. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
  5. package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
  6. package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
  7. package/android/src/main/jni-utils.h +76 -0
  8. package/android/src/main/jni.cpp +226 -109
  9. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  10. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  11. package/cpp/coreml/whisper-encoder-impl.h +1 -1
  12. package/cpp/coreml/whisper-encoder.h +4 -0
  13. package/cpp/coreml/whisper-encoder.mm +5 -3
  14. package/cpp/ggml-alloc.c +797 -400
  15. package/cpp/ggml-alloc.h +60 -10
  16. package/cpp/ggml-backend-impl.h +255 -0
  17. package/cpp/ggml-backend-reg.cpp +582 -0
  18. package/cpp/ggml-backend.cpp +2002 -0
  19. package/cpp/ggml-backend.h +354 -0
  20. package/cpp/ggml-common.h +1851 -0
  21. package/cpp/ggml-cpp.h +39 -0
  22. package/cpp/ggml-cpu-aarch64.cpp +4247 -0
  23. package/cpp/ggml-cpu-aarch64.h +8 -0
  24. package/cpp/ggml-cpu-impl.h +531 -0
  25. package/cpp/ggml-cpu-quants.c +12245 -0
  26. package/cpp/ggml-cpu-quants.h +63 -0
  27. package/cpp/ggml-cpu-traits.cpp +36 -0
  28. package/cpp/ggml-cpu-traits.h +38 -0
  29. package/cpp/ggml-cpu.c +14792 -0
  30. package/cpp/ggml-cpu.cpp +653 -0
  31. package/cpp/ggml-cpu.h +137 -0
  32. package/cpp/ggml-impl.h +567 -0
  33. package/cpp/ggml-metal-impl.h +288 -0
  34. package/cpp/ggml-metal.h +24 -43
  35. package/cpp/ggml-metal.m +4867 -1080
  36. package/cpp/ggml-opt.cpp +854 -0
  37. package/cpp/ggml-opt.h +216 -0
  38. package/cpp/ggml-quants.c +5238 -0
  39. package/cpp/ggml-quants.h +100 -0
  40. package/cpp/ggml-threading.cpp +12 -0
  41. package/cpp/ggml-threading.h +14 -0
  42. package/cpp/ggml-whisper.metallib +0 -0
  43. package/cpp/ggml.c +5106 -19431
  44. package/cpp/ggml.h +847 -669
  45. package/cpp/gguf.cpp +1329 -0
  46. package/cpp/gguf.h +202 -0
  47. package/cpp/rn-audioutils.cpp +68 -0
  48. package/cpp/rn-audioutils.h +14 -0
  49. package/cpp/rn-whisper-log.h +11 -0
  50. package/cpp/rn-whisper.cpp +221 -52
  51. package/cpp/rn-whisper.h +50 -15
  52. package/cpp/whisper.cpp +3174 -1533
  53. package/cpp/whisper.h +176 -44
  54. package/ios/RNWhisper.mm +139 -46
  55. package/ios/RNWhisperAudioUtils.h +1 -2
  56. package/ios/RNWhisperAudioUtils.m +18 -67
  57. package/ios/RNWhisperContext.h +11 -8
  58. package/ios/RNWhisperContext.mm +195 -150
  59. package/jest/mock.js +15 -2
  60. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  61. package/lib/commonjs/index.js +76 -28
  62. package/lib/commonjs/index.js.map +1 -1
  63. package/lib/commonjs/version.json +1 -1
  64. package/lib/module/NativeRNWhisper.js.map +1 -1
  65. package/lib/module/index.js +76 -28
  66. package/lib/module/index.js.map +1 -1
  67. package/lib/module/version.json +1 -1
  68. package/lib/typescript/NativeRNWhisper.d.ts +13 -4
  69. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  70. package/lib/typescript/index.d.ts +37 -5
  71. package/lib/typescript/index.d.ts.map +1 -1
  72. package/package.json +9 -7
  73. package/src/NativeRNWhisper.ts +20 -4
  74. package/src/index.ts +98 -42
  75. package/src/version.json +1 -1
  76. package/whisper-rn.podspec +13 -20
  77. package/cpp/README.md +0 -4
  78. package/cpp/ggml-metal.metal +0 -2353
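
The bulk of this release is a resync of the vendored ggml sources with upstream's split into separate backend, backend-registry, and CPU modules (files 16-46 above). Below is the full hunk for the new package/cpp/ggml-cpu.cpp. As an orientation sketch only (not part of the package), this is roughly how the CPU backend entry points defined in that hunk are typically driven; wsp_ggml_backend_graph_compute and wsp_ggml_backend_free are assumed to be the wsp_-prefixed generic helpers from the accompanying ggml-backend.h and do not appear in the hunk itself:

    #include "ggml-backend.h"
    #include "ggml-cpu.h"

    // Sketch: create a CPU backend stream, configure it, run a prepared graph, free it.
    static enum wsp_ggml_status run_graph_on_cpu(struct wsp_ggml_cgraph * graph, int n_threads) {
        wsp_ggml_backend_t backend = wsp_ggml_backend_cpu_init();      // also calls wsp_ggml_cpu_init()
        if (backend == NULL) {
            return WSP_GGML_STATUS_FAILED;
        }
        wsp_ggml_backend_cpu_set_n_threads(backend, n_threads);        // per-stream thread count
        wsp_ggml_backend_cpu_set_abort_callback(backend, NULL, NULL);  // optional cancellation hook
        enum wsp_ggml_status status = wsp_ggml_backend_graph_compute(backend, graph); // assumed generic helper
        wsp_ggml_backend_free(backend);                                 // assumed generic helper
        return status;
    }
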
package/cpp/ggml-cpu.cpp (new file)
@@ -0,0 +1,653 @@
+#include "ggml-backend.h"
+#include "ggml-backend-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-aarch64.h"
+#include "ggml-cpu-traits.h"
+#include "ggml-impl.h"
+#include "amx/amx.h"
+
+#include <cctype>
+#include <string>
+#include <vector>
+
+#ifdef WSP_GGML_USE_CPU_HBM
+#include "ggml-cpu-hbm.h"
+#endif
+
+#ifdef WSP_GGML_USE_CPU_KLEIDIAI
+#include "kleidiai/kleidiai.h"
+#endif
+
+#if defined(__APPLE__)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+    #define NOMINMAX
+#endif
+#include <windows.h>
+#endif
+
+// ggml-backend interface
+
+std::vector<wsp_ggml_backend_buffer_type_t>& wsp_ggml_backend_cpu_get_extra_buffers_type() {
+    static std::vector<wsp_ggml_backend_buffer_type_t> bufts = []() {
+        std::vector<wsp_ggml_backend_buffer_type_t> bufts;
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+        if (wsp_ggml_backend_amx_buffer_type()) {
+            bufts.push_back(wsp_ggml_backend_amx_buffer_type());
+        }
+#endif
+
+#ifdef WSP_GGML_USE_CPU_KLEIDIAI
+        if (wsp_ggml_backend_cpu_kleidiai_buffer_type()) {
+            bufts.push_back(wsp_ggml_backend_cpu_kleidiai_buffer_type());
+        }
+#endif
+
+#ifdef WSP_GGML_USE_CPU_AARCH64
+        if (wsp_ggml_backend_cpu_aarch64_buffer_type()) {
+            bufts.push_back(wsp_ggml_backend_cpu_aarch64_buffer_type());
+        }
+#endif
+
+        bufts.push_back(NULL);
+
+        return bufts;
+    }();
+
+    return bufts;
+}
+
+static wsp_ggml_backend_buffer_type_t * wsp_ggml_backend_cpu_device_get_extra_buffers_type(wsp_ggml_backend_dev_t device) {
+    return wsp_ggml_backend_cpu_get_extra_buffers_type().data();
+
+    WSP_GGML_UNUSED(device);
+}
+
+static bool wsp_ggml_backend_cpu_is_extra_buffer_type(wsp_ggml_backend_buffer_type_t buft) {
+    for (auto extra : wsp_ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra && extra == buft) return true;
+    }
+    return false;
+}
+
+// CPU backend - backend (stream)
+
+struct wsp_ggml_backend_cpu_context {
+    int n_threads;
+    wsp_ggml_threadpool_t threadpool;
+
+    uint8_t * work_data;
+    size_t work_size;
+
+    wsp_ggml_abort_callback abort_callback;
+    void * abort_callback_data;
+};
+
+static const char * wsp_ggml_backend_cpu_get_name(wsp_ggml_backend_t backend) {
+    return "CPU";
+
+    WSP_GGML_UNUSED(backend);
+}
+
+static void wsp_ggml_backend_cpu_free(wsp_ggml_backend_t backend) {
+    struct wsp_ggml_backend_cpu_context * cpu_ctx = (struct wsp_ggml_backend_cpu_context *)backend->context;
+    delete[] cpu_ctx->work_data;
+    delete cpu_ctx;
+    delete backend;
+}
+
+struct wsp_ggml_backend_plan_cpu {
+    struct wsp_ggml_cplan cplan;
+    struct wsp_ggml_cgraph cgraph;
+};
+
+static wsp_ggml_backend_graph_plan_t wsp_ggml_backend_cpu_graph_plan_create(wsp_ggml_backend_t backend, const struct wsp_ggml_cgraph * cgraph) {
+    struct wsp_ggml_backend_cpu_context * cpu_ctx = (struct wsp_ggml_backend_cpu_context *)backend->context;
+
+    struct wsp_ggml_backend_plan_cpu * cpu_plan = new wsp_ggml_backend_plan_cpu;
+
+    cpu_plan->cplan = wsp_ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool);
+    cpu_plan->cgraph = *cgraph; // FIXME: deep copy
+
+    if (cpu_plan->cplan.work_size > 0) {
+        cpu_plan->cplan.work_data = new uint8_t[cpu_plan->cplan.work_size];
+        if (cpu_plan->cplan.work_data == NULL) {
+            delete cpu_plan;
+            return NULL;
+        }
+    }
+
+    cpu_plan->cplan.abort_callback = cpu_ctx->abort_callback;
+    cpu_plan->cplan.abort_callback_data = cpu_ctx->abort_callback_data;
+
+    return cpu_plan;
+}
+
+static void wsp_ggml_backend_cpu_graph_plan_free(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan) {
+    struct wsp_ggml_backend_plan_cpu * cpu_plan = (struct wsp_ggml_backend_plan_cpu *)plan;
+
+    delete[] cpu_plan->cplan.work_data;
+    delete cpu_plan;
+
+    WSP_GGML_UNUSED(backend);
+}
+
+static enum wsp_ggml_status wsp_ggml_backend_cpu_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan) {
+    struct wsp_ggml_backend_plan_cpu * cpu_plan = (struct wsp_ggml_backend_plan_cpu *)plan;
+
+    return wsp_ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);
+
+    WSP_GGML_UNUSED(backend);
+}
+
+static enum wsp_ggml_status wsp_ggml_backend_cpu_graph_compute(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph) {
+    struct wsp_ggml_backend_cpu_context * cpu_ctx = (struct wsp_ggml_backend_cpu_context *)backend->context;
+
+    struct wsp_ggml_cplan cplan = wsp_ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool);
+
+    if (cpu_ctx->work_size < cplan.work_size) {
+        delete[] cpu_ctx->work_data;
+        cpu_ctx->work_data = new uint8_t[cplan.work_size];
+        if (cpu_ctx->work_data == NULL) {
+            cpu_ctx->work_size = 0;
+            return WSP_GGML_STATUS_ALLOC_FAILED;
+        }
+        cpu_ctx->work_size = cplan.work_size;
+    }
+    cplan.work_data = (uint8_t *)cpu_ctx->work_data;
+
+    cplan.abort_callback = cpu_ctx->abort_callback;
+    cplan.abort_callback_data = cpu_ctx->abort_callback_data;
+
+    return wsp_ggml_graph_compute(cgraph, &cplan);
+}
+
+static const struct wsp_ggml_backend_i wsp_ggml_backend_cpu_i = {
+    /* .get_name = */ wsp_ggml_backend_cpu_get_name,
+    /* .free = */ wsp_ggml_backend_cpu_free,
+    /* .set_tensor_async = */ NULL,
+    /* .get_tensor_async = */ NULL,
+    /* .cpy_tensor_async = */ NULL,
+    /* .synchronize = */ NULL,
+    /* .graph_plan_create = */ wsp_ggml_backend_cpu_graph_plan_create,
+    /* .graph_plan_free = */ wsp_ggml_backend_cpu_graph_plan_free,
+    /* .graph_plan_update = */ NULL,
+    /* .graph_plan_compute = */ wsp_ggml_backend_cpu_graph_plan_compute,
+    /* .graph_compute = */ wsp_ggml_backend_cpu_graph_compute,
+    /* .event_record = */ NULL,
+    /* .event_wait = */ NULL,
+};
+
+static wsp_ggml_guid_t wsp_ggml_backend_cpu_guid(void) {
+    static wsp_ggml_guid guid = { 0xaa, 0x67, 0xc7, 0x43, 0x96, 0xe6, 0xa3, 0x8a, 0xe3, 0xaf, 0xea, 0x92, 0x36, 0xbc, 0xfc, 0x89 };
+    return &guid;
+}
+
+wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void) {
+    // initialize CPU backend now to avoid slowing the first graph computation
+    wsp_ggml_cpu_init();
+
+    struct wsp_ggml_backend_cpu_context * ctx = new wsp_ggml_backend_cpu_context;
+    if (ctx == NULL) {
+        return NULL;
+    }
+
+    ctx->n_threads = WSP_GGML_DEFAULT_N_THREADS;
+    ctx->threadpool = NULL;
+    ctx->work_data = NULL;
+    ctx->work_size = 0;
+    ctx->abort_callback = NULL;
+    ctx->abort_callback_data = NULL;
+
+    wsp_ggml_backend_t cpu_backend = new wsp_ggml_backend {
+        /* .guid = */ wsp_ggml_backend_cpu_guid(),
+        /* .interface = */ wsp_ggml_backend_cpu_i,
+        /* .device = */ wsp_ggml_backend_reg_dev_get(wsp_ggml_backend_cpu_reg(), 0),
+        /* .context = */ ctx,
+    };
+
+    if (cpu_backend == NULL) {
+        delete ctx;
+        return NULL;
+    }
+
+    return cpu_backend;
+}
+
+bool wsp_ggml_backend_is_cpu(wsp_ggml_backend_t backend) {
+    return backend != NULL && wsp_ggml_guid_matches(backend->guid, wsp_ggml_backend_cpu_guid());
+}
+
+void wsp_ggml_backend_cpu_set_n_threads(wsp_ggml_backend_t backend_cpu, int n_threads) {
+    WSP_GGML_ASSERT(wsp_ggml_backend_is_cpu(backend_cpu));
+
+    struct wsp_ggml_backend_cpu_context * ctx = (struct wsp_ggml_backend_cpu_context *)backend_cpu->context;
+    ctx->n_threads = n_threads;
+}
+
+void wsp_ggml_backend_cpu_set_threadpool(wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool) {
+    WSP_GGML_ASSERT(wsp_ggml_backend_is_cpu(backend_cpu));
+
+    struct wsp_ggml_backend_cpu_context * ctx = (struct wsp_ggml_backend_cpu_context *)backend_cpu->context;
+
+    if (ctx->threadpool && ctx->threadpool != threadpool) {
+        // already had a different threadpool, pause/suspend it before switching
+        wsp_ggml_threadpool_pause(ctx->threadpool);
+    }
+    ctx->threadpool = threadpool;
+}
+
+void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data) {
+    WSP_GGML_ASSERT(wsp_ggml_backend_is_cpu(backend_cpu));
+
+    struct wsp_ggml_backend_cpu_context * ctx = (struct wsp_ggml_backend_cpu_context *)backend_cpu->context;
+    ctx->abort_callback = abort_callback;
+    ctx->abort_callback_data = abort_callback_data;
+}
+
+// CPU backend - device
+
+struct wsp_ggml_backend_cpu_device_context {
+    std::string description = "CPU";
+
+    wsp_ggml_backend_cpu_device_context() {
+#ifdef __APPLE__
+        size_t len = 0;
+        if (!sysctlbyname("machdep.cpu.brand_string", NULL, &len, NULL, 0)) {
+            description.resize(len);
+            sysctlbyname("machdep.cpu.brand_string", &description[0], &len, NULL, 0); // NOLINT
+        }
+#elif defined(__linux__)
+        FILE * f = fopen("/proc/cpuinfo", "r");
+        if (f) {
+            char buf[1024];
+            while (fgets(buf, sizeof(buf), f)) {
+                if (strncmp(buf, "model name", 10) == 0) {
+                    char * p = strchr(buf, ':');
+                    if (p) {
+                        p++;
+                        while (std::isspace(*p)) {
+                            p++;
+                        }
+                        while (std::isspace(p[strlen(p) - 1])) {
+                            p[strlen(p) - 1] = '\0';
+                        }
+                        description = p;
+                        break;
+                    }
+                }
+            }
+            fclose(f);
+        }
+#elif defined(_WIN32)
+        HKEY hKey;
+        if (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
+                         TEXT("HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"),
+                         0,
+                         KEY_READ,
+                         &hKey) == ERROR_SUCCESS) {
+            DWORD cpu_brand_size = 0;
+            if (RegQueryValueExA(hKey,
+                                 "ProcessorNameString",
+                                 NULL,
+                                 NULL,
+                                 NULL,
+                                 &cpu_brand_size) == ERROR_SUCCESS) {
+                description.resize(cpu_brand_size);
+                if (RegQueryValueExA(hKey,
+                                     "ProcessorNameString",
+                                     NULL,
+                                     NULL,
+                                     (LPBYTE)&description[0], // NOLINT
+                                     &cpu_brand_size) == ERROR_SUCCESS) {
+                    if (description.find('\0') != std::string::npos) {
+                        description.resize(description.find('\0'));
+                    }
+                }
+            }
+            RegCloseKey(hKey);
+        }
+#endif
+    }
+};
+
+static const char * wsp_ggml_backend_cpu_device_get_name(wsp_ggml_backend_dev_t dev) {
+    return "CPU";
+
+    WSP_GGML_UNUSED(dev);
+}
+
+static const char * wsp_ggml_backend_cpu_device_get_description(wsp_ggml_backend_dev_t dev) {
+    struct wsp_ggml_backend_cpu_device_context * ctx = (struct wsp_ggml_backend_cpu_device_context *)dev->context;
+
+    return ctx->description.c_str();
+}
+
+static void wsp_ggml_backend_cpu_device_get_memory(wsp_ggml_backend_dev_t dev, size_t * free, size_t * total) {
+    // TODO
+    *free = 0;
+    *total = 0;
+
+    WSP_GGML_UNUSED(dev);
+}
+
+static enum wsp_ggml_backend_dev_type wsp_ggml_backend_cpu_device_get_type(wsp_ggml_backend_dev_t dev) {
+    return WSP_GGML_BACKEND_DEVICE_TYPE_CPU;
+
+    WSP_GGML_UNUSED(dev);
+}
+
+static void wsp_ggml_backend_cpu_device_get_props(wsp_ggml_backend_dev_t dev, struct wsp_ggml_backend_dev_props * props) {
+    props->name = wsp_ggml_backend_cpu_device_get_name(dev);
+    props->description = wsp_ggml_backend_cpu_device_get_description(dev);
+    props->type = wsp_ggml_backend_cpu_device_get_type(dev);
+    wsp_ggml_backend_cpu_device_get_memory(dev, &props->memory_free, &props->memory_total);
+    props->caps = {
+        /* .async = */ false,
+        /* .host_buffer = */ false,
+        /* .buffer_from_host_ptr = */ true,
+        /* .events = */ false,
+    };
+}
+
+static wsp_ggml_backend_t wsp_ggml_backend_cpu_device_init_backend(wsp_ggml_backend_dev_t dev, const char * params) {
+    return wsp_ggml_backend_cpu_init();
+
+    WSP_GGML_UNUSED(dev);
+    WSP_GGML_UNUSED(params);
+}
+
+static wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_device_get_buffer_type(wsp_ggml_backend_dev_t dev) {
+    return wsp_ggml_backend_cpu_buffer_type();
+
+    WSP_GGML_UNUSED(dev);
+}
+
+static wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_device_buffer_from_host_ptr(wsp_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+    return wsp_ggml_backend_cpu_buffer_from_ptr(ptr, size);
+
+    WSP_GGML_UNUSED(dev);
+    WSP_GGML_UNUSED(max_tensor_size);
+}
+
+static bool wsp_ggml_backend_cpu_device_supports_op(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op) {
+    const struct wsp_ggml_tensor * src0 = op->src[0];
+    const struct wsp_ggml_tensor * src1 = op->src[1];
+
+    if (op->op == WSP_GGML_OP_NONE || op->op == WSP_GGML_OP_RESHAPE || op->op == WSP_GGML_OP_VIEW || op->op == WSP_GGML_OP_PERMUTE || op->op == WSP_GGML_OP_TRANSPOSE) {
+        return true;
+    }
+
+    // extra_buffer_op?
+    for (auto extra : wsp_ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra) {
+            auto buf_extra = (ggml::cpu::extra_buffer_type*) extra->context;
+            if (buf_extra && buf_extra->supports_op(dev, op)) {
+                return true;
+            }
+        }
+    }
+
+    // the other case need host buffer.
+    for (int i = 0; i < WSP_GGML_MAX_SRC; i++) {
+        if (op->src[i] && op->src[i]->buffer && !wsp_ggml_backend_buft_is_host(op->src[i]->buffer->buft)) {
+            return false;
+        }
+    }
+
+    switch (op->op) {
+        case WSP_GGML_OP_CPY:
+            return
+                op->type != WSP_GGML_TYPE_IQ3_XXS &&
+                op->type != WSP_GGML_TYPE_IQ3_S &&
+                op->type != WSP_GGML_TYPE_IQ2_XXS &&
+                op->type != WSP_GGML_TYPE_IQ2_XS &&
+                op->type != WSP_GGML_TYPE_IQ2_S &&
+                op->type != WSP_GGML_TYPE_IQ1_S &&
+                op->type != WSP_GGML_TYPE_IQ1_M; // missing type_traits.from_float
+        case WSP_GGML_OP_MUL_MAT:
+            return src1->type == WSP_GGML_TYPE_F32 || src1->type == wsp_ggml_get_type_traits_cpu(src0->type)->vec_dot_type;
+        case WSP_GGML_OP_SOFT_MAX_BACK: {
+            if (op->src[0]->type != WSP_GGML_TYPE_F32 || op->src[1]->type != WSP_GGML_TYPE_F32) {
+                return false;
+            }
+            float max_bias = 0.0f;
+
+            memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float));
+
+            return max_bias == 0.0f;
+        }
+        case WSP_GGML_OP_IM2COL_BACK:
+            return src0->type == WSP_GGML_TYPE_F32 && src1->type == WSP_GGML_TYPE_F32;
+        case WSP_GGML_OP_OUT_PROD:
+            return (src0->type == WSP_GGML_TYPE_F32 || (wsp_ggml_is_quantized(src0->type) && src0->ne[2] == src1->ne[2] && src0->ne[3] == src1->ne[3])) &&
+                src1->type == WSP_GGML_TYPE_F32 && op->type == WSP_GGML_TYPE_F32;
+        default:
+            return true;
+    }
+}
+
+static bool wsp_ggml_backend_cpu_device_supports_buft(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_buffer_type_t buft) {
+    return wsp_ggml_backend_buft_is_host(buft) || wsp_ggml_backend_cpu_is_extra_buffer_type(buft);
+    WSP_GGML_UNUSED(dev);
+}
+
+static const struct wsp_ggml_backend_device_i wsp_ggml_backend_cpu_device_i = {
+    /* .get_name = */ wsp_ggml_backend_cpu_device_get_name,
+    /* .get_description = */ wsp_ggml_backend_cpu_device_get_description,
+    /* .get_memory = */ wsp_ggml_backend_cpu_device_get_memory,
+    /* .get_type = */ wsp_ggml_backend_cpu_device_get_type,
+    /* .get_props = */ wsp_ggml_backend_cpu_device_get_props,
+    /* .init_backend = */ wsp_ggml_backend_cpu_device_init_backend,
+    /* .get_buffer_type = */ wsp_ggml_backend_cpu_device_get_buffer_type,
+    /* .get_host_buffer_type = */ NULL,
+    /* .buffer_from_host_ptr = */ wsp_ggml_backend_cpu_device_buffer_from_host_ptr,
+    /* .supports_op = */ wsp_ggml_backend_cpu_device_supports_op,
+    /* .supports_buft = */ wsp_ggml_backend_cpu_device_supports_buft,
+    /* .offload_op = */ NULL,
+    /* .event_new = */ NULL,
+    /* .event_free = */ NULL,
+    /* .event_synchronize = */ NULL,
+};
+
+// CPU backend - backend (reg)
+
+static const char * wsp_ggml_backend_cpu_reg_get_name(wsp_ggml_backend_reg_t reg) {
+    return "CPU";
+
+    WSP_GGML_UNUSED(reg);
+}
+
+static size_t wsp_ggml_backend_cpu_reg_get_device_count(wsp_ggml_backend_reg_t reg) {
+    return 1;
+
+    WSP_GGML_UNUSED(reg);
+}
+
+static wsp_ggml_backend_dev_t wsp_ggml_backend_cpu_reg_get_device(wsp_ggml_backend_reg_t reg, size_t index) {
+    WSP_GGML_ASSERT(index == 0);
+
+    static wsp_ggml_backend_cpu_device_context ctx;
+    static wsp_ggml_backend_device wsp_ggml_backend_cpu_device = {
+        /* .iface = */ wsp_ggml_backend_cpu_device_i,
+        /* .reg = */ reg,
+        /* .context = */ &ctx,
+    };
+
+    return &wsp_ggml_backend_cpu_device;
+}
+
+// This is intended to replace the the wsp_ggml_cpu_has_* functions when loading the CPU backend dynamically,
+// and additionally to allow other backends to expose their own list of features that applications can query using the same API
+static wsp_ggml_backend_feature * wsp_ggml_backend_cpu_get_features(wsp_ggml_backend_reg_t reg) {
+    static std::vector<wsp_ggml_backend_feature> features = []() {
+        wsp_ggml_cpu_init();
+
+        std::vector<wsp_ggml_backend_feature> features;
+        if (wsp_ggml_cpu_has_sse3()) {
+            features.push_back({ "SSE3", "1" });
+        }
+        if (wsp_ggml_cpu_has_ssse3()) {
+            features.push_back({ "SSSE3", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx()) {
+            features.push_back({ "AVX", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx_vnni()) {
+            features.push_back({ "AVX_VNNI", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx2()) {
+            features.push_back({ "AVX2", "1" });
+        }
+        if (wsp_ggml_cpu_has_f16c()) {
+            features.push_back({ "F16C", "1" });
+        }
+        if (wsp_ggml_cpu_has_fma()) {
+            features.push_back({ "FMA", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx512()) {
+            features.push_back({ "AVX512", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx512_vbmi()) {
+            features.push_back({ "AVX512_VBMI", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx512_vnni()) {
+            features.push_back({ "AVX512_VNNI", "1" });
+        }
+        if (wsp_ggml_cpu_has_avx512_bf16()) {
+            features.push_back({ "AVX512_BF16", "1" });
+        }
+        if (wsp_ggml_cpu_has_amx_int8()) {
+            features.push_back({ "AMX_INT8", "1" });
+        }
+        if (wsp_ggml_cpu_has_neon()) {
+            features.push_back({ "NEON", "1" });
+        }
+        if (wsp_ggml_cpu_has_arm_fma()) {
+            features.push_back({ "ARM_FMA", "1" });
+        }
+        if (wsp_ggml_cpu_has_fp16_va()) {
+            features.push_back({ "FP16_VA", "1" });
+        }
+        if (wsp_ggml_cpu_has_matmul_int8()) {
+            features.push_back({ "MATMUL_INT8", "1" });
+        }
+        if (wsp_ggml_cpu_has_sve()) {
+            features.push_back({ "SVE", "1" });
+        }
+        if (wsp_ggml_cpu_has_dotprod()) {
+            features.push_back({ "DOTPROD", "1" });
+        }
+        if (wsp_ggml_cpu_get_sve_cnt() > 0) {
+            static std::string sve_cnt = std::to_string(wsp_ggml_cpu_get_sve_cnt());
+            features.push_back({ "SVE_CNT", sve_cnt.c_str() });
+        }
+        if (wsp_ggml_cpu_has_sme()) {
+            features.push_back({ "SME", "1" });
+        }
+        if (wsp_ggml_cpu_has_riscv_v()) {
+            features.push_back({ "RISCV_V", "1" });
+        }
+        if (wsp_ggml_cpu_has_vsx()) {
+            features.push_back({ "VSX", "1" });
+        }
+        if (wsp_ggml_cpu_has_vxe()) {
+            features.push_back({ "VXE", "1" });
+        }
+        if (wsp_ggml_cpu_has_wasm_simd()) {
+            features.push_back({ "WASM_SIMD", "1" });
+        }
+        if (wsp_ggml_cpu_has_llamafile()) {
+            features.push_back({ "LLAMAFILE", "1" });
+        }
+    #ifdef WSP_GGML_USE_ACCELERATE
+        features.push_back({ "ACCELERATE", "1" });
+    #endif
+    #ifdef WSP_GGML_USE_CPU_HBM
+        features.push_back({ "CPU_HBM", "1" });
+    #endif
+    #ifdef WSP_GGML_USE_OPENMP
+        features.push_back({ "OPENMP", "1" });
+    #endif
+    #ifdef WSP_GGML_USE_CPU_KLEIDIAI
+        features.push_back({ "KLEIDIAI", "1" });
+    #endif
+    #ifdef WSP_GGML_USE_CPU_AARCH64
+        features.push_back({ "AARCH64_REPACK", "1" });
+    #endif
+
+        features.push_back({ nullptr, nullptr });
+
+        return features;
+    }();
+
+    return features.data();
+
+    WSP_GGML_UNUSED(reg);
+}
+
+static void * wsp_ggml_backend_cpu_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name) {
+    if (strcmp(name, "wsp_ggml_backend_set_n_threads") == 0) {
+        wsp_ggml_backend_set_n_threads_t fct = wsp_ggml_backend_cpu_set_n_threads;
+        return (void *)fct;
+    }
+    if (strcmp(name, "wsp_ggml_backend_dev_get_extra_bufts") == 0) {
+        wsp_ggml_backend_dev_get_extra_bufts_t fct = wsp_ggml_backend_cpu_device_get_extra_buffers_type;
+        return (void *)fct;
+    }
+    if (strcmp(name, "wsp_ggml_backend_get_features") == 0) {
+        return (void *)wsp_ggml_backend_cpu_get_features;
+    }
+    if (strcmp(name, "wsp_ggml_backend_set_abort_callback") == 0) {
+        return (void *)wsp_ggml_backend_cpu_set_abort_callback;
+    }
+    if (strcmp(name, "wsp_ggml_backend_cpu_numa_init") == 0) {
+        return (void *)wsp_ggml_numa_init;
+    }
+    if (strcmp(name, "wsp_ggml_backend_cpu_is_numa") == 0) {
+        return (void *)wsp_ggml_is_numa;
+    }
+
+    // threadpool - TODO: move to ggml-base
+    if (strcmp(name, "wsp_ggml_threadpool_new") == 0) {
+        return (void *)wsp_ggml_threadpool_new;
+    }
+    if (strcmp(name, "wsp_ggml_threadpool_free") == 0) {
+        return (void *)wsp_ggml_threadpool_free;
+    }
+    if (strcmp(name, "wsp_ggml_backend_cpu_set_threadpool") == 0) {
+        return (void *)wsp_ggml_backend_cpu_set_threadpool;
+    }
+
+    return NULL;
+
+    WSP_GGML_UNUSED(reg);
+}
+
+static const struct wsp_ggml_backend_reg_i wsp_ggml_backend_cpu_reg_i = {
+    /* .get_name = */ wsp_ggml_backend_cpu_reg_get_name,
+    /* .get_device_count = */ wsp_ggml_backend_cpu_reg_get_device_count,
+    /* .get_device = */ wsp_ggml_backend_cpu_reg_get_device,
+    /* .get_proc_address = */ wsp_ggml_backend_cpu_get_proc_address,
+};
+
+wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void) {
+    // init CPU feature detection
+    wsp_ggml_cpu_init();
+
+    static struct wsp_ggml_backend_reg wsp_ggml_backend_cpu_reg = {
+        /* .api_version = */ WSP_GGML_BACKEND_API_VERSION,
+        /* .iface = */ wsp_ggml_backend_cpu_reg_i,
+        /* .context = */ NULL,
+    };
+
+    return &wsp_ggml_backend_cpu_reg;
+}
+
+WSP_GGML_BACKEND_DL_IMPL(wsp_ggml_backend_cpu_reg)
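
As the comment above wsp_ggml_backend_cpu_get_features notes, feature detection is meant to be queried through the backend registry by name rather than by calling the wsp_ggml_cpu_has_* functions directly. A hedged sketch of the consuming side follows; it assumes wsp_ggml_backend_reg_get_proc_address is the wsp_-prefixed lookup wrapper from ggml-backend.h and that wsp_ggml_backend_feature exposes name/value string fields, as the pushes of { "NEON", "1" } style entries in the hunk suggest:

    #include <stdio.h>
    #include "ggml-backend.h"
    #include "ggml-cpu.h"

    // Sketch: enumerate the CPU feature list exposed via get_proc_address above.
    typedef wsp_ggml_backend_feature * (*get_features_fn)(wsp_ggml_backend_reg_t reg);

    static void print_cpu_features(void) {
        wsp_ggml_backend_reg_t reg = wsp_ggml_backend_cpu_reg();
        get_features_fn get_features =
            (get_features_fn) wsp_ggml_backend_reg_get_proc_address(reg, "wsp_ggml_backend_get_features"); // assumed wrapper
        if (get_features == NULL) {
            return; // registry does not expose a feature list
        }
        // the list is terminated by a { nullptr, nullptr } entry (see the hunk above)
        for (wsp_ggml_backend_feature * f = get_features(reg); f->name != NULL; f++) {
            printf("%s = %s\n", f->name, f->value); // e.g. "NEON = 1"
        }
    }
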