whisper.rn 0.4.0-rc.9 → 0.4.0

Files changed (183)
  1. package/README.md +5 -1
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +43 -13
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +33 -35
  5. package/android/src/main/jni.cpp +9 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnwhisper.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnwhisper_v8fp16_va_2.so +0 -0
  8. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper.so +0 -0
  9. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper_vfpv4.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnwhisper.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnwhisper_x86_64.so +0 -0
  12. package/cpp/coreml/whisper-compat.h +10 -0
  13. package/cpp/coreml/whisper-compat.m +35 -0
  14. package/cpp/coreml/whisper-decoder-impl.h +27 -15
  15. package/cpp/coreml/whisper-decoder-impl.m +36 -10
  16. package/cpp/coreml/whisper-encoder-impl.h +21 -9
  17. package/cpp/coreml/whisper-encoder-impl.m +29 -3
  18. package/cpp/ggml-alloc.c +39 -37
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +55 -27
  21. package/cpp/ggml-backend-reg.cpp +591 -0
  22. package/cpp/ggml-backend.cpp +336 -955
  23. package/cpp/ggml-backend.h +70 -42
  24. package/cpp/ggml-common.h +57 -49
  25. package/cpp/ggml-cpp.h +39 -0
  26. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  27. package/cpp/ggml-cpu/amx/amx.h +8 -0
  28. package/cpp/ggml-cpu/amx/common.h +91 -0
  29. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  30. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  31. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  32. package/cpp/ggml-cpu/arch/arm/quants.c +4113 -0
  33. package/cpp/ggml-cpu/arch/arm/repack.cpp +2162 -0
  34. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  35. package/cpp/ggml-cpu/arch/x86/quants.c +4310 -0
  36. package/cpp/ggml-cpu/arch/x86/repack.cpp +3284 -0
  37. package/cpp/ggml-cpu/arch-fallback.h +184 -0
  38. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  39. package/cpp/ggml-cpu/binary-ops.h +16 -0
  40. package/cpp/ggml-cpu/common.h +72 -0
  41. package/cpp/ggml-cpu/ggml-cpu-impl.h +511 -0
  42. package/cpp/ggml-cpu/ggml-cpu.c +3473 -0
  43. package/cpp/ggml-cpu/ggml-cpu.cpp +671 -0
  44. package/cpp/ggml-cpu/ops.cpp +9085 -0
  45. package/cpp/ggml-cpu/ops.h +111 -0
  46. package/cpp/ggml-cpu/quants.c +1157 -0
  47. package/cpp/ggml-cpu/quants.h +89 -0
  48. package/cpp/ggml-cpu/repack.cpp +1570 -0
  49. package/cpp/ggml-cpu/repack.h +98 -0
  50. package/cpp/ggml-cpu/simd-mappings.h +1006 -0
  51. package/cpp/ggml-cpu/traits.cpp +36 -0
  52. package/cpp/ggml-cpu/traits.h +38 -0
  53. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  54. package/cpp/ggml-cpu/unary-ops.h +28 -0
  55. package/cpp/ggml-cpu/vec.cpp +321 -0
  56. package/cpp/ggml-cpu/vec.h +973 -0
  57. package/cpp/ggml-cpu.h +143 -0
  58. package/cpp/ggml-impl.h +417 -23
  59. package/cpp/ggml-metal-impl.h +622 -0
  60. package/cpp/ggml-metal.h +9 -9
  61. package/cpp/ggml-metal.m +3451 -1344
  62. package/cpp/ggml-opt.cpp +1037 -0
  63. package/cpp/ggml-opt.h +237 -0
  64. package/cpp/ggml-quants.c +296 -10818
  65. package/cpp/ggml-quants.h +78 -125
  66. package/cpp/ggml-threading.cpp +12 -0
  67. package/cpp/ggml-threading.h +14 -0
  68. package/cpp/ggml-whisper-sim.metallib +0 -0
  69. package/cpp/ggml-whisper.metallib +0 -0
  70. package/cpp/ggml.c +4633 -21450
  71. package/cpp/ggml.h +320 -661
  72. package/cpp/gguf.cpp +1347 -0
  73. package/cpp/gguf.h +202 -0
  74. package/cpp/rn-whisper.cpp +4 -11
  75. package/cpp/whisper-arch.h +197 -0
  76. package/cpp/whisper.cpp +2022 -495
  77. package/cpp/whisper.h +75 -18
  78. package/ios/CMakeLists.txt +95 -0
  79. package/ios/RNWhisper.h +5 -0
  80. package/ios/RNWhisperAudioUtils.m +4 -0
  81. package/ios/RNWhisperContext.h +5 -0
  82. package/ios/RNWhisperContext.mm +4 -2
  83. package/ios/rnwhisper.xcframework/Info.plist +74 -0
  84. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  85. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  86. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  87. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  88. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  89. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  90. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  91. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  92. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  93. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  94. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  95. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  96. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
  97. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
  98. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  99. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  100. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  101. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  102. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
  103. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Info.plist +0 -0
  104. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
  105. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/rnwhisper +0 -0
  106. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  107. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  108. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  109. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  110. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  111. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  112. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  113. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  114. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  115. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  116. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  117. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  118. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
  119. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
  120. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  121. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  122. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  123. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  124. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
  125. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  126. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
  127. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
  128. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  129. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  130. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  131. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  132. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  133. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  134. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  135. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  136. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  137. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  138. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  139. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  140. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  141. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
  142. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
  143. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  144. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  145. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  146. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  147. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
  148. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Info.plist +0 -0
  149. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
  150. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/rnwhisper +0 -0
  151. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  152. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  153. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  154. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  155. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  156. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  157. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  158. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  159. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  160. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  161. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  162. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  163. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
  164. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
  165. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  166. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  167. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  168. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  169. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
  170. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  171. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
  172. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
  173. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  174. package/jest/mock.js +5 -0
  175. package/lib/commonjs/version.json +1 -1
  176. package/lib/module/version.json +1 -1
  177. package/package.json +10 -6
  178. package/src/version.json +1 -1
  179. package/whisper-rn.podspec +11 -18
  180. package/cpp/README.md +0 -4
  181. package/cpp/ggml-aarch64.c +0 -3209
  182. package/cpp/ggml-aarch64.h +0 -39
  183. package/cpp/ggml-cpu-impl.h +0 -614
ggml-backend-impl.h (new file)
@@ -0,0 +1,255 @@
+ #pragma once
+
+ // ggml-backend internal header
+
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define WSP_GGML_BACKEND_API_VERSION 1
+
+ //
+ // Backend buffer type
+ //
+
+ struct wsp_ggml_backend_buffer_type_i {
+ const char * (*get_name) (wsp_ggml_backend_buffer_type_t buft);
+ // allocate a buffer of this type
+ wsp_ggml_backend_buffer_t (*alloc_buffer) (wsp_ggml_backend_buffer_type_t buft, size_t size);
+ // tensor alignment
+ size_t (*get_alignment) (wsp_ggml_backend_buffer_type_t buft);
+ // (optional) max buffer size that can be allocated (defaults to SIZE_MAX)
+ size_t (*get_max_size) (wsp_ggml_backend_buffer_type_t buft);
+ // (optional) data size needed to allocate the tensor, including padding (defaults to wsp_ggml_nbytes)
+ size_t (*get_alloc_size)(wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor);
+ // (optional) check if tensor data is in host memory and uses standard ggml tensor layout (defaults to false)
+ bool (*is_host) (wsp_ggml_backend_buffer_type_t buft);
+ };
+
+ struct wsp_ggml_backend_buffer_type {
+ struct wsp_ggml_backend_buffer_type_i iface;
+ wsp_ggml_backend_dev_t device;
+ void * context;
+ };
+
+ //
+ // Backend buffer
+ //
+
+ struct wsp_ggml_backend_buffer_i {
+ // (optional) free the buffer
+ void (*free_buffer) (wsp_ggml_backend_buffer_t buffer);
+ // base address of the buffer
+ void * (*get_base) (wsp_ggml_backend_buffer_t buffer);
+ // (optional) initialize a tensor in the buffer (e.g. add tensor extras)
+ enum wsp_ggml_status (*init_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ // tensor data access
+ void (*memset_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+ void (*set_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ // (optional) tensor copy: dst is in the buffer, src may be in any buffer, including buffers from a different backend (return false if not supported)
+ bool (*cpy_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+ // clear the entire buffer
+ void (*clear) (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+ // (optional) reset any internal state due to tensor initialization, such as tensor extras
+ void (*reset) (wsp_ggml_backend_buffer_t buffer);
+ };
+
+ struct wsp_ggml_backend_buffer {
+ struct wsp_ggml_backend_buffer_i iface;
+ wsp_ggml_backend_buffer_type_t buft;
+ void * context;
+ size_t size;
+ enum wsp_ggml_backend_buffer_usage usage;
+ };
+
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buffer_init(
+ wsp_ggml_backend_buffer_type_t buft,
+ struct wsp_ggml_backend_buffer_i iface,
+ void * context,
+ size_t size);
+
+ // do not use directly, use wsp_ggml_backend_tensor_copy instead
+ WSP_GGML_API bool wsp_ggml_backend_buffer_copy_tensor(const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ // multi-buffer
+ // buffer that contains a collection of buffers
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_multi_buffer_alloc_buffer(wsp_ggml_backend_buffer_t * buffers, size_t n_buffers);
+ WSP_GGML_API bool wsp_ggml_backend_buffer_is_multi_buffer(wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_multi_buffer_set_usage(wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+
+ //
+ // Backend (stream)
+ //
+
+ struct wsp_ggml_backend_i {
+ const char * (*get_name)(wsp_ggml_backend_t backend);
+
+ void (*free)(wsp_ggml_backend_t backend);
+
+ // (optional) asynchronous tensor data access
+ void (*set_tensor_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ bool (*cpy_tensor_async)(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, const struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ // (optional) complete all pending operations (required if the backend supports async operations)
+ void (*synchronize)(wsp_ggml_backend_t backend);
+
+ // (optional) graph plans (not used currently)
+ // compute graph with a plan
+ wsp_ggml_backend_graph_plan_t (*graph_plan_create) (wsp_ggml_backend_t backend, const struct wsp_ggml_cgraph * cgraph);
+ void (*graph_plan_free) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+ // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+ void (*graph_plan_update) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan, const struct wsp_ggml_cgraph * cgraph);
+ // compute the graph with the plan
+ enum wsp_ggml_status (*graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+
+ // compute graph (always async if supported by the backend)
+ enum wsp_ggml_status (*graph_compute) (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+
+ // (optional) event synchronization
+ // record an event on this stream
+ void (*event_record)(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
+ // wait for an event on a different stream
+ void (*event_wait) (wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
+ };
+
+ struct wsp_ggml_backend {
+ wsp_ggml_guid_t guid;
+ struct wsp_ggml_backend_i iface;
+ wsp_ggml_backend_dev_t device;
+ void * context;
+ };
+
+ struct wsp_ggml_backend_event {
+ struct wsp_ggml_backend_device * device;
+ void * context;
+ };
+
+ //
+ // Backend device
+ //
+
+ // Note: if additional properties are needed, we should add a struct with all of them
+ // the current functions to obtain the properties can remain, since they are more convenient for often used properties
+ struct wsp_ggml_backend_device_i {
+ // device name: short identifier for this device, such as "CPU" or "CUDA0"
+ const char * (*get_name)(wsp_ggml_backend_dev_t dev);
+
+ // device description: short informative description of the device, could be the model name
+ const char * (*get_description)(wsp_ggml_backend_dev_t dev);
+
+ // device memory in bytes
+ void (*get_memory)(wsp_ggml_backend_dev_t dev, size_t * free, size_t * total);
+
+ // device type
+ enum wsp_ggml_backend_dev_type (*get_type)(wsp_ggml_backend_dev_t dev);
+
+ // device properties
+ void (*get_props)(wsp_ggml_backend_dev_t dev, struct wsp_ggml_backend_dev_props * props);
+
+ // backend (stream) initialization
+ wsp_ggml_backend_t (*init_backend)(wsp_ggml_backend_dev_t dev, const char * params);
+
+ // preferred buffer type
+ wsp_ggml_backend_buffer_type_t (*get_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+ // (optional) host buffer type (in system memory, typically this is a pinned memory buffer for faster transfers between host and device)
+ wsp_ggml_backend_buffer_type_t (*get_host_buffer_type)(wsp_ggml_backend_dev_t dev);
+
+ // (optional) buffer from pointer: create a buffer from a host pointer (useful for memory mapped models and importing data from other libraries)
+ wsp_ggml_backend_buffer_t (*buffer_from_host_ptr)(wsp_ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+ // check if the backend can compute an operation
+ bool (*supports_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+ // check if the backend can use tensors allocated in a buffer type
+ bool (*supports_buft)(wsp_ggml_backend_dev_t dev, wsp_ggml_backend_buffer_type_t buft);
+
+ // (optional) check if the backend wants to run an operation, even if the weights are allocated in an incompatible buffer
+ // these should be expensive operations that may benefit from running on this backend instead of the CPU backend
+ bool (*offload_op)(wsp_ggml_backend_dev_t dev, const struct wsp_ggml_tensor * op);
+
+ // (optional) event synchronization
+ wsp_ggml_backend_event_t (*event_new) (wsp_ggml_backend_dev_t dev);
+ void (*event_free) (wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+ void (*event_synchronize) (wsp_ggml_backend_dev_t dev, wsp_ggml_backend_event_t event);
+ };
+
+ struct wsp_ggml_backend_device {
+ struct wsp_ggml_backend_device_i iface;
+ wsp_ggml_backend_reg_t reg;
+ void * context;
+ };
+
+ //
+ // Backend (reg)
+ //
+
+ struct wsp_ggml_backend_reg_i {
+ const char * (*get_name)(wsp_ggml_backend_reg_t reg);
+
+ // enumerate available devices
+ size_t (*get_device_count)(wsp_ggml_backend_reg_t reg);
+ wsp_ggml_backend_dev_t (*get_device)(wsp_ggml_backend_reg_t reg, size_t index);
+
+ // (optional) get a pointer to a function in the backend
+ // backends can add custom functions that are not part of the standard ggml-backend interface
+ void * (*get_proc_address)(wsp_ggml_backend_reg_t reg, const char * name);
+ };
+
+ struct wsp_ggml_backend_reg {
+ int api_version; // initialize to WSP_GGML_BACKEND_API_VERSION
+ struct wsp_ggml_backend_reg_i iface;
+ void * context;
+ };
+
+ // Internal backend registry API
+ WSP_GGML_API void wsp_ggml_backend_register(wsp_ggml_backend_reg_t reg);
+
+ // Add dynamic loading support to the backend
+
+ // Initialize the backend
+ typedef wsp_ggml_backend_reg_t (*wsp_ggml_backend_init_t)(void);
+ // Optional: obtain a score for the backend based on the system configuration
+ // Higher scores are preferred, 0 means the backend is not supported in the current system
+ typedef int (*wsp_ggml_backend_score_t)(void);
+
+ #ifdef WSP_GGML_BACKEND_DL
+ # ifdef __cplusplus
+ # define WSP_GGML_BACKEND_DL_IMPL(reg_fn) \
+ extern "C" { \
+ WSP_GGML_BACKEND_API wsp_ggml_backend_reg_t wsp_ggml_backend_init(void); \
+ } \
+ wsp_ggml_backend_reg_t wsp_ggml_backend_init(void) { \
+ return reg_fn(); \
+ }
+ # define WSP_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
+ extern "C" { \
+ WSP_GGML_BACKEND_API int wsp_ggml_backend_score(void); \
+ } \
+ int wsp_ggml_backend_score(void) { \
+ return score_fn(); \
+ }
+ # else
+ # define WSP_GGML_BACKEND_DL_IMPL(reg_fn) \
+ WSP_GGML_BACKEND_API wsp_ggml_backend_reg_t wsp_ggml_backend_init(void); \
+ wsp_ggml_backend_reg_t wsp_ggml_backend_init(void) { \
+ return reg_fn(); \
+ }
+ # define WSP_GGML_BACKEND_DL_SCORE_IMPL(score_fn) \
+ WSP_GGML_BACKEND_API int wsp_ggml_backend_score(void); \
+ int wsp_ggml_backend_score(void) { \
+ return score_fn(); \
+ }
+ # endif
+ #else
+ # define WSP_GGML_BACKEND_DL_IMPL(reg_fn)
+ # define WSP_GGML_BACKEND_DL_SCORE_IMPL(score_fn)
+ #endif
+
+ #ifdef __cplusplus
+ }
+ #endif
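
A hedged sketch of how these internal structs are meant to be used (illustrative only, not code shipped in this package; the my_* names are hypothetical): a backend fills in a wsp_ggml_backend_reg_i vtable, wraps it in a wsp_ggml_backend_reg initialized to WSP_GGML_BACKEND_API_VERSION, and either passes it to wsp_ggml_backend_register() or exports it through WSP_GGML_BACKEND_DL_IMPL when built as a loadable module.

#include <stddef.h>
#include "ggml-backend-impl.h"

// Minimal registry vtable; a real backend would also expose devices.
static const char * my_reg_get_name(wsp_ggml_backend_reg_t reg) {
    (void) reg;
    return "MyBackend";
}

static size_t my_reg_get_device_count(wsp_ggml_backend_reg_t reg) {
    (void) reg;
    return 0; // no devices exposed in this sketch
}

static wsp_ggml_backend_dev_t my_reg_get_device(wsp_ggml_backend_reg_t reg, size_t index) {
    (void) reg; (void) index;
    return NULL;
}

static void * my_reg_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name) {
    (void) reg; (void) name;
    return NULL; // no custom entry points
}

static struct wsp_ggml_backend_reg my_reg = {
    /* .api_version = */ WSP_GGML_BACKEND_API_VERSION,
    /* .iface       = */ {
        /* .get_name         = */ my_reg_get_name,
        /* .get_device_count = */ my_reg_get_device_count,
        /* .get_device       = */ my_reg_get_device,
        /* .get_proc_address = */ my_reg_get_proc_address,
    },
    /* .context     = */ NULL,
};

static wsp_ggml_backend_reg_t my_backend_reg(void) {
    return &my_reg;
}

// A statically linked backend would call wsp_ggml_backend_register(my_backend_reg());
// a dynamically loaded one exports wsp_ggml_backend_init() via this macro instead.
WSP_GGML_BACKEND_DL_IMPL(my_backend_reg)
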
ggml-backend.h (new file)
@@ -0,0 +1,354 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-alloc.h"
+
+ #ifdef WSP_GGML_BACKEND_SHARED
+ # if defined(_WIN32) && !defined(__MINGW32__)
+ # ifdef WSP_GGML_BACKEND_BUILD
+ # define WSP_GGML_BACKEND_API __declspec(dllexport) extern
+ # else
+ # define WSP_GGML_BACKEND_API __declspec(dllimport) extern
+ # endif
+ # else
+ # define WSP_GGML_BACKEND_API __attribute__ ((visibility ("default"))) extern
+ # endif
+ #else
+ # define WSP_GGML_BACKEND_API extern
+ #endif
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
+ typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
+ typedef struct wsp_ggml_backend_event * wsp_ggml_backend_event_t;
+ typedef struct wsp_ggml_backend * wsp_ggml_backend_t;
+ typedef void * wsp_ggml_backend_graph_plan_t;
+ typedef struct wsp_ggml_backend_reg * wsp_ggml_backend_reg_t;
+ typedef struct wsp_ggml_backend_device * wsp_ggml_backend_dev_t;
+
+
+ //
+ // Backend buffer type
+ //
+
+ WSP_GGML_API const char * wsp_ggml_backend_buft_name (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer (wsp_ggml_backend_buffer_type_t buft, size_t size);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_max_size (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, const struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API bool wsp_ggml_backend_buft_is_host (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_buft_get_device (wsp_ggml_backend_buffer_type_t buft);
+
+ //
+ // Backend buffer
+ //
+
+ enum wsp_ggml_backend_buffer_usage {
+ WSP_GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+ WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+ WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE = 2,
+ };
+
+ WSP_GGML_API const char * wsp_ggml_backend_buffer_name (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_max_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API void wsp_ggml_backend_buffer_clear (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+ WSP_GGML_API bool wsp_ggml_backend_buffer_is_host (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_set_usage (wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+ WSP_GGML_API enum wsp_ggml_backend_buffer_usage wsp_ggml_backend_buffer_get_usage (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_reset (wsp_ggml_backend_buffer_t buffer);
+
+ // tensor copy between different backends
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ //
+ // Backend (stream)
+ //
+
+ WSP_GGML_API wsp_ggml_guid_t wsp_ggml_backend_guid(wsp_ggml_backend_t backend);
+ WSP_GGML_API const char * wsp_ggml_backend_name(wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_free(wsp_ggml_backend_t backend);
+
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_get_default_buffer_type(wsp_ggml_backend_t backend);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_buffer(wsp_ggml_backend_t backend, size_t size);
+ WSP_GGML_API size_t wsp_ggml_backend_get_alignment(wsp_ggml_backend_t backend);
+ WSP_GGML_API size_t wsp_ggml_backend_get_max_size(wsp_ggml_backend_t backend);
+
+ WSP_GGML_API void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+ // "offset" refers to the offset in tensor->data for setting/getting data
+ WSP_GGML_API void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_memset( struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+
+ WSP_GGML_API void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend);
+
+ WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_plan_compute (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute_async(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+
+ // NOTE: will be removed, use device version instead
+ WSP_GGML_API bool wsp_ggml_backend_supports_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_supports_buft(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_offload_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+
+ // asynchronous copy
+ // the copy is performed after all the currently queued operations in backend_src
+ // backend_dst will wait for the copy to complete before performing other operations
+ // automatic fallback to sync copy if async is not supported
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_get_device(wsp_ggml_backend_t backend);
+
+ //
+ // Events
+ //
+
+ WSP_GGML_API wsp_ggml_backend_event_t wsp_ggml_backend_event_new(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_event_free(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_record(wsp_ggml_backend_event_t event, wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_event_synchronize(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_wait(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
+
+ //
+ // Backend device
+ //
+
+ enum wsp_ggml_backend_dev_type {
+ // CPU device using system memory
+ WSP_GGML_BACKEND_DEVICE_TYPE_CPU,
+ // GPU device using dedicated memory
+ WSP_GGML_BACKEND_DEVICE_TYPE_GPU,
+ // accelerator devices intended to be used together with the CPU backend (e.g. BLAS or AMX)
+ WSP_GGML_BACKEND_DEVICE_TYPE_ACCEL
+ };
+
+ // functionality supported by the device
+ struct wsp_ggml_backend_dev_caps {
+ // asynchronous operations
+ bool async;
+ // pinned host buffer
+ bool host_buffer;
+ // creating buffers from host ptr
+ bool buffer_from_host_ptr;
+ // event synchronization
+ bool events;
+ };
+
+ // all the device properties
+ struct wsp_ggml_backend_dev_props {
+ const char * name;
+ const char * description;
+ size_t memory_free;
+ size_t memory_total;
+ enum wsp_ggml_backend_dev_type type;
+ struct wsp_ggml_backend_dev_caps caps;
+ };
+
+ WSP_GGML_API const char * wsp_ggml_backend_dev_name(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API const char * wsp_ggml_backend_dev_description(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_memory(wsp_ggml_backend_dev_t device, size_t * free, size_t * total);
+ WSP_GGML_API enum wsp_ggml_backend_dev_type wsp_ggml_backend_dev_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_get_props(wsp_ggml_backend_dev_t device, struct wsp_ggml_backend_dev_props * props);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_dev_backend_reg(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_t device, const char * params);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_host_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_dev_buffer_from_host_ptr(wsp_ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size);
+
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_buft(wsp_ggml_backend_dev_t device, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_dev_offload_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
+
+ //
+ // Backend (reg)
+ //
+
+ WSP_GGML_API const char * wsp_ggml_backend_reg_name(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API size_t wsp_ggml_backend_reg_dev_count(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_reg_dev_get(wsp_ggml_backend_reg_t reg, size_t index);
+ WSP_GGML_API void * wsp_ggml_backend_reg_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name);
+
+ // Common functions that may be obtained using wsp_ggml_backend_reg_get_proc_address
+
+ // Split buffer type for tensor parallelism
+ typedef wsp_ggml_backend_buffer_type_t (*wsp_ggml_backend_split_buffer_type_t)(int main_device, const float * tensor_split);
+ // Set the number of threads for the backend
+ typedef void (*wsp_ggml_backend_set_n_threads_t)(wsp_ggml_backend_t backend, int n_threads);
+ // Get additional buffer types provided by the device (returns a NULL-terminated array)
+ typedef wsp_ggml_backend_buffer_type_t * (*wsp_ggml_backend_dev_get_extra_bufts_t)(wsp_ggml_backend_dev_t device);
+ // Set the abort callback for the backend
+ typedef void (*wsp_ggml_backend_set_abort_callback_t)(wsp_ggml_backend_t backend, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+ // Get a list of feature flags supported by the backend (returns a NULL-terminated array)
+ struct wsp_ggml_backend_feature {
+ const char * name;
+ const char * value;
+ };
+ typedef struct wsp_ggml_backend_feature * (*wsp_ggml_backend_get_features_t)(wsp_ggml_backend_reg_t reg);
+
+ //
+ // Backend registry
+ //
+
+ WSP_GGML_API void wsp_ggml_backend_device_register(wsp_ggml_backend_dev_t device);
+
+ // Backend (reg) enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_reg_count(void);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_by_name(const char * name);
+
+ // Device enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_dev_count(void);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_name(const char * name);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_type(enum wsp_ggml_backend_dev_type type);
+
+ // Direct backend (stream) initialization
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_name(name), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_name(const char * name, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(type), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_type(enum wsp_ggml_backend_dev_type type, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(GPU) OR wsp_ggml_backend_dev_by_type(CPU), NULL)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_best(void);
+
+ // Load a backend from a dynamic library and register it
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_load(const char * path);
+ // Unload a backend if loaded dynamically and unregister it
+ WSP_GGML_API void wsp_ggml_backend_unload(wsp_ggml_backend_reg_t reg);
+ // Load all known backends from dynamic libraries
+ WSP_GGML_API void wsp_ggml_backend_load_all(void);
+ WSP_GGML_API void wsp_ggml_backend_load_all_from_path(const char * dir_path);
+
+ //
+ // Backend scheduler
+ //
+
+ // The backend scheduler allows for multiple backend devices to be used together
+ // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
+ // The backends are selected based on:
+ // - the backend that supports the operation
+ // - the location of the pre-allocated tensors (e.g. the weights)
+ /*
+ Example usage:
+
+ // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
+ // preferably to run on the same backend as the buffer
+ wsp_ggml_backend_buffer_set_usage(buf_weights, WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+ sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, WSP_GGML_DEFAULT_GRAPH_SIZE, false, true);
+
+ // initialize buffers from a max size graph (optional)
+ reserve_graph = build_graph(sched, max_batch_size);
+
+ // manually assign nodes to a backend (optional, should not be needed in most cases)
+ struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
+ wsp_ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
+
+ wsp_ggml_backend_sched_reserve(sched, reserve_graph);
+
+ // compute
+ graph = build_graph(sched); // the graph and its tensors are single-use in terms of allocation, multi-use in terms of computation
+ for (int i = 0; i < 10; ++i) {
+ wsp_ggml_backend_sched_graph_compute(sched, graph); // on the first iteration the graph is allocated automatically
+ }
+
+ // if there are graph inputs:
+ graph = build_graph(sched); // get a new graph that is not allocated (the metadata for the old graph is freed once wsp_ggml_free is called)
+ wsp_ggml_backend_sched_reset(sched); // clear the allocation of the previous graph
+ wsp_ggml_backend_sched_alloc_graph(sched, graph); // explicitly allocate the new graph but do not execute it
+ wsp_ggml_backend_tensor_set(input_tensor, ...); // copy data to the newly allocated graph tensors
+ wsp_ggml_backend_sched_graph_compute(sched, graph); // execute the graph
+
+ // as an alternative to the above it is also possible to assign the inputs to a dedicated context and
+ // allocate them statically via wsp_ggml_backend_alloc_ctx_tensors
+ }
+ */
+
+ typedef struct wsp_ggml_backend_sched * wsp_ggml_backend_sched_t;
+
+ // Evaluation callback for each node in the graph (set with wsp_ggml_backend_sched_set_eval_callback)
+ // when ask == true, the scheduler wants to know if the user wants to observe this node
+ // this allows the scheduler to batch nodes together in order to evaluate them in a single call
+ //
+ // when ask == false, the scheduler is passing the node tensor to the user for observation
+ // if the user returns false, the scheduler will cancel the graph compute
+ //
+ typedef bool (*wsp_ggml_backend_sched_eval_callback)(struct wsp_ggml_tensor * t, bool ask, void * user_data);
+
+ // Initialize a backend scheduler; backends with a lower index are given priority over backends with a higher index
+ WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, wsp_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
+ WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
+
+ // Initialize backend buffers from a measure graph
+ WSP_GGML_API bool wsp_ggml_backend_sched_reserve(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph); // returns success
+
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_backends(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_backend(wsp_ggml_backend_sched_t sched, int i);
+
+ // Get the number of splits of the last graph
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_copies(wsp_ggml_backend_sched_t sched);
+
+ WSP_GGML_API size_t wsp_ggml_backend_sched_get_buffer_size(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
+
+ WSP_GGML_API void wsp_ggml_backend_sched_set_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node);
+
+ // Allocate and compute graph on the backend scheduler
+ WSP_GGML_API bool wsp_ggml_backend_sched_alloc_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph); // returns success
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute_async(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API void wsp_ggml_backend_sched_synchronize(wsp_ggml_backend_sched_t sched);
+
+ // Reset all assignments and allocators - must be called before changing the node backends or allocating a new graph.
+ // This in effect deallocates all tensors that were previously allocated and leaves them with dangling pointers.
+ // The correct way to use this API is to discard the deallocated tensors and create new ones.
+ WSP_GGML_API void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched);
+
+ // Set a callback to be called for each resulting node during graph compute
+ WSP_GGML_API void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data);
+
+ //
+ // Utils
+ //
+
+ struct wsp_ggml_backend_graph_copy {
+ wsp_ggml_backend_buffer_t buffer;
+ struct wsp_ggml_context * ctx_allocated;
+ struct wsp_ggml_context * ctx_unallocated;
+ struct wsp_ggml_cgraph * graph;
+ };
+
+ // Copy a graph to a different backend
+ WSP_GGML_API struct wsp_ggml_backend_graph_copy wsp_ggml_backend_graph_copy(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API void wsp_ggml_backend_graph_copy_free(struct wsp_ggml_backend_graph_copy copy);
+
+ typedef bool (*wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);
+
+ // Compare the output of two backends
+ WSP_GGML_API bool wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);
+
+ // Tensor initialization
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_view_init(struct wsp_ggml_tensor * tensor);
+
+ // CPU buffer types are always available
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
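
For orientation, a hedged usage sketch of the public registry/device API declared above (illustrative only, not code from whisper.rn): it enumerates the registered devices, prints their properties, and initializes the best available backend, using only functions declared in this header.

#include <stdio.h>
#include "ggml-backend.h"

int main(void) {
    // list every registered device (the CPU device is always available)
    for (size_t i = 0; i < wsp_ggml_backend_dev_count(); i++) {
        wsp_ggml_backend_dev_t dev = wsp_ggml_backend_dev_get(i);

        struct wsp_ggml_backend_dev_props props;
        wsp_ggml_backend_dev_get_props(dev, &props);
        printf("device %zu: %s (%s), memory: %zu free / %zu total\n",
               i, props.name, props.description, props.memory_free, props.memory_total);
    }

    // prefer a GPU device if one is registered, otherwise fall back to the CPU
    wsp_ggml_backend_t backend = wsp_ggml_backend_init_best();
    if (backend == NULL) {
        fprintf(stderr, "no usable backend found\n");
        return 1;
    }
    printf("using backend: %s\n", wsp_ggml_backend_name(backend));

    wsp_ggml_backend_free(backend);
    return 0;
}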