whisper.rn 0.4.0-rc.7 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/coreml/whisper-encoder.mm +1 -1
  9. package/cpp/ggml-aarch64.c +3209 -0
  10. package/cpp/ggml-aarch64.h +39 -0
  11. package/cpp/ggml-alloc.c +732 -494
  12. package/cpp/ggml-alloc.h +47 -63
  13. package/cpp/ggml-backend-impl.h +162 -47
  14. package/cpp/ggml-backend.cpp +2635 -0
  15. package/cpp/ggml-backend.h +216 -71
  16. package/cpp/ggml-common.h +1853 -0
  17. package/cpp/ggml-cpu-impl.h +614 -0
  18. package/cpp/ggml-impl.h +144 -178
  19. package/cpp/ggml-metal.h +14 -60
  20. package/cpp/ggml-metal.m +3437 -2097
  21. package/cpp/ggml-quants.c +12559 -4189
  22. package/cpp/ggml-quants.h +135 -212
  23. package/cpp/ggml-whisper.metallib +0 -0
  24. package/cpp/ggml.c +9029 -5219
  25. package/cpp/ggml.h +673 -338
  26. package/cpp/rn-whisper.cpp +91 -0
  27. package/cpp/rn-whisper.h +2 -0
  28. package/cpp/whisper.cpp +1476 -675
  29. package/cpp/whisper.h +84 -28
  30. package/ios/RNWhisper.mm +124 -37
  31. package/ios/RNWhisperAudioUtils.h +1 -0
  32. package/ios/RNWhisperAudioUtils.m +20 -13
  33. package/ios/RNWhisperContext.h +3 -2
  34. package/ios/RNWhisperContext.mm +41 -8
  35. package/jest/mock.js +9 -1
  36. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  37. package/lib/commonjs/index.js +48 -19
  38. package/lib/commonjs/index.js.map +1 -1
  39. package/lib/commonjs/version.json +1 -1
  40. package/lib/module/NativeRNWhisper.js.map +1 -1
  41. package/lib/module/index.js +48 -19
  42. package/lib/module/index.js.map +1 -1
  43. package/lib/module/version.json +1 -1
  44. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  45. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  46. package/lib/typescript/index.d.ts +25 -3
  47. package/lib/typescript/index.d.ts.map +1 -1
  48. package/package.json +6 -5
  49. package/src/NativeRNWhisper.ts +12 -3
  50. package/src/index.ts +63 -24
  51. package/src/version.json +1 -1
  52. package/whisper-rn.podspec +9 -2
  53. package/cpp/ggml-backend.c +0 -1357
  54. package/cpp/ggml-metal-whisper.metal +0 -4908
package/cpp/ggml-backend.h
@@ -9,92 +9,197 @@ extern "C" {
 
  typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
  typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
+ typedef struct wsp_ggml_backend_event * wsp_ggml_backend_event_t;
  typedef struct wsp_ggml_backend * wsp_ggml_backend_t;
  typedef void * wsp_ggml_backend_graph_plan_t;
+ typedef struct wsp_ggml_backend_reg * wsp_ggml_backend_reg_t;
+ typedef struct wsp_ggml_backend_device * wsp_ggml_backend_dev_t;
+
+
+ //
+ // Backend buffer type
+ //
+
+ WSP_GGML_API const char * wsp_ggml_backend_buft_name (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer (wsp_ggml_backend_buffer_type_t buft, size_t size);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_max_size (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API bool wsp_ggml_backend_buft_is_host (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_buft_get_device (wsp_ggml_backend_buffer_type_t buft);
 
  //
  // Backend buffer
  //
 
- // buffer type
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer(wsp_ggml_backend_buffer_type_t buft, size_t size);
- WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
- WSP_GGML_API size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API bool wsp_ggml_backend_buft_supports_backend(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend);
+ enum wsp_ggml_backend_buffer_usage {
+     WSP_GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+     WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+     WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE = 2,
+ };
+
+ WSP_GGML_API const char * wsp_ggml_backend_buffer_name (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_max_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API void wsp_ggml_backend_buffer_clear (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+ WSP_GGML_API bool wsp_ggml_backend_buffer_is_host (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_set_usage (wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+ WSP_GGML_API enum wsp_ggml_backend_buffer_usage wsp_ggml_backend_buffer_get_usage (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_reset (wsp_ggml_backend_buffer_t buffer);
 
- // buffer
- WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_type(wsp_ggml_backend_buffer_t buffer);
+ // tensor copy between different backends
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
 
  //
- // Backend
+ // Backend (stream)
  //
 
-
+ WSP_GGML_API wsp_ggml_guid_t wsp_ggml_backend_guid(wsp_ggml_backend_t backend);
  WSP_GGML_API const char * wsp_ggml_backend_name(wsp_ggml_backend_t backend);
  WSP_GGML_API void wsp_ggml_backend_free(wsp_ggml_backend_t backend);
 
  WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_get_default_buffer_type(wsp_ggml_backend_t backend);
  WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_buffer(wsp_ggml_backend_t backend, size_t size);
  WSP_GGML_API size_t wsp_ggml_backend_get_alignment(wsp_ggml_backend_t backend);
+ WSP_GGML_API size_t wsp_ggml_backend_get_max_size(wsp_ggml_backend_t backend);
 
  WSP_GGML_API void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
  WSP_GGML_API void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
 
+ // "offset" refers to the offset of the tensor data for setting/getting data
  WSP_GGML_API void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
  WSP_GGML_API void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_memset( struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
 
  WSP_GGML_API void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend);
 
- WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
 
- WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
- WSP_GGML_API void wsp_ggml_backend_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
- WSP_GGML_API void wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
- WSP_GGML_API bool wsp_ggml_backend_supports_op (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_plan_compute (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute_async(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
 
- // tensor copy between different backends
- WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
- WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst); // automatic fallback to sync copy
+ // NOTE: will be removed, use device version instead
+ WSP_GGML_API bool wsp_ggml_backend_supports_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_supports_buft(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_offload_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+
+ // asynchronous copy
+ // the copy is performed after all the currently queued operations in backend_src
+ // backend_dst will wait for the copy to complete before performing other operations
+ // automatic fallback to sync copy if async is not supported
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_get_device(wsp_ggml_backend_t backend);
 
  //
- // CPU backend
+ // Events
  //
 
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+ WSP_GGML_API wsp_ggml_backend_event_t wsp_ggml_backend_event_new(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_event_free(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_record(wsp_ggml_backend_event_t event, wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_event_synchronize(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_wait(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
 
- WSP_GGML_API bool wsp_ggml_backend_is_cpu(wsp_ggml_backend_t backend);
- WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads(wsp_ggml_backend_t backend_cpu, int n_threads);
+ //
+ // Backend device
+ //
 
- // Create a backend buffer from an existing pointer
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+ enum wsp_ggml_backend_dev_type {
+     WSP_GGML_BACKEND_DEVICE_TYPE_CPU,
+     WSP_GGML_BACKEND_DEVICE_TYPE_GPU,
+     // devices with full capabilities (excludes backends such as BLAS that only support matrix multiplication)
+     WSP_GGML_BACKEND_DEVICE_TYPE_CPU_FULL,
+     WSP_GGML_BACKEND_DEVICE_TYPE_GPU_FULL
+ };
 
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+ // functionality supported by the device
+ struct wsp_ggml_backend_dev_caps {
+     // asynchronous operations
+     bool async;
+     // pinned host buffer
+     bool host_buffer;
+     // creating buffers from host ptr
+     bool buffer_from_host_ptr;
+     // event synchronization
+     bool events;
+ };
+
+ // all the device properties
+ struct wsp_ggml_backend_dev_props {
+     const char * name;
+     const char * description;
+     size_t memory_free;
+     size_t memory_total;
+     enum wsp_ggml_backend_dev_type type;
+     struct wsp_ggml_backend_dev_caps caps;
+ };
+
+ WSP_GGML_API const char * wsp_ggml_backend_dev_name(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API const char * wsp_ggml_backend_dev_description(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_memory(wsp_ggml_backend_dev_t device, size_t * free, size_t * total);
+ WSP_GGML_API enum wsp_ggml_backend_dev_type wsp_ggml_backend_dev_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_get_props(wsp_ggml_backend_dev_t device, struct wsp_ggml_backend_dev_props * props);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_dev_backend_reg(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_t device, const char * params);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_host_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_dev_buffer_from_host_ptr(wsp_ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size);
+
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_buft(wsp_ggml_backend_dev_t device, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_dev_offload_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
 
  //
- // Backend registry
+ // Backend (reg)
  //
 
- // The backend registry is a registry of all the available backends, and allows initializing backends in a generic way
+ WSP_GGML_API const char * wsp_ggml_backend_reg_name(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API size_t wsp_ggml_backend_reg_dev_count(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_reg_dev_get(wsp_ggml_backend_reg_t reg, size_t index);
+ WSP_GGML_API void * wsp_ggml_backend_reg_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name);
+
 
- WSP_GGML_API size_t wsp_ggml_backend_reg_get_count(void);
- WSP_GGML_API size_t wsp_ggml_backend_reg_find_by_name(const char * name);
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
- WSP_GGML_API const char * wsp_ggml_backend_reg_get_name(size_t i);
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_reg_get_default_buffer_type(size_t i);
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_reg_alloc_buffer(size_t i, size_t size);
+ // Functions that may be obtained using wsp_ggml_backend_reg_get_proc_address
+ typedef wsp_ggml_backend_buffer_type_t (*wsp_ggml_backend_split_buffer_type_t)(const float *);
+ typedef void (*wsp_ggml_backend_set_n_threads_t)(wsp_ggml_backend_t, int);
+
+ //
+ // Backend registry
+ //
+
+ // Backend (reg) enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_reg_count(void);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_by_name(const char * name);
+
+ // Device enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_dev_count(void);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_name(const char * name);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_type(enum wsp_ggml_backend_dev_type type);
+
+ // Direct backend (stream) initialization
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_name(name), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_name(const char * name, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(type), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_type(enum wsp_ggml_backend_dev_type type, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(GPU_FULL) OR wsp_ggml_backend_dev_by_type(CPU_FULL), NULL)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_best(void);
 
  //
  // Backend scheduler
  //
 
- // The backend scheduler allows for multiple backends to be used together
+ // The backend scheduler allows for multiple backend devices to be used together
  // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
  // The backends are selected based on:
  // - the backend that supports the operation
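The asynchronous-copy comment added in this hunk (the copy is queued behind all pending work on backend_src, and backend_dst waits for it before running later work) is easier to read as code. The sketch below is illustrative only and is not part of the package: it assumes two already-initialized backends and two compatible tensors created elsewhere, and uses only the declarations shown above plus the vendored ggml.h.

    #include "ggml.h"
    #include "ggml-backend.h"

    // Illustrative sketch (not from whisper.rn): order work across two backend
    // streams with the new async copy and event APIs.
    static void copy_and_sync(wsp_ggml_backend_t backend_src,
                              wsp_ggml_backend_t backend_dst,
                              struct wsp_ggml_tensor * src,
                              struct wsp_ggml_tensor * dst) {
        // Queued after everything already submitted to backend_src; backend_dst
        // holds later work until the copy is done (sync fallback otherwise).
        wsp_ggml_backend_tensor_copy_async(backend_src, backend_dst, src, dst);

        // Block the host until backend_dst has drained its queue.
        wsp_ggml_backend_synchronize(backend_dst);

        // Alternatively, when the device reports caps.events, block the host only
        // up to a point recorded on backend_src.
        wsp_ggml_backend_event_t ev = wsp_ggml_backend_event_new(wsp_ggml_backend_get_device(backend_src));
        wsp_ggml_backend_event_record(ev, backend_src);
        wsp_ggml_backend_event_synchronize(ev);
        wsp_ggml_backend_event_free(ev);
    }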
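The registry section above replaces the old index-based wsp_ggml_backend_reg_* calls with device objects. As a rough sketch, again not taken from the package and assuming only the header above plus <stdio.h>, enumerating devices and initializing a backend looks like this; per the header comment, wsp_ggml_backend_init_best() is equivalent to initializing a GPU_FULL device, falling back to CPU_FULL.

    #include <stdio.h>
    #include "ggml.h"
    #include "ggml-backend.h"

    // Illustrative sketch: list the registered devices, then initialize a backend.
    static wsp_ggml_backend_t pick_backend(void) {
        for (size_t i = 0; i < wsp_ggml_backend_dev_count(); i++) {
            wsp_ggml_backend_dev_t dev = wsp_ggml_backend_dev_get(i);

            struct wsp_ggml_backend_dev_props props;
            wsp_ggml_backend_dev_get_props(dev, &props);

            printf("device %zu: %s (%s) free=%zu total=%zu async=%d events=%d\n",
                   i, props.name, props.description,
                   props.memory_free, props.memory_total,
                   (int) props.caps.async, (int) props.caps.events);
        }

        // Per the comment in the header: dev_init(dev_by_type(GPU_FULL) or CPU_FULL, NULL).
        return wsp_ggml_backend_init_best();
    }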
@@ -102,54 +207,74 @@ extern "C" {
  /*
    Example usage:
 
-     sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
-     // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
-
-     // initialize buffers from a measure graph
-     measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
+     // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
+     // preferrably to run on the same backend as the buffer
+     wsp_ggml_backend_buffer_set_usage(buf_weights, WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-     // in build_graph:
-     build_graph(...) {
-         // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
-         alloc_cpu = wsp_ggml_backend_sched_get_allocr(sched, backend_cpu);
-         wsp_ggml_allocr_alloc(alloc_cpu, tensor);
+     sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, WSP_GGML_DEFAULT_GRAPH_SIZE, false);
 
-         // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
-         struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
-         wsp_ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
-     }
+     // initialize buffers from a max size graph (optional)
+     reserve_graph = build_graph(sched, max_batch_size);
 
-     // allocate backend buffers from measure graph
-     wsp_ggml_backend_sched_init_measure(sched, measure_graph);
+     // manually assign nodes to a backend (optional, should not be needed in most cases)
+     struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
+     wsp_ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
 
-     // the scheduler is now ready to compute graphs
+     wsp_ggml_backend_sched_reserve(sched, reserve_graph);
 
      // compute
      graph = build_graph(sched);
      wsp_ggml_backend_sched_graph_compute(sched, graph);
+
+     // if there are graph inputs:
+     wsp_ggml_backend_sched_reset(sched);
+     wsp_ggml_backend_sched_alloc_graph(sched, graph);
+     wsp_ggml_backend_tensor_set(input_tensor, ...);
+     wsp_ggml_backend_sched_graph_compute(sched, graph);
+ }
  */
 
- struct wsp_ggml_backend_sched;
  typedef struct wsp_ggml_backend_sched * wsp_ggml_backend_sched_t;
 
- // Initialize a backend scheduler
- WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, int n_backends);
+ // Evaluation callback for each node in the graph (set with wsp_ggml_backend_sched_set_eval_callback)
+ // when ask == true, the scheduler wants to know if the user wants to observe this node
+ // this allows the scheduler to batch nodes together in order to evaluate them in a single call
+ //
+ // when ask == false, the scheduler is passing the node tensor to the user for observation
+ // if the user returns false, the scheduler will cancel the graph compute
+ //
+ typedef bool (*wsp_ggml_backend_sched_eval_callback)(struct wsp_ggml_tensor * t, bool ask, void * user_data);
 
- WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
+ // Initialize a backend scheduler
+ WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, wsp_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+ WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
 
  // Initialize backend buffers from a measure graph
- WSP_GGML_API void wsp_ggml_backend_sched_init_measure(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph);
+ WSP_GGML_API bool wsp_ggml_backend_sched_reserve(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph); // returns success
+
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_backends(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_backend(wsp_ggml_backend_sched_t sched, int i);
+
+ // Get the number of splits of the last graph
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_copies(wsp_ggml_backend_sched_t sched);
 
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_backend_sched_get_tallocr(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_sched_get_buffer (wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
+ WSP_GGML_API size_t wsp_ggml_backend_sched_get_buffer_size(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
 
- WSP_GGML_API void wsp_ggml_backend_sched_set_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_sched_set_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node);
 
- // Allocate a graph on the backend scheduler
- WSP_GGML_API void wsp_ggml_backend_sched_graph_compute(
-         wsp_ggml_backend_sched_t sched,
-         struct wsp_ggml_cgraph * graph);
+ // Allocate and compute graph on the backend scheduler
+ WSP_GGML_API bool wsp_ggml_backend_sched_alloc_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph); // returns success
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute_async(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API void wsp_ggml_backend_sched_synchronize(wsp_ggml_backend_sched_t sched);
 
+ // Reset all assignments and allocators - must be called before changing the node backends
+ WSP_GGML_API void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched);
+
+ // Set a callback to be called for each resulting node during graph compute
+ WSP_GGML_API void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data);
 
  //
  // Utils
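The eval-callback comment in this hunk describes a two-phase protocol (ask == true to select nodes, ask == false to hand back results). A hypothetical observer, not shipped with the package, makes the flow concrete; it assumes the op and name fields of struct wsp_ggml_tensor and the WSP_GGML_OP_MUL_MAT value from the vendored ggml.h, which are not shown in this hunk.

    #include <stdbool.h>
    #include <stdio.h>
    #include "ggml.h"
    #include "ggml-backend.h"

    // Hypothetical observer for wsp_ggml_backend_sched_set_eval_callback (illustrative only).
    static bool observe_mul_mat(struct wsp_ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            // Phase 1: tell the scheduler which nodes we want to see.
            // Opting in may reduce batching, so be selective.
            return t->op == WSP_GGML_OP_MUL_MAT;
        }
        // Phase 2: the node has been computed and is handed to us.
        printf("computed node: %s\n", t->name);
        return true; // returning false would cancel the graph compute
    }

    // Attach it before computing, with sched created as in the example-usage comment:
    //     wsp_ggml_backend_sched_set_eval_callback(sched, observe_mul_mat, NULL);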
@@ -169,12 +294,32 @@ extern "C" {
  typedef bool (*wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);
 
  // Compare the output of two backends
- WSP_GGML_API void wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);
+ WSP_GGML_API bool wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);
 
  // Tensor initialization
  WSP_GGML_API void wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr);
- WSP_GGML_API void wsp_ggml_backend_view_init(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API void wsp_ggml_backend_view_init(struct wsp_ggml_tensor * tensor);
+
+ //
+ // CPU backend
+ //
+
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
 
+ WSP_GGML_API bool wsp_ggml_backend_is_cpu (wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads (wsp_ggml_backend_t backend_cpu, int n_threads);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_threadpool (wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ // Create a backend buffer from an existing pointer
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void);
+
+ #ifdef WSP_GGML_USE_CPU_HBM
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_hbm_buffer_type(void);
+ #endif
 
  #ifdef __cplusplus
  }
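The CPU backend block now sits at the end of the header and is the simplest place to see the new enum wsp_ggml_status return value of graph compute. A minimal sketch, assuming a wsp_ggml_cgraph built and allocated elsewhere and the WSP_GGML_STATUS_FAILED value from the vendored ggml.h (not shown in this diff):

    #include "ggml.h"
    #include "ggml-backend.h"

    // Minimal illustrative sketch of the relocated CPU backend entry points.
    static enum wsp_ggml_status run_on_cpu(struct wsp_ggml_cgraph * graph) {
        wsp_ggml_backend_t cpu = wsp_ggml_backend_cpu_init();
        if (cpu == NULL) {
            return WSP_GGML_STATUS_FAILED;
        }

        wsp_ggml_backend_cpu_set_n_threads(cpu, 4);

        // Graph compute now reports success/failure instead of returning void.
        enum wsp_ggml_status st = wsp_ggml_backend_graph_compute(cpu, graph);

        wsp_ggml_backend_free(cpu);
        return st;
    }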