whisper.rn 0.4.0-rc.8 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/ggml-aarch64.c +3209 -0
  9. package/cpp/ggml-aarch64.h +39 -0
  10. package/cpp/ggml-alloc.c +725 -517
  11. package/cpp/ggml-alloc.h +47 -65
  12. package/cpp/ggml-backend-impl.h +166 -55
  13. package/cpp/ggml-backend.cpp +2635 -0
  14. package/cpp/ggml-backend.h +202 -85
  15. package/cpp/ggml-common.h +1853 -0
  16. package/cpp/ggml-cpu-impl.h +614 -0
  17. package/cpp/ggml-impl.h +143 -180
  18. package/cpp/ggml-metal.h +13 -11
  19. package/cpp/ggml-metal.m +2955 -1632
  20. package/cpp/ggml-quants.c +9824 -3263
  21. package/cpp/ggml-quants.h +133 -248
  22. package/cpp/ggml-whisper.metallib +0 -0
  23. package/cpp/ggml.c +8482 -5142
  24. package/cpp/ggml.h +633 -349
  25. package/cpp/rn-whisper.cpp +91 -0
  26. package/cpp/rn-whisper.h +2 -0
  27. package/cpp/whisper.cpp +1427 -658
  28. package/cpp/whisper.h +84 -28
  29. package/ios/RNWhisper.mm +124 -37
  30. package/ios/RNWhisperAudioUtils.h +1 -0
  31. package/ios/RNWhisperAudioUtils.m +20 -13
  32. package/ios/RNWhisperContext.h +3 -2
  33. package/ios/RNWhisperContext.mm +39 -7
  34. package/jest/mock.js +9 -1
  35. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  36. package/lib/commonjs/index.js +48 -19
  37. package/lib/commonjs/index.js.map +1 -1
  38. package/lib/commonjs/version.json +1 -1
  39. package/lib/module/NativeRNWhisper.js.map +1 -1
  40. package/lib/module/index.js +48 -19
  41. package/lib/module/index.js.map +1 -1
  42. package/lib/module/version.json +1 -1
  43. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  44. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  45. package/lib/typescript/index.d.ts +25 -3
  46. package/lib/typescript/index.d.ts.map +1 -1
  47. package/package.json +6 -5
  48. package/src/NativeRNWhisper.ts +12 -3
  49. package/src/index.ts +63 -24
  50. package/src/version.json +1 -1
  51. package/whisper-rn.podspec +9 -2
  52. package/cpp/ggml-backend.c +0 -1718
  53. package/cpp/ggml-metal-whisper.metal +0 -5820
package/cpp/ggml-backend.h
@@ -9,108 +9,197 @@ extern "C" {
 
  typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
  typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
+ typedef struct wsp_ggml_backend_event * wsp_ggml_backend_event_t;
  typedef struct wsp_ggml_backend * wsp_ggml_backend_t;
  typedef void * wsp_ggml_backend_graph_plan_t;
+ typedef struct wsp_ggml_backend_reg * wsp_ggml_backend_reg_t;
+ typedef struct wsp_ggml_backend_device * wsp_ggml_backend_dev_t;
+
 
  //
- // Backend buffer
+ // Backend buffer type
  //
 
- // buffer type
- WSP_GGML_API const char * wsp_ggml_backend_buft_name (wsp_ggml_backend_buffer_type_t buft);
- WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer (wsp_ggml_backend_buffer_type_t buft, size_t size);
- WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
- WSP_GGML_API WSP_GGML_CALL size_t wsp_ggml_backend_buft_get_alloc_size (wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API bool wsp_ggml_backend_buft_supports_backend(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend);
- WSP_GGML_API bool wsp_ggml_backend_buft_is_host (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API const char * wsp_ggml_backend_buft_name (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer (wsp_ggml_backend_buffer_type_t buft, size_t size);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_max_size (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API bool wsp_ggml_backend_buft_is_host (wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_buft_get_device (wsp_ggml_backend_buffer_type_t buft);
+
+ //
+ // Backend buffer
+ //
 
- // buffer
  enum wsp_ggml_backend_buffer_usage {
  WSP_GGML_BACKEND_BUFFER_USAGE_ANY = 0,
  WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+ WSP_GGML_BACKEND_BUFFER_USAGE_COMPUTE = 2,
  };
 
- WSP_GGML_API const char * wsp_ggml_backend_buffer_name (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
- WSP_GGML_API void wsp_ggml_backend_buffer_clear (wsp_ggml_backend_buffer_t buffer, uint8_t value);
- WSP_GGML_API bool wsp_ggml_backend_buffer_is_host (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void wsp_ggml_backend_buffer_set_usage (wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type (wsp_ggml_backend_buffer_t buffer);
- WSP_GGML_API void wsp_ggml_backend_buffer_reset (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API const char * wsp_ggml_backend_buffer_name (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_max_size (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API void wsp_ggml_backend_buffer_clear (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+ WSP_GGML_API bool wsp_ggml_backend_buffer_is_host (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_set_usage (wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+ WSP_GGML_API enum wsp_ggml_backend_buffer_usage wsp_ggml_backend_buffer_get_usage (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type (wsp_ggml_backend_buffer_t buffer);
+ WSP_GGML_API void wsp_ggml_backend_buffer_reset (wsp_ggml_backend_buffer_t buffer);
+
+ // tensor copy between different backends
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
 
  //
- // Backend
+ // Backend (stream)
  //
 
-
+ WSP_GGML_API wsp_ggml_guid_t wsp_ggml_backend_guid(wsp_ggml_backend_t backend);
  WSP_GGML_API const char * wsp_ggml_backend_name(wsp_ggml_backend_t backend);
  WSP_GGML_API void wsp_ggml_backend_free(wsp_ggml_backend_t backend);
 
  WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_get_default_buffer_type(wsp_ggml_backend_t backend);
  WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_buffer(wsp_ggml_backend_t backend, size_t size);
  WSP_GGML_API size_t wsp_ggml_backend_get_alignment(wsp_ggml_backend_t backend);
+ WSP_GGML_API size_t wsp_ggml_backend_get_max_size(wsp_ggml_backend_t backend);
 
  WSP_GGML_API void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
  WSP_GGML_API void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
 
- WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
- WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ // "offset" refers to the offset of the tensor data for setting/getting data
+ WSP_GGML_API void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ WSP_GGML_API void wsp_ggml_backend_tensor_memset( struct wsp_ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
 
  WSP_GGML_API void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend);
 
- WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
 
- WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
- WSP_GGML_API void wsp_ggml_backend_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
- WSP_GGML_API bool wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
- WSP_GGML_API bool wsp_ggml_backend_supports_op (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_plan_compute (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_graph_compute_async(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
 
- // tensor copy between different backends
- WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
- WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst); // automatic fallback to sync copy
+ // NOTE: will be removed, use device version instead
+ WSP_GGML_API bool wsp_ggml_backend_supports_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_supports_buft(wsp_ggml_backend_t backend, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_offload_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+
+ // asynchronous copy
+ // the copy is performed after all the currently queued operations in backend_src
+ // backend_dst will wait for the copy to complete before performing other operations
+ // automatic fallback to sync copy if async is not supported
+ WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend_src, wsp_ggml_backend_t backend_dst, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_get_device(wsp_ggml_backend_t backend);
 
  //
- // CPU backend
+ // Events
  //
 
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+ WSP_GGML_API wsp_ggml_backend_event_t wsp_ggml_backend_event_new(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_event_free(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_record(wsp_ggml_backend_event_t event, wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_event_synchronize(wsp_ggml_backend_event_t event);
+ WSP_GGML_API void wsp_ggml_backend_event_wait(wsp_ggml_backend_t backend, wsp_ggml_backend_event_t event);
 
- WSP_GGML_API WSP_GGML_CALL bool wsp_ggml_backend_is_cpu (wsp_ggml_backend_t backend);
- WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads(wsp_ggml_backend_t backend_cpu, int n_threads);
+ //
+ // Backend device
+ //
 
- // Create a backend buffer from an existing pointer
- WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+ enum wsp_ggml_backend_dev_type {
+ WSP_GGML_BACKEND_DEVICE_TYPE_CPU,
+ WSP_GGML_BACKEND_DEVICE_TYPE_GPU,
+ // devices with full capabilities (excludes backends such as BLAS that only support matrix multiplication)
+ WSP_GGML_BACKEND_DEVICE_TYPE_CPU_FULL,
+ WSP_GGML_BACKEND_DEVICE_TYPE_GPU_FULL
+ };
 
- WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+ // functionality supported by the device
+ struct wsp_ggml_backend_dev_caps {
+ // asynchronous operations
+ bool async;
+ // pinned host buffer
+ bool host_buffer;
+ // creating buffers from host ptr
+ bool buffer_from_host_ptr;
+ // event synchronization
+ bool events;
+ };
 
- #ifdef WSP_GGML_USE_CPU_HBM
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_hbm_buffer_type(void);
- #endif
+ // all the device properties
+ struct wsp_ggml_backend_dev_props {
+ const char * name;
+ const char * description;
+ size_t memory_free;
+ size_t memory_total;
+ enum wsp_ggml_backend_dev_type type;
+ struct wsp_ggml_backend_dev_caps caps;
+ };
+
+ WSP_GGML_API const char * wsp_ggml_backend_dev_name(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API const char * wsp_ggml_backend_dev_description(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_memory(wsp_ggml_backend_dev_t device, size_t * free, size_t * total);
+ WSP_GGML_API enum wsp_ggml_backend_dev_type wsp_ggml_backend_dev_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API void wsp_ggml_backend_dev_get_props(wsp_ggml_backend_dev_t device, struct wsp_ggml_backend_dev_props * props);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_dev_backend_reg(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_t device, const char * params);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_dev_host_buffer_type(wsp_ggml_backend_dev_t device);
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_dev_buffer_from_host_ptr(wsp_ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size);
+
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
+ WSP_GGML_API bool wsp_ggml_backend_dev_supports_buft(wsp_ggml_backend_dev_t device, wsp_ggml_backend_buffer_type_t buft);
+ WSP_GGML_API bool wsp_ggml_backend_dev_offload_op(wsp_ggml_backend_dev_t device, const struct wsp_ggml_tensor * op);
 
  //
- // Backend registry
+ // Backend (reg)
  //
 
- // The backend registry is a registry of all the available backends, and allows initializing backends in a generic way
+ WSP_GGML_API const char * wsp_ggml_backend_reg_name(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API size_t wsp_ggml_backend_reg_dev_count(wsp_ggml_backend_reg_t reg);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_reg_dev_get(wsp_ggml_backend_reg_t reg, size_t index);
+ WSP_GGML_API void * wsp_ggml_backend_reg_get_proc_address(wsp_ggml_backend_reg_t reg, const char * name);
+
+
+ // Functions that may be obtained using wsp_ggml_backend_reg_get_proc_address
+ typedef wsp_ggml_backend_buffer_type_t (*wsp_ggml_backend_split_buffer_type_t)(const float *);
+ typedef void (*wsp_ggml_backend_set_n_threads_t)(wsp_ggml_backend_t, int);
+
+ //
+ // Backend registry
+ //
 
- WSP_GGML_API size_t wsp_ggml_backend_reg_get_count(void);
- WSP_GGML_API size_t wsp_ggml_backend_reg_find_by_name(const char * name);
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
- WSP_GGML_API const char * wsp_ggml_backend_reg_get_name(size_t i);
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
- WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_reg_get_default_buffer_type(size_t i);
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_reg_alloc_buffer(size_t i, size_t size);
+ // Backend (reg) enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_reg_count(void);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_reg_by_name(const char * name);
+
+ // Device enumeration
+ WSP_GGML_API size_t wsp_ggml_backend_dev_count(void);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_get(size_t index);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_name(const char * name);
+ WSP_GGML_API wsp_ggml_backend_dev_t wsp_ggml_backend_dev_by_type(enum wsp_ggml_backend_dev_type type);
+
+ // Direct backend (stream) initialization
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_name(name), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_name(const char * name, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(type), params)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_by_type(enum wsp_ggml_backend_dev_type type, const char * params);
+ // = wsp_ggml_backend_dev_init(wsp_ggml_backend_dev_by_type(GPU_FULL) OR wsp_ggml_backend_dev_by_type(CPU_FULL), NULL)
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_init_best(void);
 
  //
  // Backend scheduler
  //
 
- // The backend scheduler allows for multiple backends to be used together
+ // The backend scheduler allows for multiple backend devices to be used together
  // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
  // The backends are selected based on:
  // - the backend that supports the operation
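The hunk above replaces the old index-based backend registry with a device-oriented API (wsp_ggml_backend_dev_*, wsp_ggml_backend_reg_* and the wsp_ggml_backend_init_* helpers). As a rough sketch of how these entry points compose, based only on the declarations shown in this diff and not on example code shipped with the package, device enumeration and backend selection could look like:

    #include <stdio.h>
    #include "ggml-backend.h" // the wsp_-prefixed header shown in this diff

    // Enumerate every registered device, print its properties, then let the
    // library pick the "best" backend (a GPU_FULL device if one exists,
    // otherwise a CPU_FULL device).
    static wsp_ggml_backend_t pick_backend(void) {
        for (size_t i = 0; i < wsp_ggml_backend_dev_count(); i++) {
            wsp_ggml_backend_dev_t dev = wsp_ggml_backend_dev_get(i);

            struct wsp_ggml_backend_dev_props props;
            wsp_ggml_backend_dev_get_props(dev, &props);
            printf("device %zu: %s (%s), memory %zu/%zu bytes free\n",
                   i, props.name, props.description,
                   props.memory_free, props.memory_total);
        }
        return wsp_ggml_backend_init_best();
    }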
@@ -118,36 +207,36 @@ extern "C" {
  /*
  Example usage:
 
- sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
- // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
+ // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
+ // preferrably to run on the same backend as the buffer
+ wsp_ggml_backend_buffer_set_usage(buf_weights, WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
- // initialize buffers from a measure graph
- measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
+ sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, WSP_GGML_DEFAULT_GRAPH_SIZE, false);
 
- // in build_graph:
- build_graph(...) {
- // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
- alloc_cpu = wsp_ggml_backend_sched_get_allocr(sched, backend_cpu);
- wsp_ggml_allocr_alloc(alloc_cpu, tensor);
+ // initialize buffers from a max size graph (optional)
+ reserve_graph = build_graph(sched, max_batch_size);
 
- // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
- struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
- wsp_ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
- }
+ // manually assign nodes to a backend (optional, should not be needed in most cases)
+ struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
+ wsp_ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
 
- // allocate backend buffers from measure graph
- wsp_ggml_backend_sched_init_measure(sched, measure_graph);
-
- // the scheduler is now ready to compute graphs
+ wsp_ggml_backend_sched_reserve(sched, reserve_graph);
 
  // compute
  graph = build_graph(sched);
  wsp_ggml_backend_sched_graph_compute(sched, graph);
+
+ // if there are graph inputs:
+ wsp_ggml_backend_sched_reset(sched);
+ wsp_ggml_backend_sched_alloc_graph(sched, graph);
+ wsp_ggml_backend_tensor_set(input_tensor, ...);
+ wsp_ggml_backend_sched_graph_compute(sched, graph);
+ }
  */
 
- struct wsp_ggml_backend_sched;
  typedef struct wsp_ggml_backend_sched * wsp_ggml_backend_sched_t;
 
+ // Evaluation callback for each node in the graph (set with wsp_ggml_backend_sched_set_eval_callback)
  // when ask == true, the scheduler wants to know if the user wants to observe this node
  // this allows the scheduler to batch nodes together in order to evaluate them in a single call
  //
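To make the updated example comment above concrete, here is a minimal sketch of the revised scheduler flow. The names backend_gpu, backend_cpu, build_graph, max_batch_size, batch_size, input_tensor, input_data and input_size are placeholders carried over from that comment, not APIs of this package:

    // Sketch of the new scheduler flow (placeholders noted above).
    wsp_ggml_backend_t backends[2] = { backend_gpu, backend_cpu };

    wsp_ggml_backend_sched_t sched = wsp_ggml_backend_sched_new(
        backends, /* bufts */ NULL, /* n_backends */ 2,
        WSP_GGML_DEFAULT_GRAPH_SIZE, /* parallel */ false);

    // Reserve compute buffers once from a worst-case graph (optional).
    struct wsp_ggml_cgraph * reserve_graph = build_graph(sched, max_batch_size);
    wsp_ggml_backend_sched_reserve(sched, reserve_graph);

    // Per evaluation: reset, allocate the graph, set inputs, compute.
    struct wsp_ggml_cgraph * graph = build_graph(sched, batch_size);
    wsp_ggml_backend_sched_reset(sched);
    wsp_ggml_backend_sched_alloc_graph(sched, graph);
    wsp_ggml_backend_tensor_set(input_tensor, input_data, 0, input_size);
    enum wsp_ggml_status status = wsp_ggml_backend_sched_graph_compute(sched, graph);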
@@ -157,27 +246,35 @@ extern "C" {
  typedef bool (*wsp_ggml_backend_sched_eval_callback)(struct wsp_ggml_tensor * t, bool ask, void * user_data);
 
  // Initialize a backend scheduler
- WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, wsp_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
- WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, wsp_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+ WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
+
  // Initialize backend buffers from a measure graph
- WSP_GGML_API void wsp_ggml_backend_sched_init_measure(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph);
+ WSP_GGML_API bool wsp_ggml_backend_sched_reserve(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph); // returns success
+
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_backends(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_backend(wsp_ggml_backend_sched_t sched, int i);
+
  // Get the number of splits of the last graph
- WSP_GGML_API int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched);
+ WSP_GGML_API int wsp_ggml_backend_sched_get_n_copies(wsp_ggml_backend_sched_t sched);
 
- WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_backend_sched_get_tallocr(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
- WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_sched_get_buffer (wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
+ WSP_GGML_API size_t wsp_ggml_backend_sched_get_buffer_size(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
 
- WSP_GGML_API void wsp_ggml_backend_sched_set_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
- WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node);
+ WSP_GGML_API void wsp_ggml_backend_sched_set_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_tensor_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node);
 
  // Allocate and compute graph on the backend scheduler
- WSP_GGML_API void wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API bool wsp_ggml_backend_sched_alloc_graph(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph); // returns success
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API enum wsp_ggml_status wsp_ggml_backend_sched_graph_compute_async(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);
+ WSP_GGML_API void wsp_ggml_backend_sched_synchronize(wsp_ggml_backend_sched_t sched);
 
- // Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs
- WSP_GGML_API void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched);
+ // Reset all assignments and allocators - must be called before changing the node backends
+ WSP_GGML_API void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched);
 
  // Set a callback to be called for each resulting node during graph compute
- WSP_GGML_API void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data);
+ WSP_GGML_API void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data);
 
  //
  // Utils
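The eval-callback protocol is unchanged apart from the clarified comment: the scheduler first calls the callback with ask == true so the caller can opt in to observing a node, then again with ask == false once the node's data is available. A brief illustrative sketch of such a callback, assuming the tensor name field from ggml.h and an arbitrary name filter:

    #include <string.h>

    // Observe only nodes whose name starts with "logits" (arbitrary filter).
    static bool observe_logits(struct wsp_ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            // true = call me again for this node once its data is computed
            return strncmp(t->name, "logits", 6) == 0;
        }
        // ask == false: t now holds computed data and can be inspected here
        return true; // keep evaluating the rest of the graph
    }

    // wsp_ggml_backend_sched_set_eval_callback(sched, observe_logits, NULL);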
@@ -194,15 +291,35 @@ extern "C" {
  WSP_GGML_API struct wsp_ggml_backend_graph_copy wsp_ggml_backend_graph_copy(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph);
  WSP_GGML_API void wsp_ggml_backend_graph_copy_free(struct wsp_ggml_backend_graph_copy copy);
 
- typedef bool (*WSP_GGML_CALL wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);
+ typedef bool (*wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);
 
  // Compare the output of two backends
  WSP_GGML_API bool wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);
 
  // Tensor initialization
  WSP_GGML_API void wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr);
- WSP_GGML_API void wsp_ggml_backend_view_init(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+ WSP_GGML_API void wsp_ggml_backend_view_init(struct wsp_ggml_tensor * tensor);
 
+ //
+ // CPU backend
+ //
+
+ WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+
+ WSP_GGML_API bool wsp_ggml_backend_is_cpu (wsp_ggml_backend_t backend);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads (wsp_ggml_backend_t backend_cpu, int n_threads);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_threadpool (wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool);
+ WSP_GGML_API void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ // Create a backend buffer from an existing pointer
+ WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+
+ WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void);
+
+ #ifdef WSP_GGML_USE_CPU_HBM
+ WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_hbm_buffer_type(void);
+ #endif
 
  #ifdef __cplusplus
  }
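The CPU backend declarations that previously sat near the top of the header now close it out. As a final illustration, a minimal CPU-only compute path using those declarations; graph construction is elided, and WSP_GGML_STATUS_SUCCESS is assumed to follow the library's WSP_ prefixing of ggml's status enum:

    wsp_ggml_backend_t cpu = wsp_ggml_backend_cpu_init();
    wsp_ggml_backend_cpu_set_n_threads(cpu, 4);

    // ... build a struct wsp_ggml_cgraph * graph whose tensors live in a buffer
    //     obtained from wsp_ggml_backend_alloc_buffer(cpu, size) ...

    enum wsp_ggml_status status = wsp_ggml_backend_graph_compute(cpu, graph);
    if (status != WSP_GGML_STATUS_SUCCESS) {
        // handle allocation or compute failure
    }

    wsp_ggml_backend_free(cpu);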