whisper.rn 0.4.0-rc.7 → 0.4.0-rc.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cpp/coreml/whisper-encoder.mm +1 -1
- package/cpp/ggml-alloc.c +41 -11
- package/cpp/ggml-alloc.h +3 -1
- package/cpp/ggml-backend-impl.h +38 -34
- package/cpp/ggml-backend.c +630 -269
- package/cpp/ggml-backend.h +58 -30
- package/cpp/ggml-impl.h +3 -0
- package/cpp/ggml-metal-whisper.metal +1253 -341
- package/cpp/ggml-metal.h +6 -54
- package/cpp/ggml-metal.m +2004 -1987
- package/cpp/ggml-quants.c +2230 -421
- package/cpp/ggml-quants.h +39 -1
- package/cpp/ggml.c +735 -265
- package/cpp/ggml.h +94 -43
- package/cpp/whisper.cpp +118 -86
- package/ios/RNWhisperContext.mm +2 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/version.json +1 -1
- package/package.json +1 -1
- package/src/version.json +1 -1
package/cpp/ggml-backend.h
CHANGED
@@ -17,19 +17,31 @@ extern "C" {
     //

     // buffer type
-    WSP_GGML_API
-    WSP_GGML_API
-    WSP_GGML_API
-    WSP_GGML_API
+    WSP_GGML_API const char * wsp_ggml_backend_buft_name (wsp_ggml_backend_buffer_type_t buft);
+    WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer (wsp_ggml_backend_buffer_type_t buft, size_t size);
+    WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
+    WSP_GGML_API WSP_GGML_CALL size_t wsp_ggml_backend_buft_get_alloc_size (wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
+    WSP_GGML_API bool wsp_ggml_backend_buft_supports_backend(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend);
+    WSP_GGML_API bool wsp_ggml_backend_buft_is_host (wsp_ggml_backend_buffer_type_t buft);

     // buffer
-
-
-
-
-
-    WSP_GGML_API
-    WSP_GGML_API
+    enum wsp_ggml_backend_buffer_usage {
+        WSP_GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+        WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+    };
+
+    WSP_GGML_API const char * wsp_ggml_backend_buffer_name (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+    WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+    WSP_GGML_API void wsp_ggml_backend_buffer_clear (wsp_ggml_backend_buffer_t buffer, uint8_t value);
+    WSP_GGML_API bool wsp_ggml_backend_buffer_is_host (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API void wsp_ggml_backend_buffer_set_usage (wsp_ggml_backend_buffer_t buffer, enum wsp_ggml_backend_buffer_usage usage);
+    WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_get_type (wsp_ggml_backend_buffer_t buffer);
+    WSP_GGML_API void wsp_ggml_backend_buffer_reset (wsp_ggml_backend_buffer_t buffer);

     //
     // Backend
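
The hunk above adds a buffer `usage` tag alongside the buffer-type accessors. The following sketch is not part of the package; it only illustrates how these declarations fit together, assuming a plain CPU build where wsp_ggml_backend_cpu_buffer_type() (declared further down in this header) is available.

#include <stdio.h>
#include "ggml-backend.h"

int main(void) {
    // buffer types describe where/how a backend allocates memory
    wsp_ggml_backend_buffer_type_t buft = wsp_ggml_backend_cpu_buffer_type();

    // allocate a 16 MiB buffer through the buffer-type interface
    wsp_ggml_backend_buffer_t buf = wsp_ggml_backend_buft_alloc_buffer(buft, 16u * 1024 * 1024);

    printf("buffer '%s': %zu bytes, alignment %zu, host: %d\n",
           wsp_ggml_backend_buffer_name(buf),
           wsp_ggml_backend_buffer_get_size(buf),
           wsp_ggml_backend_buft_get_alignment(buft),
           (int) wsp_ggml_backend_buffer_is_host(buf));

    // the usage tag introduced in this version lets callers mark long-lived weight buffers
    wsp_ggml_backend_buffer_set_usage(buf, WSP_GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

    wsp_ggml_backend_buffer_free(buf);
    return 0;
}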
@@ -46,8 +58,8 @@ extern "C" {
     WSP_GGML_API void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
     WSP_GGML_API void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);

-    WSP_GGML_API void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    WSP_GGML_API void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_tensor_set( struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    WSP_GGML_API WSP_GGML_CALL void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);

     WSP_GGML_API void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend);

@@ -55,7 +67,7 @@ extern "C" {

     WSP_GGML_API void wsp_ggml_backend_graph_plan_free (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
     WSP_GGML_API void wsp_ggml_backend_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
-    WSP_GGML_API
+    WSP_GGML_API bool wsp_ggml_backend_graph_compute (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
     WSP_GGML_API bool wsp_ggml_backend_supports_op (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);

     // tensor copy between different backends
@@ -68,13 +80,17 @@ extern "C" {

     WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);

-    WSP_GGML_API bool wsp_ggml_backend_is_cpu(wsp_ggml_backend_t backend);
-    WSP_GGML_API
+    WSP_GGML_API WSP_GGML_CALL bool wsp_ggml_backend_is_cpu (wsp_ggml_backend_t backend);
+    WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads(wsp_ggml_backend_t backend_cpu, int n_threads);

     // Create a backend buffer from an existing pointer
-    WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+    WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);

-    WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+    WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);
+
+#ifdef WSP_GGML_USE_CPU_HBM
+    WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_hbm_buffer_type(void);
+#endif

     //
     // Backend registry
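
As a quick illustration of the pointer constructor above, the sketch below (not from the package) wraps caller-owned memory in a CPU backend buffer; wsp_ggml_backend_free is declared elsewhere in this header, and data/size stand in for memory the caller already manages.

#include "ggml-backend.h"

static void wrap_existing_memory(void * data, size_t size) {
    wsp_ggml_backend_t cpu = wsp_ggml_backend_cpu_init();
    wsp_ggml_backend_cpu_set_n_threads(cpu, 4);   // per-backend thread count

    // the buffer describes the caller's allocation; no copy is made
    wsp_ggml_backend_buffer_t buf = wsp_ggml_backend_cpu_buffer_from_ptr(data, size);

    // base pointer is the memory passed in above
    void * base = wsp_ggml_backend_buffer_get_base(buf);
    (void) base;

    wsp_ggml_backend_buffer_free(buf);  // frees the wrapper, not the caller's memory
    wsp_ggml_backend_free(cpu);
}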
@@ -132,24 +148,36 @@ extern "C" {
     struct wsp_ggml_backend_sched;
     typedef struct wsp_ggml_backend_sched * wsp_ggml_backend_sched_t;

-    //
-
-
-
+    // when ask == true, the scheduler wants to know if the user wants to observe this node
+    // this allows the scheduler to batch nodes together in order to evaluate them in a single call
+    //
+    // when ask == false, the scheduler is passing the node tensor to the user for observation
+    // if the user returns false, the scheduler will cancel the graph compute
+    //
+    typedef bool (*wsp_ggml_backend_sched_eval_callback)(struct wsp_ggml_tensor * t, bool ask, void * user_data);

+    // Initialize a backend scheduler
+    WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, wsp_ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
+    WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);
     // Initialize backend buffers from a measure graph
-    WSP_GGML_API void
+    WSP_GGML_API void wsp_ggml_backend_sched_init_measure(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph);
+    // Get the number of splits of the last graph
+    WSP_GGML_API int wsp_ggml_backend_sched_get_n_splits(wsp_ggml_backend_sched_t sched);

     WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_backend_sched_get_tallocr(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
     WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_sched_get_buffer (wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);

-    WSP_GGML_API void
+    WSP_GGML_API void wsp_ggml_backend_sched_set_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);
+    WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_sched_get_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node);
+
+    // Allocate and compute graph on the backend scheduler
+    WSP_GGML_API void wsp_ggml_backend_sched_graph_compute(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * graph);

-    //
-    WSP_GGML_API void
-    wsp_ggml_backend_sched_t sched,
-    struct wsp_ggml_cgraph * graph);
+    // Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs
+    WSP_GGML_API void wsp_ggml_backend_sched_reset(wsp_ggml_backend_sched_t sched);

+    // Set a callback to be called for each resulting node during graph compute
+    WSP_GGML_API void wsp_ggml_backend_sched_set_eval_callback(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_sched_eval_callback callback, void * user_data);

     //
     // Utils
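
The comments in the hunk above describe a two-phase callback protocol. The sketch below is not from the package; it assumes a scheduler sched and graph gf built elsewhere and only shows the shape of a callback that observes a single named node.

#include <stdio.h>
#include <string.h>
#include "ggml-backend.h"

// ask == true:  return whether this node should be observed (answering true may
//               force the scheduler to split batched evaluation around it)
// ask == false: the node has been computed and is handed over for inspection;
//               returning false cancels the rest of the graph compute
static bool observe_node(struct wsp_ggml_tensor * t, bool ask, void * user_data) {
    const char * watched = (const char *) user_data;
    if (ask) {
        return strcmp(t->name, watched) == 0;
    }
    fprintf(stderr, "observed node '%s'\n", t->name);
    return true;
}

// usage, with sched/gf created elsewhere (names here are illustrative):
//   wsp_ggml_backend_sched_set_eval_callback(sched, observe_node, "encoder_out");
//   wsp_ggml_backend_sched_graph_compute(sched, gf);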
@@ -166,10 +194,10 @@ extern "C" {
     WSP_GGML_API struct wsp_ggml_backend_graph_copy wsp_ggml_backend_graph_copy(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph);
     WSP_GGML_API void wsp_ggml_backend_graph_copy_free(struct wsp_ggml_backend_graph_copy copy);

-    typedef bool (*wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);
+    typedef bool (*WSP_GGML_CALL wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);

     // Compare the output of two backends
-    WSP_GGML_API
+    WSP_GGML_API bool wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);

     // Tensor initialization
     WSP_GGML_API void wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr);
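
For the comparison helper above, a callback of the wsp_ggml_backend_eval_callback shape is invoked once per graph node with the tensor as computed by each backend. A minimal sketch (not from the package; backend1, backend2 and gf are assumed to exist) could look like:

#include <stdio.h>
#include "ggml-backend.h"

// called once per graph node with the results from each backend;
// a real check would read both tensors (e.g. via wsp_ggml_backend_tensor_get)
// and compare the data
static bool check_node(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data) {
    (void) t2;
    (void) user_data;
    fprintf(stderr, "node %d ('%s') evaluated on both backends\n", node_index, t1->name);
    return true; // returning true lets the comparison continue to the next node
}

// usage (illustrative):
//   bool ok = wsp_ggml_backend_compare_graph_backend(backend1, backend2, gf, check_node, NULL);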
package/cpp/ggml-impl.h
CHANGED
@@ -5,6 +5,7 @@
 // GGML internal header

 #include <assert.h>
+#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
 #include <stddef.h>
 #include <stdbool.h>
 #include <string.h> // memcpy
@@ -227,6 +228,8 @@ inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
 #define WSP_GGML_HASHTABLE_FULL ((size_t)-1)
 #define WSP_GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

+struct wsp_ggml_hash_set wsp_ggml_hash_set_new(size_t size);
+
 bool wsp_ggml_hash_contains (const struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);

 // returns WSP_GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted