whisper.rn 0.4.0-rc.8 → 0.4.0-rc.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/android/src/main/CMakeLists.txt +2 -1
  2. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -12
  3. package/android/src/main/java/com/rnwhisper/RNWhisper.java +75 -34
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +20 -3
  5. package/android/src/main/jni.cpp +29 -1
  6. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  7. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  8. package/cpp/ggml-aarch64.c +3209 -0
  9. package/cpp/ggml-aarch64.h +39 -0
  10. package/cpp/ggml-alloc.c +725 -517
  11. package/cpp/ggml-alloc.h +47 -65
  12. package/cpp/ggml-backend-impl.h +166 -55
  13. package/cpp/ggml-backend.cpp +2635 -0
  14. package/cpp/ggml-backend.h +202 -85
  15. package/cpp/ggml-common.h +1853 -0
  16. package/cpp/ggml-cpu-impl.h +614 -0
  17. package/cpp/ggml-impl.h +143 -180
  18. package/cpp/ggml-metal.h +13 -11
  19. package/cpp/ggml-metal.m +2955 -1632
  20. package/cpp/ggml-quants.c +9824 -3263
  21. package/cpp/ggml-quants.h +133 -248
  22. package/cpp/ggml-whisper.metallib +0 -0
  23. package/cpp/ggml.c +8482 -5142
  24. package/cpp/ggml.h +633 -349
  25. package/cpp/rn-whisper.cpp +91 -0
  26. package/cpp/rn-whisper.h +2 -0
  27. package/cpp/whisper.cpp +1427 -658
  28. package/cpp/whisper.h +84 -28
  29. package/ios/RNWhisper.mm +124 -37
  30. package/ios/RNWhisperAudioUtils.h +1 -0
  31. package/ios/RNWhisperAudioUtils.m +20 -13
  32. package/ios/RNWhisperContext.h +3 -2
  33. package/ios/RNWhisperContext.mm +39 -7
  34. package/jest/mock.js +9 -1
  35. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  36. package/lib/commonjs/index.js +48 -19
  37. package/lib/commonjs/index.js.map +1 -1
  38. package/lib/commonjs/version.json +1 -1
  39. package/lib/module/NativeRNWhisper.js.map +1 -1
  40. package/lib/module/index.js +48 -19
  41. package/lib/module/index.js.map +1 -1
  42. package/lib/module/version.json +1 -1
  43. package/lib/typescript/NativeRNWhisper.d.ts +6 -3
  44. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  45. package/lib/typescript/index.d.ts +25 -3
  46. package/lib/typescript/index.d.ts.map +1 -1
  47. package/package.json +6 -5
  48. package/src/NativeRNWhisper.ts +12 -3
  49. package/src/index.ts +63 -24
  50. package/src/version.json +1 -1
  51. package/whisper-rn.podspec +9 -2
  52. package/cpp/ggml-backend.c +0 -1718
  53. package/cpp/ggml-metal-whisper.metal +0 -5820
package/cpp/ggml-impl.h CHANGED
@@ -1,24 +1,32 @@
 #pragma once
 
-#include "ggml.h"
-
 // GGML internal header
 
+#include "ggml.h"
+
 #include <assert.h>
 #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
-#include <stddef.h>
 #include <stdbool.h>
-#include <string.h> // memcpy
-#include <math.h> // fabsf
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#undef MIN
+#undef MAX
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// required for mmap as gguf only guarantees 32-byte alignment
+#define TENSOR_ALIGNMENT 32
+
 // static_assert should be a #define, but if it's not,
 // fall back to the _Static_assert C11 keyword.
 // if C99 - static_assert is noop
 // ref: https://stackoverflow.com/a/53923785/4039976
+#ifndef __cplusplus
 #ifndef static_assert
 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
 #define static_assert(cond, msg) _Static_assert(cond, msg)
@@ -26,220 +34,175 @@ extern "C" {
 #define static_assert(cond, msg) struct global_scope_noop_trick
 #endif
 #endif
-
-// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
-#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
-#ifndef __FMA__
-#define __FMA__
-#endif
-#ifndef __F16C__
-#define __F16C__
-#endif
-#ifndef __SSE3__
-#define __SSE3__
-#endif
 #endif
 
-// 16-bit float
-// on Arm, we use __fp16
-// on x86, we use uint16_t
-#if defined(__ARM_NEON) && !defined(_MSC_VER)
-
-// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
 //
-// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+// logging
 //
-#include <arm_neon.h>
 
-#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
-#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) (x)
+WSP_GGML_ATTRIBUTE_FORMAT(2, 3)
+void wsp_ggml_log_internal (enum wsp_ggml_log_level level, const char * format, ...);
+void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * text, void * user_data);
 
-#define WSP_GGML_FP16_TO_FP32(x) ((float) (x))
-#define WSP_GGML_FP32_TO_FP16(x) (x)
+#define WSP_GGML_LOG(...)       wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define WSP_GGML_LOG_INFO(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define WSP_GGML_LOG_WARN(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define WSP_GGML_LOG_ERROR(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define WSP_GGML_LOG_DEBUG(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define WSP_GGML_LOG_CONT(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
 
-#else
-
-#ifdef __wasm_simd128__
-#include <wasm_simd128.h>
-#else
-#ifdef __POWER9_VECTOR__
-#include <altivec.h>
-#undef bool
-#define bool _Bool
-#else
-#if defined(_MSC_VER) || defined(__MINGW32__)
-#include <intrin.h>
-#else
-#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
-#if !defined(__riscv)
-#include <immintrin.h>
-#endif
-#endif
-#endif
-#endif
-#endif
+// bitset
 
-#ifdef __riscv_v_intrinsic
-#include <riscv_vector.h>
-#endif
+typedef uint32_t wsp_ggml_bitset_t;
 
-#ifdef __F16C__
+static_assert(sizeof(wsp_ggml_bitset_t) == 4, "bitset_t constants must be updated");
+#define BITSET_SHR 5 // log2(sizeof(wsp_ggml_bitset_t)*8)
+#define BITSET_MASK (sizeof(wsp_ggml_bitset_t)*8 - 1)
 
-#ifdef _MSC_VER
-#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
-#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
-#else
-#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
-#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
-#endif
+static size_t wsp_ggml_bitset_size(size_t n) {
+    return (n + BITSET_MASK) >> BITSET_SHR;
+}
 
-#elif defined(__POWER9_VECTOR__)
-
-#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
-#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
-/* the inline asm below is about 12% faster than the lookup method */
-#define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
-#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
-
-static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
-    register float f;
-    register double d;
-    __asm__(
-        "mtfprd %0,%2\n"
-        "xscvhpdp %0,%0\n"
-        "frsp %1,%0\n" :
-        /* temp */ "=d"(d),
-        /* out */ "=f"(f):
-        /* in */ "r"(h));
-    return f;
+static inline bool wsp_ggml_bitset_get(const wsp_ggml_bitset_t * bitset, size_t i) {
+    return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
 }
 
-static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
-    register double d;
-    register wsp_ggml_fp16_t r;
-    __asm__( /* xscvdphp can work on double or single precision */
-        "xscvdphp %0,%2\n"
-        "mffprd %1,%0\n" :
-        /* temp */ "=d"(d),
-        /* out */ "=r"(r):
-        /* in */ "f"(f));
-    return r;
+static inline void wsp_ggml_bitset_set(wsp_ggml_bitset_t * bitset, size_t i) {
+    bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
 }
 
-#else
+static inline void wsp_ggml_bitset_clear(wsp_ggml_bitset_t * bitset, size_t i) {
+    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
+}
 
-// FP16 <-> FP32
-// ref: https://github.com/Maratyszcza/FP16
+// hash set
 
-static inline float fp32_from_bits(uint32_t w) {
-    union {
-        uint32_t as_bits;
-        float as_value;
-    } fp32;
-    fp32.as_bits = w;
-    return fp32.as_value;
-}
+#define WSP_GGML_HASHSET_FULL ((size_t)-1)
+#define WSP_GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
 
-static inline uint32_t fp32_to_bits(float f) {
-    union {
-        float as_value;
-        uint32_t as_bits;
-    } fp32;
-    fp32.as_value = f;
-    return fp32.as_bits;
-}
+struct wsp_ggml_hash_set {
+    size_t size;
+    wsp_ggml_bitset_t * used;       // whether or not the keys are in use i.e. set
+    struct wsp_ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if wsp_ggml_bitset_get(used, i)
+};
 
-static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
-    const uint32_t w = (uint32_t) h << 16;
-    const uint32_t sign = w & UINT32_C(0x80000000);
-    const uint32_t two_w = w + w;
+struct wsp_ggml_hash_set wsp_ggml_hash_set_new(size_t size);
+void wsp_ggml_hash_set_free(struct wsp_ggml_hash_set * hash_set);
 
-    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
-    const float exp_scale = 0x1.0p-112f;
-#else
-    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
-#endif
-    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+// returns the minimum size for a hash set that can hold min_sz elements
+size_t wsp_ggml_hash_size(size_t min_sz);
 
-    const uint32_t magic_mask = UINT32_C(126) << 23;
-    const float magic_bias = 0.5f;
-    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+// remove all elements from the hash set
+void wsp_ggml_hash_set_reset(struct wsp_ggml_hash_set * hash_set);
 
-    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
-    const uint32_t result = sign |
-        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
-    return fp32_from_bits(result);
-}
+// returns true if key is in the hash set
+static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
 
-static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
-    const float scale_to_inf = 0x1.0p+112f;
-    const float scale_to_zero = 0x1.0p-110f;
-#else
-    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
-    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
-#endif
-    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
-
-    const uint32_t w = fp32_to_bits(f);
-    const uint32_t shl1_w = w + w;
-    const uint32_t sign = w & UINT32_C(0x80000000);
-    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
-    if (bias < UINT32_C(0x71000000)) {
-        bias = UINT32_C(0x71000000);
-    }
+// returns WSP_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
 
-    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
-    const uint32_t bits = fp32_to_bits(base);
-    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
-    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
-    const uint32_t nonsign = exp_bits + mantissa_bits;
-    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
-}
+// returns WSP_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
+static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
 
-#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
-#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+// return index, asserts if table is full
+static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
 
-#endif // __F16C__
+// hash function for wsp_ggml_tensor
+static inline size_t wsp_ggml_hash(const struct wsp_ggml_tensor * p) {
+    // the last 4 bits are always zero due to alignment
+    return (size_t)(uintptr_t)p >> 4;
+}
 
-#endif // __ARM_NEON
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    while (wsp_ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
+        i = (i + 1) % hash_set->size;
+        if (i == h) {
+            // visited all hash table entries -> not found
+            return WSP_GGML_HASHSET_FULL;
+        }
+    }
+    return i;
+}
 
-// precomputed f32 table for f16 (256 KB)
-// defined in ggml.c, initialized in wsp_ggml_init()
-extern float wsp_ggml_table_f32_f16[1 << 16];
+static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t i = wsp_ggml_hash_find(hash_set, key);
+    return i != WSP_GGML_HASHSET_FULL && wsp_ggml_bitset_get(hash_set->used, i);
+}
 
-// On ARM NEON, it's quicker to directly convert x -> x instead of calling into wsp_ggml_lookup_fp16_to_fp32,
-// so we define WSP_GGML_FP16_TO_FP32 and WSP_GGML_FP32_TO_FP16 elsewhere for NEON.
-// This is also true for POWER9.
-#if !defined(WSP_GGML_FP16_TO_FP32) || !defined(WSP_GGML_FP32_TO_FP16)
+static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    do {
+        if (!wsp_ggml_bitset_get(hash_set->used, i)) {
+            wsp_ggml_bitset_set(hash_set->used, i);
+            hash_set->keys[i] = key;
+            return i;
+        }
+        if (hash_set->keys[i] == key) {
+            return WSP_GGML_HASHSET_ALREADY_EXISTS;
+        }
+        i = (i + 1) % hash_set->size;
+    } while (i != h);
+
+    // visited all hash table entries -> not found
+    WSP_GGML_ABORT("fatal error");
+}
 
-inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
-    uint16_t s;
-    memcpy(&s, &f, sizeof(uint16_t));
-    return wsp_ggml_table_f32_f16[s];
+static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    do {
+        if (!wsp_ggml_bitset_get(hash_set->used, i)) {
+            wsp_ggml_bitset_set(hash_set->used, i);
+            hash_set->keys[i] = key;
+            return i;
+        }
+        if (hash_set->keys[i] == key) {
+            return i;
+        }
+        i = (i + 1) % hash_set->size;
+    } while (i != h);
+
+    // visited all hash table entries -> not found
+    WSP_GGML_ABORT("fatal error");
 }
 
-#define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_lookup_fp16_to_fp32(x)
-#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+// computation graph
 
-#endif
+enum wsp_ggml_cgraph_eval_order {
+    WSP_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
+    WSP_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
+    WSP_GGML_CGRAPH_EVAL_ORDER_COUNT
+};
 
-#define WSP_GGML_HASHTABLE_FULL ((size_t)-1)
-#define WSP_GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
+struct wsp_ggml_cgraph {
+    int size;
+    int n_nodes;
+    int n_leafs;
 
-struct wsp_ggml_hash_set wsp_ggml_hash_set_new(size_t size);
+    struct wsp_ggml_tensor ** nodes;
+    struct wsp_ggml_tensor ** grads;
+    struct wsp_ggml_tensor ** leafs;
 
-bool wsp_ggml_hash_contains (const struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);
+    struct wsp_ggml_hash_set visited_hash_set;
 
-// returns WSP_GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
-size_t wsp_ggml_hash_find (const struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);
+    enum wsp_ggml_cgraph_eval_order order;
+};
 
-// returns WSP_GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
-size_t wsp_ggml_hash_insert ( struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);
+struct wsp_ggml_cgraph wsp_ggml_graph_view(struct wsp_ggml_cgraph * cgraph, int i0, int i1);
 
-// return index, asserts if table is full
-size_t wsp_ggml_hash_find_or_insert( struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);
+// Memory allocation
+
+void * wsp_ggml_aligned_malloc(size_t size);
+void wsp_ggml_aligned_free(void * ptr, size_t size);
 
 #ifdef __cplusplus
 }
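
The hash-set helpers added above (wsp_ggml_hash_set_new, wsp_ggml_hash_size, wsp_ggml_hash_insert, wsp_ggml_hash_contains, wsp_ggml_hash_set_free) are internal ggml API used for graph bookkeeping. Below is a minimal sketch, not shipped with the package, of how native code in this library could use them to deduplicate tensor pointers; it assumes ggml-impl.h is on the include path, and the helper names visit_once and count_unique are illustrative only.

// Sketch: deduplicate tensor pointers with the new internal hash set.
// Assumes ggml-impl.h (and its includes) are available; error handling omitted.
#include <stdbool.h>
#include <stddef.h>
#include "ggml-impl.h"

// records `t` in `seen`; returns true only the first time `t` is encountered
static bool visit_once(struct wsp_ggml_hash_set * seen, struct wsp_ggml_tensor * t) {
    return wsp_ggml_hash_insert(seen, t) != WSP_GGML_HASHSET_ALREADY_EXISTS;
}

static void count_unique(struct wsp_ggml_tensor ** tensors, int n) {
    // size the table with the documented helper so it can hold n keys
    struct wsp_ggml_hash_set seen = wsp_ggml_hash_set_new(wsp_ggml_hash_size((size_t) n));

    int unique = 0;
    for (int i = 0; i < n; i++) {
        if (visit_once(&seen, tensors[i])) {
            unique++;
        }
    }

    // uses the logging macros introduced in the same header
    WSP_GGML_LOG_INFO("%d unique tensors out of %d\n", unique, n);

    wsp_ggml_hash_set_free(&seen);
}

Because wsp_ggml_hash_insert reports WSP_GGML_HASHSET_ALREADY_EXISTS for duplicates, membership testing and insertion happen in a single linear-probe pass.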
package/cpp/ggml-metal.h CHANGED
@@ -1,7 +1,9 @@
+// Note: this description is outdated
+//
 // An interface allowing to compute wsp_ggml_cgraph with Metal
 //
 // This is a fully functional interface that extends ggml with GPU support for Apple devices.
-// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
 //
 // How it works?
 //
@@ -25,10 +27,6 @@
 #include <stddef.h>
 #include <stdbool.h>
 
-// max memory buffers that can be mapped to the device
-#define WSP_GGML_METAL_MAX_BUFFERS 64
-#define WSP_GGML_METAL_MAX_COMMAND_BUFFERS 32
-
 struct wsp_ggml_tensor;
 struct wsp_ggml_cgraph;
 
@@ -41,24 +39,28 @@ extern "C" {
 // user-code should use only these functions
 //
 
-WSP_GGML_API void wsp_ggml_backend_metal_log_set_callback(wsp_ggml_log_callback log_callback, void * user_data);
-
 WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_metal_init(void);
 
 WSP_GGML_API bool wsp_ggml_backend_is_metal(wsp_ggml_backend_t backend);
 
-WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_t wsp_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
+WSP_GGML_DEPRECATED(
+        WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
+        "obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
 
-WSP_GGML_API void wsp_ggml_backend_metal_set_n_cb(wsp_ggml_backend_t backend, int n_cb);
+WSP_GGML_API void wsp_ggml_backend_metal_set_abort_callback(wsp_ggml_backend_t backend, wsp_ggml_abort_callback abort_callback, void * user_data);
 
-WSP_GGML_API WSP_GGML_CALL wsp_ggml_backend_buffer_type_t wsp_ggml_backend_metal_buffer_type(void);
+WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_metal_buffer_type(void);
 
 // helper to check if the device supports a specific family
 // ideally, the user code should be doing these checks
 // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
 WSP_GGML_API bool wsp_ggml_backend_metal_supports_family(wsp_ggml_backend_t backend, int family);
 
+// capture all command buffers committed the next time `wsp_ggml_backend_graph_compute` is called
+WSP_GGML_API void wsp_ggml_backend_metal_capture_next_compute(wsp_ggml_backend_t backend);
+
+WSP_GGML_API wsp_ggml_backend_reg_t wsp_ggml_backend_metal_reg(void);
+
 #ifdef __cplusplus
 }
 #endif
-
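
The revised Metal header drops the log/set_n_cb entry points and adds an abort callback plus a one-shot GPU capture hook. The following is a rough sketch, not code from the package, of how a caller might wire these up; it assumes the prefixed ggml-backend.h API (wsp_ggml_backend_free, wsp_ggml_abort_callback) behaves like upstream ggml, the should_stop flag is illustrative, and the family value 7 (Apple7) is only an example check.

// Sketch: initialize the Metal backend with the new abort-callback hook.
#include <stdbool.h>
#include <stddef.h>
#include "ggml-metal.h"
#include "ggml-backend.h"

static volatile bool should_stop = false;

// returning true asks the backend to stop the current graph computation
static bool abort_if_requested(void * user_data) {
    (void) user_data;
    return should_stop;
}

static wsp_ggml_backend_t init_metal_or_null(void) {
    wsp_ggml_backend_t backend = wsp_ggml_backend_metal_init();
    if (backend == NULL) {
        return NULL;
    }

    // new in this header revision: cooperative cancellation of graph compute
    wsp_ggml_backend_metal_set_abort_callback(backend, abort_if_requested, NULL);

    // illustrative capability check against Metal's feature-set tables
    if (!wsp_ggml_backend_metal_supports_family(backend, 7)) {
        wsp_ggml_backend_free(backend);
        return NULL;
    }

    // optional: record a GPU capture of the next wsp_ggml_backend_graph_compute call
    // wsp_ggml_backend_metal_capture_next_compute(backend);

    return backend;
}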