whisper.rn 0.4.0-rc.1 → 0.4.0-rc.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +6 -6
  2. package/android/build.gradle +4 -0
  3. package/android/src/main/CMakeLists.txt +14 -0
  4. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
  5. package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
  6. package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
  7. package/android/src/main/jni-utils.h +76 -0
  8. package/android/src/main/jni.cpp +226 -109
  9. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  10. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  11. package/cpp/README.md +1 -1
  12. package/cpp/coreml/whisper-encoder-impl.h +1 -1
  13. package/cpp/coreml/whisper-encoder.h +4 -0
  14. package/cpp/coreml/whisper-encoder.mm +5 -3
  15. package/cpp/ggml-aarch64.c +129 -0
  16. package/cpp/ggml-aarch64.h +19 -0
  17. package/cpp/ggml-alloc.c +805 -400
  18. package/cpp/ggml-alloc.h +60 -10
  19. package/cpp/ggml-backend-impl.h +216 -0
  20. package/cpp/ggml-backend-reg.cpp +204 -0
  21. package/cpp/ggml-backend.cpp +1996 -0
  22. package/cpp/ggml-backend.cpp.rej +12 -0
  23. package/cpp/ggml-backend.h +336 -0
  24. package/cpp/ggml-common.h +1853 -0
  25. package/cpp/ggml-cpp.h +38 -0
  26. package/cpp/ggml-cpu-aarch64.c +3560 -0
  27. package/cpp/ggml-cpu-aarch64.h +30 -0
  28. package/cpp/ggml-cpu-impl.h +371 -0
  29. package/cpp/ggml-cpu-quants.c +10822 -0
  30. package/cpp/ggml-cpu-quants.h +63 -0
  31. package/cpp/ggml-cpu.c +13970 -0
  32. package/cpp/ggml-cpu.cpp +663 -0
  33. package/cpp/ggml-cpu.h +177 -0
  34. package/cpp/ggml-impl.h +551 -0
  35. package/cpp/ggml-metal-impl.h +249 -0
  36. package/cpp/ggml-metal.h +24 -43
  37. package/cpp/ggml-metal.m +4190 -1075
  38. package/cpp/ggml-quants.c +5247 -0
  39. package/cpp/ggml-quants.h +100 -0
  40. package/cpp/ggml-threading.cpp +12 -0
  41. package/cpp/ggml-threading.h +12 -0
  42. package/cpp/ggml-whisper.metallib +0 -0
  43. package/cpp/ggml.c +5474 -18763
  44. package/cpp/ggml.h +833 -628
  45. package/cpp/rn-audioutils.cpp +68 -0
  46. package/cpp/rn-audioutils.h +14 -0
  47. package/cpp/rn-whisper-log.h +11 -0
  48. package/cpp/rn-whisper.cpp +221 -52
  49. package/cpp/rn-whisper.h +50 -15
  50. package/cpp/whisper.cpp +2872 -1371
  51. package/cpp/whisper.h +170 -41
  52. package/ios/RNWhisper.mm +139 -46
  53. package/ios/RNWhisperAudioUtils.h +1 -2
  54. package/ios/RNWhisperAudioUtils.m +18 -67
  55. package/ios/RNWhisperContext.h +11 -8
  56. package/ios/RNWhisperContext.mm +195 -150
  57. package/jest/mock.js +15 -2
  58. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  59. package/lib/commonjs/index.js +76 -28
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/commonjs/version.json +1 -1
  62. package/lib/module/NativeRNWhisper.js.map +1 -1
  63. package/lib/module/index.js +76 -28
  64. package/lib/module/index.js.map +1 -1
  65. package/lib/module/version.json +1 -1
  66. package/lib/typescript/NativeRNWhisper.d.ts +13 -4
  67. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  68. package/lib/typescript/index.d.ts +37 -5
  69. package/lib/typescript/index.d.ts.map +1 -1
  70. package/package.json +9 -7
  71. package/src/NativeRNWhisper.ts +20 -4
  72. package/src/index.ts +98 -42
  73. package/src/version.json +1 -1
  74. package/whisper-rn.podspec +11 -18
  75. package/cpp/ggml-metal.metal +0 -2353
@@ -0,0 +1,551 @@
1
+ #pragma once
2
+
3
+ // GGML internal header
4
+
5
+ #include "ggml.h"
6
+ #include <assert.h>
7
+ #include <math.h>
8
+ #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
9
+ #include <stdbool.h>
10
+ #include <stdint.h>
11
+ #include <string.h>
12
+
13
+ #ifdef __ARM_FEATURE_SVE
14
+ #include <arm_sve.h>
15
+ #endif // __ARM_FEATURE_SVE
16
+
17
+ #if defined(__ARM_NEON)
18
+ // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
19
+ //
20
+ // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
21
+ //
22
+ #include <arm_neon.h>
23
+ #endif
24
+
25
+ #if defined(__F16C__)
26
+ #include <immintrin.h>
27
+ #endif
28
+
29
+ #ifdef __cplusplus
30
+ extern "C" {
31
+ #endif
32
+
33
+ #undef MIN
34
+ #undef MAX
35
+
36
+ #define MIN(a, b) ((a) < (b) ? (a) : (b))
37
+ #define MAX(a, b) ((a) > (b) ? (a) : (b))
38
+
39
+ // required for mmap as gguf only guarantees 32-byte alignment
40
+ #define TENSOR_ALIGNMENT 32
41
+
42
+ // static_assert should be a #define, but if it's not,
43
+ // fall back to the _Static_assert C11 keyword.
44
+ // if C99 - static_assert is noop
45
+ // ref: https://stackoverflow.com/a/53923785/4039976
46
+ #ifndef __cplusplus
47
+ #ifndef static_assert
48
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
49
+ #define static_assert(cond, msg) _Static_assert(cond, msg)
50
+ #else
51
+ #define static_assert(cond, msg) struct global_scope_noop_trick
52
+ #endif
53
+ #endif
54
+ #endif
55
+
56
+ static inline int wsp_ggml_up32(int n) {
57
+ return (n + 31) & ~31;
58
+ }
59
+
60
+ //static inline int wsp_ggml_up64(int n) {
61
+ // return (n + 63) & ~63;
62
+ //}
63
+
64
+ static inline int wsp_ggml_up(int n, int m) {
65
+ // assert m is a power of 2
66
+ WSP_GGML_ASSERT((m & (m - 1)) == 0);
67
+ return (n + m - 1) & ~(m - 1);
68
+ }
69
+
70
+ //
71
+ // logging
72
+ //
73
+
74
+ WSP_GGML_ATTRIBUTE_FORMAT(2, 3)
75
+ void wsp_ggml_log_internal (enum wsp_ggml_log_level level, const char * format, ...);
76
+ void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * text, void * user_data);
77
+
78
+ #define WSP_GGML_LOG(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
79
+ #define WSP_GGML_LOG_INFO(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
80
+ #define WSP_GGML_LOG_WARN(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
81
+ #define WSP_GGML_LOG_ERROR(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
82
+ #define WSP_GGML_LOG_DEBUG(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
83
+ #define WSP_GGML_LOG_CONT(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
84
+
85
+ #define WSP_GGML_DEBUG 0
86
+
87
+ #if (WSP_GGML_DEBUG >= 1)
88
+ #define WSP_GGML_PRINT_DEBUG(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
89
+ #else
90
+ #define WSP_GGML_PRINT_DEBUG(...)
91
+ #endif
92
+
93
+ #if (WSP_GGML_DEBUG >= 5)
94
+ #define WSP_GGML_PRINT_DEBUG_5(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
95
+ #else
96
+ #define WSP_GGML_PRINT_DEBUG_5(...)
97
+ #endif
98
+
99
+ #if (WSP_GGML_DEBUG >= 10)
100
+ #define WSP_GGML_PRINT_DEBUG_10(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
101
+ #else
102
+ #define WSP_GGML_PRINT_DEBUG_10(...)
103
+ #endif
104
+
105
+ // tensor params
106
+
107
+ static void wsp_ggml_set_op_params(struct wsp_ggml_tensor * tensor, const void * params, size_t params_size) {
108
+ WSP_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
109
+ assert(params_size <= WSP_GGML_MAX_OP_PARAMS);
110
+ memcpy(tensor->op_params, params, params_size);
111
+ }
112
+
113
+ static int32_t wsp_ggml_get_op_params_i32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
114
+ assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
115
+ return ((const int32_t *)(tensor->op_params))[i];
116
+ }
117
+
118
+ static float wsp_ggml_get_op_params_f32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
119
+ assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
120
+ return ((const float *)(tensor->op_params))[i];
121
+ }
122
+
123
+ static void wsp_ggml_set_op_params_i32(struct wsp_ggml_tensor * tensor, uint32_t i, int32_t value) {
124
+ assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
125
+ ((int32_t *)(tensor->op_params))[i] = value;
126
+ }
127
+
128
+ static void wsp_ggml_set_op_params_f32(struct wsp_ggml_tensor * tensor, uint32_t i, float value) {
129
+ assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
130
+ ((float *)(tensor->op_params))[i] = value;
131
+ }
132
+
133
+ struct wsp_ggml_map_custom1_op_params {
134
+ wsp_ggml_custom1_op_t fun;
135
+ int n_tasks;
136
+ void * userdata;
137
+ };
138
+
139
+ struct wsp_ggml_map_custom2_op_params {
140
+ wsp_ggml_custom2_op_t fun;
141
+ int n_tasks;
142
+ void * userdata;
143
+ };
144
+
145
+ struct wsp_ggml_map_custom3_op_params {
146
+ wsp_ggml_custom3_op_t fun;
147
+ int n_tasks;
148
+ void * userdata;
149
+ };
150
+
151
+ // bitset
152
+
153
+ typedef uint32_t wsp_ggml_bitset_t;
154
+
155
+ static_assert(sizeof(wsp_ggml_bitset_t) == 4, "bitset_t constants must be updated");
156
+ #define BITSET_SHR 5 // log2(sizeof(wsp_ggml_bitset_t)*8)
157
+ #define BITSET_MASK (sizeof(wsp_ggml_bitset_t)*8 - 1)
158
+
159
+ static size_t wsp_ggml_bitset_size(size_t n) {
160
+ return (n + BITSET_MASK) >> BITSET_SHR;
161
+ }
162
+
163
+ static inline bool wsp_ggml_bitset_get(const wsp_ggml_bitset_t * bitset, size_t i) {
164
+ return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
165
+ }
166
+
167
+ static inline void wsp_ggml_bitset_set(wsp_ggml_bitset_t * bitset, size_t i) {
168
+ bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
169
+ }
170
+
171
+ static inline void wsp_ggml_bitset_clear(wsp_ggml_bitset_t * bitset, size_t i) {
172
+ bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
173
+ }
174
+
175
+ // hash set
176
+
177
+ #define WSP_GGML_HASHSET_FULL ((size_t)-1)
178
+ #define WSP_GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
179
+
180
+ struct wsp_ggml_hash_set {
181
+ size_t size;
182
+ wsp_ggml_bitset_t * used; // whether or not the keys are in use i.e. set
183
+ struct wsp_ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if wsp_ggml_bitset_get(used, i)
184
+ };
185
+
186
+ struct wsp_ggml_hash_set wsp_ggml_hash_set_new(size_t size);
187
+ void wsp_ggml_hash_set_free(struct wsp_ggml_hash_set * hash_set);
188
+
189
+ // returns the minimum size for a hash set that can hold min_sz elements
190
+ size_t wsp_ggml_hash_size(size_t min_sz);
191
+
192
+ // remove all elements from the hash set
193
+ void wsp_ggml_hash_set_reset(struct wsp_ggml_hash_set * hash_set);
194
+
195
+ // returns true if key is in the hash set
196
+ static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
197
+
198
+ // returns WSP_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
199
+ static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key);
200
+
201
+ // returns WSP_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
202
+ static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
203
+
204
+ // return index, asserts if table is full
205
+ static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
206
+
207
+ // hash function for wsp_ggml_tensor
208
+ static inline size_t wsp_ggml_hash(const struct wsp_ggml_tensor * p) {
209
+ // the last 4 bits are always zero due to alignment
210
+ return (size_t)(uintptr_t)p >> 4;
211
+ }
212
+
213
+ static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key) {
214
+ size_t h = wsp_ggml_hash(key) % hash_set->size;
215
+
216
+ // linear probing
217
+ size_t i = h;
218
+ while (wsp_ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
219
+ i = (i + 1) % hash_set->size;
220
+ if (i == h) {
221
+ // visited all hash table entries -> not found
222
+ return WSP_GGML_HASHSET_FULL;
223
+ }
224
+ }
225
+ return i;
226
+ }
227
+
228
+ static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
229
+ size_t i = wsp_ggml_hash_find(hash_set, key);
230
+ return i != WSP_GGML_HASHSET_FULL && wsp_ggml_bitset_get(hash_set->used, i);
231
+ }
232
+
233
+ static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
234
+ size_t h = wsp_ggml_hash(key) % hash_set->size;
235
+
236
+ // linear probing
237
+ size_t i = h;
238
+ do {
239
+ if (!wsp_ggml_bitset_get(hash_set->used, i)) {
240
+ wsp_ggml_bitset_set(hash_set->used, i);
241
+ hash_set->keys[i] = key;
242
+ return i;
243
+ }
244
+ if (hash_set->keys[i] == key) {
245
+ return WSP_GGML_HASHSET_ALREADY_EXISTS;
246
+ }
247
+ i = (i + 1) % hash_set->size;
248
+ } while (i != h);
249
+
250
+ // visited all hash table entries -> not found
251
+ WSP_GGML_ABORT("fatal error");
252
+ }
253
+
254
+ static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
255
+ size_t h = wsp_ggml_hash(key) % hash_set->size;
256
+
257
+ // linear probing
258
+ size_t i = h;
259
+ do {
260
+ if (!wsp_ggml_bitset_get(hash_set->used, i)) {
261
+ wsp_ggml_bitset_set(hash_set->used, i);
262
+ hash_set->keys[i] = key;
263
+ return i;
264
+ }
265
+ if (hash_set->keys[i] == key) {
266
+ return i;
267
+ }
268
+ i = (i + 1) % hash_set->size;
269
+ } while (i != h);
270
+
271
+ // visited all hash table entries -> not found
272
+ WSP_GGML_ABORT("fatal error");
273
+ }
274
+
275
+ // computation graph
276
+
277
+ enum wsp_ggml_cgraph_eval_order {
278
+ WSP_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
279
+ WSP_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
280
+ WSP_GGML_CGRAPH_EVAL_ORDER_COUNT
281
+ };
282
+
283
+ struct wsp_ggml_cgraph {
284
+ int size; // maximum number of nodes/leafs/grads/grad_accs
285
+ int n_nodes; // number of nodes currently in use
286
+ int n_leafs; // number of leafs currently in use
287
+
288
+ struct wsp_ggml_tensor ** nodes; // tensors with data that can change if the graph is evaluated
289
+ struct wsp_ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes
290
+ struct wsp_ggml_tensor ** grad_accs; // accumulators for node gradients
291
+ struct wsp_ggml_tensor ** leafs; // tensors with constant data
292
+
293
+ struct wsp_ggml_hash_set visited_hash_set;
294
+
295
+ enum wsp_ggml_cgraph_eval_order order;
296
+ };
297
+
298
+ struct wsp_ggml_cgraph wsp_ggml_graph_view(struct wsp_ggml_cgraph * cgraph, int i0, int i1);
299
+
300
+ // Memory allocation
301
+
302
+ void * wsp_ggml_aligned_malloc(size_t size);
303
+ void wsp_ggml_aligned_free(void * ptr, size_t size);
304
+
305
+ // FP16 to FP32 conversion
306
+
307
+ #if defined(__ARM_NEON)
308
+ #ifdef _MSC_VER
309
+ typedef uint16_t wsp_ggml_fp16_internal_t;
310
+ #else
311
+ typedef __fp16 wsp_ggml_fp16_internal_t;
312
+ #endif
313
+ #endif
314
+
315
+ #if defined(__ARM_NEON) && !defined(_MSC_VER)
316
+ #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
317
+ #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
318
+
319
+ #define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
320
+
321
+ static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
322
+ wsp_ggml_fp16_internal_t tmp;
323
+ memcpy(&tmp, &h, sizeof(wsp_ggml_fp16_t));
324
+ return (float)tmp;
325
+ }
326
+
327
+ static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
328
+ wsp_ggml_fp16_t res;
329
+ wsp_ggml_fp16_internal_t tmp = f;
330
+ memcpy(&res, &tmp, sizeof(wsp_ggml_fp16_t));
331
+ return res;
332
+ }
333
+
334
+ #elif defined(__F16C__)
335
+
336
+ #ifdef _MSC_VER
337
+ #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
338
+ #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
339
+ #else
340
+ #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
341
+ #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
342
+ #endif
343
+
344
+ #elif defined(__POWER9_VECTOR__)
345
+
346
+ #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
347
+ #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
348
+ /* the inline asm below is about 12% faster than the lookup method */
349
+ #define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
350
+ #define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
351
+
352
+ static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
353
+ register float f;
354
+ register double d;
355
+ __asm__(
356
+ "mtfprd %0,%2\n"
357
+ "xscvhpdp %0,%0\n"
358
+ "frsp %1,%0\n" :
359
+ /* temp */ "=d"(d),
360
+ /* out */ "=f"(f):
361
+ /* in */ "r"(h));
362
+ return f;
363
+ }
364
+
365
+ static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
366
+ register double d;
367
+ register wsp_ggml_fp16_t r;
368
+ __asm__( /* xscvdphp can work on double or single precision */
369
+ "xscvdphp %0,%2\n"
370
+ "mffprd %1,%0\n" :
371
+ /* temp */ "=d"(d),
372
+ /* out */ "=r"(r):
373
+ /* in */ "f"(f));
374
+ return r;
375
+ }
376
+
377
+ #else
378
+
379
+ // FP16 <-> FP32
380
+ // ref: https://github.com/Maratyszcza/FP16
381
+
382
+ static inline float fp32_from_bits(uint32_t w) {
383
+ union {
384
+ uint32_t as_bits;
385
+ float as_value;
386
+ } fp32;
387
+ fp32.as_bits = w;
388
+ return fp32.as_value;
389
+ }
390
+
391
+ static inline uint32_t fp32_to_bits(float f) {
392
+ union {
393
+ float as_value;
394
+ uint32_t as_bits;
395
+ } fp32;
396
+ fp32.as_value = f;
397
+ return fp32.as_bits;
398
+ }
399
+
400
+ static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
401
+ const uint32_t w = (uint32_t) h << 16;
402
+ const uint32_t sign = w & UINT32_C(0x80000000);
403
+ const uint32_t two_w = w + w;
404
+
405
+ const uint32_t exp_offset = UINT32_C(0xE0) << 23;
406
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
407
+ const float exp_scale = 0x1.0p-112f;
408
+ #else
409
+ const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
410
+ #endif
411
+ const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
412
+
413
+ const uint32_t magic_mask = UINT32_C(126) << 23;
414
+ const float magic_bias = 0.5f;
415
+ const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
416
+
417
+ const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
418
+ const uint32_t result = sign |
419
+ (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
420
+ return fp32_from_bits(result);
421
+ }
422
+
423
+ static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
424
+ #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
425
+ const float scale_to_inf = 0x1.0p+112f;
426
+ const float scale_to_zero = 0x1.0p-110f;
427
+ #else
428
+ const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
429
+ const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
430
+ #endif
431
+ float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
432
+
433
+ const uint32_t w = fp32_to_bits(f);
434
+ const uint32_t shl1_w = w + w;
435
+ const uint32_t sign = w & UINT32_C(0x80000000);
436
+ uint32_t bias = shl1_w & UINT32_C(0xFF000000);
437
+ if (bias < UINT32_C(0x71000000)) {
438
+ bias = UINT32_C(0x71000000);
439
+ }
440
+
441
+ base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
442
+ const uint32_t bits = fp32_to_bits(base);
443
+ const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
444
+ const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
445
+ const uint32_t nonsign = exp_bits + mantissa_bits;
446
+ return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
447
+ }
448
+
449
+ #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
450
+ #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
451
+
452
+ #endif // defined(__ARM_NEON) && !defined(_MSC_VER)
453
+
454
+ // precomputed f32 table for f16 (256 KB)
455
+ // defined in ggml.c, initialized in wsp_ggml_init()
456
+ WSP_GGML_API float wsp_ggml_table_f32_f16[1 << 16];
457
+
458
+ // On ARM NEON, it's quicker to directly convert x -> x instead of calling into wsp_ggml_lookup_fp16_to_fp32,
459
+ // so we define WSP_GGML_FP16_TO_FP32 and WSP_GGML_FP32_TO_FP16 elsewhere for NEON.
460
+ // This is also true for POWER9.
461
+ #if !defined(WSP_GGML_FP16_TO_FP32)
462
+ inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
463
+ uint16_t s;
464
+ memcpy(&s, &f, sizeof(uint16_t));
465
+ return wsp_ggml_table_f32_f16[s];
466
+ }
467
+
468
+ #define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_lookup_fp16_to_fp32(x)
469
+ #endif
470
+
471
+ #if !defined(WSP_GGML_FP32_TO_FP16)
472
+ #define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
473
+ #endif
474
+
475
+ /**
476
+ * Converts brain16 to float32.
477
+ *
478
+ * The bfloat16 floating point format has the following structure:
479
+ *
480
+ * ┌sign
481
+ * │
482
+ * │ ┌exponent
483
+ * │ │
484
+ * │ │ ┌mantissa
485
+ * │ │ │
486
+ * │┌──┴───┐┌─┴───┐
487
+ * 0b0000000000000000 brain16
488
+ *
489
+ * Since bf16 has the same number of exponent bits as a 32bit float,
490
+ * encoding and decoding numbers becomes relatively straightforward.
491
+ *
492
+ * ┌sign
493
+ * │
494
+ * │ ┌exponent
495
+ * │ │
496
+ * │ │ ┌mantissa
497
+ * │ │ │
498
+ * │┌──┴───┐┌─┴───────────────────┐
499
+ * 0b00000000000000000000000000000000 IEEE binary32
500
+ *
501
+ * For comparison, the standard fp16 format has fewer exponent bits.
502
+ *
503
+ * ┌sign
504
+ * │
505
+ * │ ┌exponent
506
+ * │ │
507
+ * │ │ ┌mantissa
508
+ * │ │ │
509
+ * │┌─┴─┐┌─┴──────┐
510
+ * 0b0000000000000000 IEEE binary16
511
+ *
512
+ * @see IEEE 754-2008
513
+ */
514
+ static inline float wsp_ggml_compute_bf16_to_fp32(wsp_ggml_bf16_t h) {
515
+ union {
516
+ float f;
517
+ uint32_t i;
518
+ } u;
519
+ u.i = (uint32_t)h.bits << 16;
520
+ return u.f;
521
+ }
522
+
523
+ /**
524
+ * Converts float32 to brain16.
525
+ *
526
+ * This is binary identical with Google Brain float conversion.
527
+ * Floats shall round to nearest even, and NANs shall be quiet.
528
+ * Subnormals aren't flushed to zero, except perhaps when used.
529
+ * This code should vectorize nicely if using modern compilers.
530
+ */
531
+ static inline wsp_ggml_bf16_t wsp_ggml_compute_fp32_to_bf16(float s) {
532
+ wsp_ggml_bf16_t h;
533
+ union {
534
+ float f;
535
+ uint32_t i;
536
+ } u;
537
+ u.f = s;
538
+ if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
539
+ h.bits = (u.i >> 16) | 64; /* force to quiet */
540
+ return h;
541
+ }
542
+ h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
543
+ return h;
544
+ }
545
+
546
+ #define WSP_GGML_FP32_TO_BF16(x) wsp_ggml_compute_fp32_to_bf16(x)
547
+ #define WSP_GGML_BF16_TO_FP32(x) wsp_ggml_compute_bf16_to_fp32(x)
548
+
549
+ #ifdef __cplusplus
550
+ }
551
+ #endif