whisper.rn 0.4.0-rc.1 → 0.4.0-rc.10

This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only and reflects the changes between the two package versions.
Files changed (75)
  1. package/README.md +6 -6
  2. package/android/build.gradle +4 -0
  3. package/android/src/main/CMakeLists.txt +14 -0
  4. package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
  5. package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
  6. package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
  7. package/android/src/main/jni-utils.h +76 -0
  8. package/android/src/main/jni.cpp +226 -109
  9. package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  10. package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
  11. package/cpp/README.md +1 -1
  12. package/cpp/coreml/whisper-encoder-impl.h +1 -1
  13. package/cpp/coreml/whisper-encoder.h +4 -0
  14. package/cpp/coreml/whisper-encoder.mm +5 -3
  15. package/cpp/ggml-aarch64.c +129 -0
  16. package/cpp/ggml-aarch64.h +19 -0
  17. package/cpp/ggml-alloc.c +805 -400
  18. package/cpp/ggml-alloc.h +60 -10
  19. package/cpp/ggml-backend-impl.h +216 -0
  20. package/cpp/ggml-backend-reg.cpp +204 -0
  21. package/cpp/ggml-backend.cpp +1996 -0
  22. package/cpp/ggml-backend.cpp.rej +12 -0
  23. package/cpp/ggml-backend.h +336 -0
  24. package/cpp/ggml-common.h +1853 -0
  25. package/cpp/ggml-cpp.h +38 -0
  26. package/cpp/ggml-cpu-aarch64.c +3560 -0
  27. package/cpp/ggml-cpu-aarch64.h +30 -0
  28. package/cpp/ggml-cpu-impl.h +371 -0
  29. package/cpp/ggml-cpu-quants.c +10822 -0
  30. package/cpp/ggml-cpu-quants.h +63 -0
  31. package/cpp/ggml-cpu.c +13970 -0
  32. package/cpp/ggml-cpu.cpp +663 -0
  33. package/cpp/ggml-cpu.h +177 -0
  34. package/cpp/ggml-impl.h +551 -0
  35. package/cpp/ggml-metal-impl.h +249 -0
  36. package/cpp/ggml-metal.h +24 -43
  37. package/cpp/ggml-metal.m +4190 -1075
  38. package/cpp/ggml-quants.c +5247 -0
  39. package/cpp/ggml-quants.h +100 -0
  40. package/cpp/ggml-threading.cpp +12 -0
  41. package/cpp/ggml-threading.h +12 -0
  42. package/cpp/ggml-whisper.metallib +0 -0
  43. package/cpp/ggml.c +5474 -18763
  44. package/cpp/ggml.h +833 -628
  45. package/cpp/rn-audioutils.cpp +68 -0
  46. package/cpp/rn-audioutils.h +14 -0
  47. package/cpp/rn-whisper-log.h +11 -0
  48. package/cpp/rn-whisper.cpp +221 -52
  49. package/cpp/rn-whisper.h +50 -15
  50. package/cpp/whisper.cpp +2872 -1371
  51. package/cpp/whisper.h +170 -41
  52. package/ios/RNWhisper.mm +139 -46
  53. package/ios/RNWhisperAudioUtils.h +1 -2
  54. package/ios/RNWhisperAudioUtils.m +18 -67
  55. package/ios/RNWhisperContext.h +11 -8
  56. package/ios/RNWhisperContext.mm +195 -150
  57. package/jest/mock.js +15 -2
  58. package/lib/commonjs/NativeRNWhisper.js.map +1 -1
  59. package/lib/commonjs/index.js +76 -28
  60. package/lib/commonjs/index.js.map +1 -1
  61. package/lib/commonjs/version.json +1 -1
  62. package/lib/module/NativeRNWhisper.js.map +1 -1
  63. package/lib/module/index.js +76 -28
  64. package/lib/module/index.js.map +1 -1
  65. package/lib/module/version.json +1 -1
  66. package/lib/typescript/NativeRNWhisper.d.ts +13 -4
  67. package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
  68. package/lib/typescript/index.d.ts +37 -5
  69. package/lib/typescript/index.d.ts.map +1 -1
  70. package/package.json +9 -7
  71. package/src/NativeRNWhisper.ts +20 -4
  72. package/src/index.ts +98 -42
  73. package/src/version.json +1 -1
  74. package/whisper-rn.podspec +11 -18
  75. package/cpp/ggml-metal.metal +0 -2353
package/cpp/ggml-cpu.h ADDED
@@ -0,0 +1,177 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ // Scheduling priorities
+ enum wsp_ggml_sched_priority {
+     WSP_GGML_SCHED_PRIO_NORMAL,
+     WSP_GGML_SCHED_PRIO_MEDIUM,
+     WSP_GGML_SCHED_PRIO_HIGH,
+     WSP_GGML_SCHED_PRIO_REALTIME
+ };
+
+ // Threadpool params
+ // Use wsp_ggml_threadpool_params_default() or wsp_ggml_threadpool_params_init() to populate the defaults
+ struct wsp_ggml_threadpool_params {
+     bool cpumask[WSP_GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
+     int n_threads; // number of threads
+     enum wsp_ggml_sched_priority prio; // thread priority
+     uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling)
+     bool strict_cpu; // strict cpu placement
+     bool paused; // start in paused state
+ };
+
+ struct wsp_ggml_threadpool; // forward declaration, see ggml.c
+
+ typedef struct wsp_ggml_threadpool * wsp_ggml_threadpool_t;
+
+ // the compute plan that needs to be prepared for wsp_ggml_graph_compute()
+ // since https://github.com/ggerganov/ggml/issues/287
+ struct wsp_ggml_cplan {
+     size_t work_size; // size of work buffer, calculated by `wsp_ggml_graph_plan()`
+     uint8_t * work_data; // work buffer, to be allocated by caller before calling to `wsp_ggml_graph_compute()`
+
+     int n_threads;
+     struct wsp_ggml_threadpool * threadpool;
+
+     // abort wsp_ggml_graph_compute when true
+     wsp_ggml_abort_callback abort_callback;
+     void * abort_callback_data;
+ };
+
+ // numa strategies
+ enum wsp_ggml_numa_strategy {
+     WSP_GGML_NUMA_STRATEGY_DISABLED = 0,
+     WSP_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+     WSP_GGML_NUMA_STRATEGY_ISOLATE = 2,
+     WSP_GGML_NUMA_STRATEGY_NUMACTL = 3,
+     WSP_GGML_NUMA_STRATEGY_MIRROR = 4,
+     WSP_GGML_NUMA_STRATEGY_COUNT
+ };
+
+ WSP_GGML_BACKEND_API void wsp_ggml_numa_init(enum wsp_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+ WSP_GGML_BACKEND_API bool wsp_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
+ WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_i32(struct wsp_ggml_context * ctx, int32_t value);
+ WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_f32(struct wsp_ggml_context * ctx, float value);
+
+ WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_i32 (struct wsp_ggml_tensor * tensor, int32_t value);
+ WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_f32 (struct wsp_ggml_tensor * tensor, float value);
+
+ WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_1d(const struct wsp_ggml_tensor * tensor, int i);
+ WSP_GGML_BACKEND_API void wsp_ggml_set_i32_1d(const struct wsp_ggml_tensor * tensor, int i, int32_t value);
+
+ WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ WSP_GGML_BACKEND_API void wsp_ggml_set_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+ WSP_GGML_BACKEND_API float wsp_ggml_get_f32_1d(const struct wsp_ggml_tensor * tensor, int i);
+ WSP_GGML_BACKEND_API void wsp_ggml_set_f32_1d(const struct wsp_ggml_tensor * tensor, int i, float value);
+
+ WSP_GGML_BACKEND_API float wsp_ggml_get_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ WSP_GGML_BACKEND_API void wsp_ggml_set_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+ WSP_GGML_BACKEND_API struct wsp_ggml_threadpool_params wsp_ggml_threadpool_params_default(int n_threads);
+ WSP_GGML_BACKEND_API void wsp_ggml_threadpool_params_init (struct wsp_ggml_threadpool_params * p, int n_threads);
+ WSP_GGML_BACKEND_API bool wsp_ggml_threadpool_params_match (const struct wsp_ggml_threadpool_params * p0, const struct wsp_ggml_threadpool_params * p1);
+ WSP_GGML_BACKEND_API struct wsp_ggml_threadpool * wsp_ggml_threadpool_new (struct wsp_ggml_threadpool_params * params);
+ WSP_GGML_BACKEND_API void wsp_ggml_threadpool_free (struct wsp_ggml_threadpool * threadpool);
+ WSP_GGML_BACKEND_API int wsp_ggml_threadpool_get_n_threads(struct wsp_ggml_threadpool * threadpool);
+ WSP_GGML_BACKEND_API void wsp_ggml_threadpool_pause (struct wsp_ggml_threadpool * threadpool);
+ WSP_GGML_BACKEND_API void wsp_ggml_threadpool_resume (struct wsp_ggml_threadpool * threadpool);
+
+ // wsp_ggml_graph_plan() has to be called before wsp_ggml_graph_compute()
+ // when plan.work_size > 0, caller must allocate memory for plan.work_data
+ WSP_GGML_BACKEND_API struct wsp_ggml_cplan wsp_ggml_graph_plan(
+     const struct wsp_ggml_cgraph * cgraph,
+     int n_threads, /* = WSP_GGML_DEFAULT_N_THREADS */
+     struct wsp_ggml_threadpool * threadpool /* = NULL */ );
+ WSP_GGML_BACKEND_API enum wsp_ggml_status wsp_ggml_graph_compute(struct wsp_ggml_cgraph * cgraph, struct wsp_ggml_cplan * cplan);
+
+ // same as wsp_ggml_graph_compute() but the work data is allocated as a part of the context
+ // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+ WSP_GGML_BACKEND_API enum wsp_ggml_status wsp_ggml_graph_compute_with_ctx(struct wsp_ggml_context * ctx, struct wsp_ggml_cgraph * cgraph, int n_threads);
+
+ //
+ // system info
+ //
+
+ // x86
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sse3 (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_ssse3 (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx2 (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_f16c (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fma (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx_vnni (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512 (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vbmi(void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vnni(void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_bf16(void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_amx_int8 (void);
+ // ARM
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_neon (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_arm_fma (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fp16_va (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_matmul_int8(void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sve (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
+ // other
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_riscv_v (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_vsx (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_wasm_simd (void);
+ WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_llamafile (void);
+
+ // Internal types and functions exposed for tests and benchmarks
+
+ typedef void (*wsp_ggml_from_float_to_mat_t)
+     (const float * WSP_GGML_RESTRICT x, void * WSP_GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
+ typedef void (*wsp_ggml_vec_dot_t) (int n, float * WSP_GGML_RESTRICT s, size_t bs, const void * WSP_GGML_RESTRICT x, size_t bx,
+     const void * WSP_GGML_RESTRICT y, size_t by, int nrc);
+ typedef void (*wsp_ggml_gemv_t) (int n, float * WSP_GGML_RESTRICT s, size_t bs, const void * WSP_GGML_RESTRICT x,
+     const void * WSP_GGML_RESTRICT y, int nr, int nc);
+ typedef void (*wsp_ggml_gemm_t) (int n, float * WSP_GGML_RESTRICT s, size_t bs, const void * WSP_GGML_RESTRICT x,
+     const void * WSP_GGML_RESTRICT y, int nr, int nc);
+
+ struct wsp_ggml_type_traits_cpu {
+     wsp_ggml_from_float_t from_float;
+     wsp_ggml_from_float_to_mat_t from_float_to_mat;
+     wsp_ggml_vec_dot_t vec_dot;
+     enum wsp_ggml_type vec_dot_type;
+     int64_t nrows; // number of rows to process simultaneously
+     int64_t ncols; // number of columns to process simultaneously
+     wsp_ggml_gemv_t gemv;
+     wsp_ggml_gemm_t gemm;
+ };
+
+ WSP_GGML_BACKEND_API const struct wsp_ggml_type_traits_cpu * wsp_ggml_get_type_traits_cpu(enum wsp_ggml_type type);
+
+ WSP_GGML_BACKEND_API void wsp_ggml_cpu_init(void);
+
+ //
+ // CPU backend
+ //
+
+ WSP_GGML_BACKEND_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+
+ WSP_GGML_BACKEND_API bool wsp_ggml_backend_is_cpu (wsp_ggml_backend_t backend);
+ WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_n_threads (wsp_ggml_backend_t backend_cpu, int n_threads);
+ WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_threadpool (wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool);
+ WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ WSP_GGML_BACKEND_API wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void);
+
+ #ifdef WSP_GGML_USE_CPU_HBM
+     WSP_GGML_BACKEND_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_hbm_buffer_type(void);
+ #endif
+
+ WSP_GGML_BACKEND_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_aarch64_buffer_type(void);
+ WSP_GGML_BACKEND_API bool wsp_ggml_backend_cpu_buft_is_aarch64(wsp_ggml_backend_buffer_type_t buft);
+
+ #ifdef __cplusplus
+ }
+ #endif
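
For context on how the threadpool and plan/compute entry points in the new ggml-cpu.h fit together, here is a minimal usage sketch. It is an illustration, not code from this package: only the calls to wsp_ggml_set_f32, wsp_ggml_get_f32_1d, wsp_ggml_cpu_init, wsp_ggml_threadpool_params_default, wsp_ggml_threadpool_new/free, wsp_ggml_graph_plan and wsp_ggml_graph_compute come from the header shown above; the context, tensor, and graph helpers (wsp_ggml_init, wsp_ggml_new_tensor_2d, wsp_ggml_mul_mat, wsp_ggml_new_graph, wsp_ggml_build_forward_expand, wsp_ggml_free) are assumed to follow the upstream ggml.h API with the wsp_ prefix used throughout this package.

#include <stdio.h>
#include <stdlib.h>

#include "ggml.h"
#include "ggml-cpu.h"

int main(void) {
    // Context and graph helpers from ggml.h (assumed to match upstream ggml,
    // modulo the wsp_ prefix).
    struct wsp_ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct wsp_ggml_context * ctx = wsp_ggml_init(params);

    struct wsp_ggml_tensor * a = wsp_ggml_new_tensor_2d(ctx, WSP_GGML_TYPE_F32, 64, 64);
    struct wsp_ggml_tensor * b = wsp_ggml_new_tensor_2d(ctx, WSP_GGML_TYPE_F32, 64, 64);
    wsp_ggml_set_f32(a, 1.0f); // declared in ggml-cpu.h above
    wsp_ggml_set_f32(b, 2.0f);

    struct wsp_ggml_tensor * c     = wsp_ggml_mul_mat(ctx, a, b);
    struct wsp_ggml_cgraph * graph = wsp_ggml_new_graph(ctx);
    wsp_ggml_build_forward_expand(graph, c);

    wsp_ggml_cpu_init(); // one-time CPU init, declared above

    // Threadpool: populate defaults, then create the pool.
    struct wsp_ggml_threadpool_params tpp = wsp_ggml_threadpool_params_default(4);
    struct wsp_ggml_threadpool * tp = wsp_ggml_threadpool_new(&tpp);

    // Plan first; when work_size > 0 the caller owns the work buffer.
    struct wsp_ggml_cplan plan = wsp_ggml_graph_plan(graph, tpp.n_threads, tp);
    if (plan.work_size > 0) {
        plan.work_data = malloc(plan.work_size);
    }

    enum wsp_ggml_status status = wsp_ggml_graph_compute(graph, &plan);
    printf("status = %d, c[0] = %f\n", status, wsp_ggml_get_f32_1d(c, 0));

    free(plan.work_data);
    wsp_ggml_threadpool_free(tp);
    wsp_ggml_free(ctx);
    return 0;
}

Splitting planning from computing lets the caller own the work buffer and reuse a threadpool across graphs; per the comment in the header, the threadpool argument of wsp_ggml_graph_plan() may also be left as NULL to fall back to the default behavior.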