whisper.rn 0.4.0-rc.3 → 0.4.0-rc.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -6
- package/android/build.gradle +4 -0
- package/android/src/main/CMakeLists.txt +7 -0
- package/android/src/main/java/com/rnwhisper/AudioUtils.java +0 -80
- package/android/src/main/java/com/rnwhisper/RNWhisper.java +6 -1
- package/android/src/main/java/com/rnwhisper/WhisperContext.java +53 -135
- package/android/src/main/jni-utils.h +76 -0
- package/android/src/main/jni.cpp +188 -109
- package/cpp/README.md +1 -1
- package/cpp/coreml/whisper-encoder-impl.h +1 -1
- package/cpp/coreml/whisper-encoder.h +4 -0
- package/cpp/coreml/whisper-encoder.mm +4 -2
- package/cpp/ggml-alloc.c +451 -282
- package/cpp/ggml-alloc.h +74 -8
- package/cpp/ggml-backend-impl.h +112 -0
- package/cpp/ggml-backend.c +1357 -0
- package/cpp/ggml-backend.h +181 -0
- package/cpp/ggml-impl.h +243 -0
- package/cpp/{ggml-metal.metal → ggml-metal-whisper.metal} +1556 -329
- package/cpp/ggml-metal.h +28 -1
- package/cpp/ggml-metal.m +1128 -308
- package/cpp/ggml-quants.c +7382 -0
- package/cpp/ggml-quants.h +224 -0
- package/cpp/ggml.c +3848 -5245
- package/cpp/ggml.h +353 -155
- package/cpp/rn-audioutils.cpp +68 -0
- package/cpp/rn-audioutils.h +14 -0
- package/cpp/rn-whisper-log.h +11 -0
- package/cpp/rn-whisper.cpp +141 -59
- package/cpp/rn-whisper.h +47 -15
- package/cpp/whisper.cpp +1750 -964
- package/cpp/whisper.h +97 -15
- package/ios/RNWhisper.mm +15 -9
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/contents.xcworkspacedata +4 -0
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -0
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcuserdata/jhen.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
- package/ios/RNWhisper.xcodeproj/xcuserdata/jhen.xcuserdatad/xcschemes/xcschememanagement.plist +19 -0
- package/ios/RNWhisperAudioUtils.h +0 -2
- package/ios/RNWhisperAudioUtils.m +0 -56
- package/ios/RNWhisperContext.h +8 -12
- package/ios/RNWhisperContext.mm +132 -138
- package/jest/mock.js +1 -1
- package/lib/commonjs/NativeRNWhisper.js.map +1 -1
- package/lib/commonjs/index.js +28 -9
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/NativeRNWhisper.js.map +1 -1
- package/lib/module/index.js +28 -9
- package/lib/module/index.js.map +1 -1
- package/lib/module/version.json +1 -1
- package/lib/typescript/NativeRNWhisper.d.ts +7 -1
- package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +7 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +6 -5
- package/src/NativeRNWhisper.ts +8 -1
- package/src/index.ts +29 -17
- package/src/version.json +1 -1
- package/whisper-rn.podspec +1 -2
package/cpp/ggml-alloc.h
CHANGED
@@ -6,20 +6,86 @@
 extern "C" {
 #endif
 
+struct wsp_ggml_backend;
+struct wsp_ggml_backend_buffer;
+struct wsp_ggml_backend_buffer_type;
 
-WSP_GGML_API struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment);
-WSP_GGML_API struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment);
+//
+// Legacy API
+//
+
+typedef struct wsp_ggml_allocr * wsp_ggml_allocr_t;
+
+// initialize allocator for use with CPU backend only
+WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new(void * data, size_t size, size_t alignment);
+WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure(size_t alignment);
+
+// initialize allocator for use with ggml-backend
+WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
+WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
+WSP_GGML_API wsp_ggml_allocr_t wsp_ggml_allocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
+
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_allocr_get_buffer(wsp_ggml_allocr_t alloc);
 
 // tell the allocator to parse nodes following the order described in the list
 // you should call this if your graph are optimized to execute out-of-order
-WSP_GGML_API void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n);
+WSP_GGML_API void wsp_ggml_allocr_set_parse_seq(wsp_ggml_allocr_t alloc, const int * list, int n);
+
+WSP_GGML_API void   wsp_ggml_allocr_free       (wsp_ggml_allocr_t alloc);
+WSP_GGML_API bool   wsp_ggml_allocr_is_measure (wsp_ggml_allocr_t alloc);
+WSP_GGML_API void   wsp_ggml_allocr_reset      (wsp_ggml_allocr_t alloc);
+WSP_GGML_API void   wsp_ggml_allocr_alloc      (wsp_ggml_allocr_t alloc, struct wsp_ggml_tensor * tensor);
+WSP_GGML_API size_t wsp_ggml_allocr_max_size   (wsp_ggml_allocr_t alloc);
+
+WSP_GGML_API size_t wsp_ggml_allocr_alloc_graph(wsp_ggml_allocr_t alloc, struct wsp_ggml_cgraph * graph);
+
+//
+// ggml-backend v2 API
+//
+
+// Seperate tensor and graph allocator objects
+// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
+// The original API is kept as a wrapper around the new API
+
+// Tensor allocator
+typedef struct wsp_ggml_tallocr * wsp_ggml_tallocr_t;
+
+WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new(void * data, size_t size, size_t alignment);
+WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure(size_t alignment);
+WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_buffer(struct wsp_ggml_backend_buffer * buffer);
+WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_from_backend(struct wsp_ggml_backend * backend, size_t size); // allocates an owned buffer
+WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_tallocr_new_measure_from_backend(struct wsp_ggml_backend * backend);
+
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_tallocr_get_buffer(wsp_ggml_tallocr_t talloc);
+
+WSP_GGML_API void   wsp_ggml_tallocr_free       (wsp_ggml_tallocr_t talloc);
+WSP_GGML_API bool   wsp_ggml_tallocr_is_measure (wsp_ggml_tallocr_t talloc);
+WSP_GGML_API void   wsp_ggml_tallocr_reset      (wsp_ggml_tallocr_t talloc);
+WSP_GGML_API void   wsp_ggml_tallocr_alloc      (wsp_ggml_tallocr_t talloc, struct wsp_ggml_tensor * tensor);
+WSP_GGML_API size_t wsp_ggml_tallocr_max_size   (wsp_ggml_tallocr_t talloc);
+
+
+// Graph allocator
+typedef struct wsp_ggml_gallocr * wsp_ggml_gallocr_t;
+
+WSP_GGML_API wsp_ggml_gallocr_t wsp_ggml_gallocr_new(void);
+WSP_GGML_API void   wsp_ggml_gallocr_free(wsp_ggml_gallocr_t galloc);
+
+WSP_GGML_API void   wsp_ggml_gallocr_set_parse_seq(wsp_ggml_gallocr_t galloc, const int * list, int n);
+WSP_GGML_API size_t wsp_ggml_gallocr_alloc_graph(wsp_ggml_gallocr_t galloc, wsp_ggml_tallocr_t talloc, struct wsp_ggml_cgraph * graph);
+
+// Allocate tensors from the allocators given by the hash table
+WSP_GGML_API void wsp_ggml_gallocr_alloc_graph_n(
+    wsp_ggml_gallocr_t galloc,
+    struct wsp_ggml_cgraph * graph,
+    struct wsp_ggml_hash_set hash_set,
+    wsp_ggml_tallocr_t * hash_node_talloc);
 
-WSP_GGML_API void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc);
-WSP_GGML_API bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc);
-WSP_GGML_API void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc);
-WSP_GGML_API void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor);
-WSP_GGML_API size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph);
 
+// Utils
+// Create a buffer and allocate all the tensors in a wsp_ggml_context
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors_from_buft(struct wsp_ggml_context * ctx, struct wsp_ggml_backend_buffer_type * buft);
+WSP_GGML_API struct wsp_ggml_backend_buffer * wsp_ggml_backend_alloc_ctx_tensors(struct wsp_ggml_context * ctx, struct wsp_ggml_backend * backend);
 
 #ifdef __cplusplus
 }
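Note: the hunk above keeps the legacy allocator entry points but re-types the handle as wsp_ggml_allocr_t and adds ggml-backend-aware constructors. As a minimal sketch of the measure-then-allocate flow these declarations imply (build_graph() is a hypothetical helper, and the alignment and context sizes are illustrative, not values taken from this package):

#include <stdlib.h>

#include "ggml.h"
#include "ggml-alloc.h"

// Hypothetical helper: records the model's compute graph into the given context.
extern struct wsp_ggml_cgraph * build_graph(struct wsp_ggml_context * ctx);

void compute_buffer_example(void) {
    // Contexts are created with no_alloc = true so the allocator, not the
    // context, assigns tensor data addresses.
    struct wsp_ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // metadata only; illustrative size
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    // 1. Measure pass: a measure allocator only records sizes.
    wsp_ggml_allocr_t measure = wsp_ggml_allocr_new_measure(/*alignment =*/ 32);
    struct wsp_ggml_context * ctx0 = wsp_ggml_init(params);
    size_t mem_size = wsp_ggml_allocr_alloc_graph(measure, build_graph(ctx0));
    wsp_ggml_allocr_free(measure);
    wsp_ggml_free(ctx0);

    // 2. Real pass: rebuild the graph and place its tensors into a buffer
    //    of the measured size.
    void * buf = malloc(mem_size);
    wsp_ggml_allocr_t alloc = wsp_ggml_allocr_new(buf, mem_size, /*alignment =*/ 32);
    struct wsp_ggml_context * ctx1 = wsp_ggml_init(params);
    wsp_ggml_allocr_alloc_graph(alloc, build_graph(ctx1));
    // ... evaluate the graph here ...
    wsp_ggml_allocr_free(alloc);
    wsp_ggml_free(ctx1);
    free(buf);
}

The graph is built twice because tensors placed by a measure allocator carry no real backing memory; on that pass only the size returned by wsp_ggml_allocr_alloc_graph is meaningful.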
package/cpp/ggml-backend-impl.h
ADDED
@@ -0,0 +1,112 @@
+#pragma once
+
+// ggml-backend internal header
+
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// Backend buffer
+//
+
+// buffer type
+typedef void * wsp_ggml_backend_buffer_type_context_t;
+
+struct wsp_ggml_backend_buffer_type_i {
+    wsp_ggml_backend_buffer_t (*alloc_buffer) (wsp_ggml_backend_buffer_type_t buft, size_t size);
+    size_t (*get_alignment) (wsp_ggml_backend_buffer_type_t buft); // tensor alignment
+    size_t (*get_alloc_size) (wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+    bool (*supports_backend)(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend); // check if the buffer type is usable by the backend
+};
+
+struct wsp_ggml_backend_buffer_type {
+    struct wsp_ggml_backend_buffer_type_i iface;
+    wsp_ggml_backend_buffer_type_context_t context;
+};
+
+// buffer
+typedef void * wsp_ggml_backend_buffer_context_t;
+
+struct wsp_ggml_backend_buffer_i {
+    void (*free_buffer)(wsp_ggml_backend_buffer_t buffer);
+    //void (*reset) (wsp_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
+    void * (*get_base) (wsp_ggml_backend_buffer_t buffer);
+    void (*init_tensor)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
+    void (*set_tensor) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*get_tensor) (wsp_ggml_backend_buffer_t buffer, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    // (optional) copy tensor between different buffer-type, allow for single-copy tranfers
+    void (*cpy_tensor_from)(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+    void (*cpy_tensor_to) (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+};
+
+struct wsp_ggml_backend_buffer {
+    struct wsp_ggml_backend_buffer_i iface;
+    wsp_ggml_backend_buffer_type_t buft;
+    wsp_ggml_backend_buffer_context_t context;
+    size_t size;
+};
+
+wsp_ggml_backend_buffer_t wsp_ggml_backend_buffer_init(
+    wsp_ggml_backend_buffer_type_t buft,
+    struct wsp_ggml_backend_buffer_i iface,
+    wsp_ggml_backend_buffer_context_t context,
+    size_t size);
+
+
+//
+// Backend
+//
+
+typedef void * wsp_ggml_backend_context_t;
+
+struct wsp_ggml_backend_i {
+    const char * (*get_name)(wsp_ggml_backend_t backend);
+
+    void (*free)(wsp_ggml_backend_t backend);
+
+    // buffer allocation
+    wsp_ggml_backend_buffer_type_t (*get_default_buffer_type)(wsp_ggml_backend_t backend);
+
+    // (optional) asynchroneous tensor data access
+    void (*set_tensor_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    void (*get_tensor_async)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+    // (optional) asynchroneous tensor copy
+    void (*cpy_tensor_from_async)(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+    void (*cpy_tensor_to_async) (wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
+
+    void (*synchronize) (wsp_ggml_backend_t backend);
+
+    // compute graph with a plan
+    wsp_ggml_backend_graph_plan_t (*graph_plan_create) (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+    void (*graph_plan_free) (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+    void (*graph_plan_compute)(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
+
+    // compute graph without a plan
+    void (*graph_compute)(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
+
+    // check if the backend supports an operation
+    bool (*supports_op)(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);
+};
+
+struct wsp_ggml_backend {
+    struct wsp_ggml_backend_i iface;
+
+    wsp_ggml_backend_context_t context;
+};
+
+
+//
+// Backend registry
+//
+
+typedef wsp_ggml_backend_t (*wsp_ggml_backend_init_fn)(const char * params, void * user_data);
+
+void wsp_ggml_backend_register(const char * name, wsp_ggml_backend_init_fn init_fn, wsp_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+
+#ifdef __cplusplus
+}
+#endif
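Because the backend here is just a table of C function pointers, a short hedged sketch of wiring it up may help. Every my_backend_* name below is invented for illustration and is not part of this package; hooks the header marks as (optional) are left NULL.

#include <stdlib.h>

#include "ggml-backend-impl.h"

// Invented no-op callbacks for a hypothetical backend.
static const char * my_backend_get_name(wsp_ggml_backend_t backend) {
    (void) backend;
    return "MyBackend";
}

static void my_backend_free(wsp_ggml_backend_t backend) {
    free(backend); // matches the malloc in my_backend_init below
}

static wsp_ggml_backend_buffer_type_t my_backend_get_default_buffer_type(wsp_ggml_backend_t backend) {
    (void) backend;
    return NULL; // a real backend returns its own wsp_ggml_backend_buffer_type here
}

static void my_backend_graph_compute(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph) {
    (void) backend; (void) cgraph; // a real backend executes the graph nodes here
}

static bool my_backend_supports_op(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op) {
    (void) backend; (void) op;
    return false; // report no supported ops until real kernels exist
}

// Members listed in the order declared in struct wsp_ggml_backend_i above.
static struct wsp_ggml_backend_i my_backend_iface = {
    /* .get_name                = */ my_backend_get_name,
    /* .free                    = */ my_backend_free,
    /* .get_default_buffer_type = */ my_backend_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_from_async   = */ NULL,
    /* .cpy_tensor_to_async     = */ NULL,
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ my_backend_graph_compute,
    /* .supports_op             = */ my_backend_supports_op,
};

wsp_ggml_backend_t my_backend_init(void) {
    wsp_ggml_backend_t backend = malloc(sizeof(struct wsp_ggml_backend));
    backend->iface   = my_backend_iface;
    backend->context = NULL; // backend-specific state would live here
    return backend;
}

A real integration would also implement a wsp_ggml_backend_buffer_type for its memory, and could expose my_backend_init through wsp_ggml_backend_register together with that default buffer type.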