whisper.rn 0.3.8 → 0.4.0-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +2 -1
- package/android/src/main/jni.cpp +7 -1
- package/cpp/coreml/whisper-encoder.mm +7 -1
- package/cpp/ggml-alloc.c +633 -0
- package/cpp/ggml-alloc.h +26 -0
- package/cpp/ggml-metal.h +85 -0
- package/cpp/ggml-metal.m +1283 -0
- package/cpp/ggml-metal.metal +2353 -0
- package/cpp/ggml.c +5024 -2924
- package/cpp/ggml.h +569 -95
- package/cpp/whisper.cpp +993 -668
- package/cpp/whisper.h +10 -0
- package/ios/RNWhisperAudioSessionUtils.m +7 -1
- package/ios/RNWhisperContext.mm +9 -3
- package/jest/mock.js +10 -0
- package/package.json +1 -1
- package/whisper-rn.podspec +8 -2
package/cpp/ggml-alloc.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+
+#include "ggml.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+WSP_GGML_API struct wsp_ggml_allocr * wsp_ggml_allocr_new(void * data, size_t size, size_t alignment);
+WSP_GGML_API struct wsp_ggml_allocr * wsp_ggml_allocr_new_measure(size_t alignment);
+
+// tell the allocator to parse nodes following the order described in the list
+// you should call this if your graph are optimized to execute out-of-order
+WSP_GGML_API void wsp_ggml_allocr_set_parse_seq(struct wsp_ggml_allocr * alloc, const int * list, int n);
+
+WSP_GGML_API void wsp_ggml_allocr_free(struct wsp_ggml_allocr * alloc);
+WSP_GGML_API bool wsp_ggml_allocr_is_measure(struct wsp_ggml_allocr * alloc);
+WSP_GGML_API void wsp_ggml_allocr_reset(struct wsp_ggml_allocr * alloc);
+WSP_GGML_API void wsp_ggml_allocr_alloc(struct wsp_ggml_allocr * alloc, struct wsp_ggml_tensor * tensor);
+WSP_GGML_API size_t wsp_ggml_allocr_alloc_graph(struct wsp_ggml_allocr * alloc, struct wsp_ggml_cgraph * graph);
+
+
+#ifdef __cplusplus
+}
+#endif
package/cpp/ggml-metal.h
ADDED
@@ -0,0 +1,85 @@
+// An interface allowing to compute wsp_ggml_cgraph with Metal
+//
+// This is a fully functional interface that extends ggml with GPU support for Apple devices.
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
+//
+// How it works?
+//
+// As long as your program can create and evaluate a wsp_ggml_cgraph on the CPU, you can use this
+// interface to evaluate the same graph on the GPU. Instead of using wsp_ggml_graph_compute(), you
+// use wsp_ggml_metal_graph_compute() (or wsp_ggml_vulkan_graph_compute(), etc.)
+//
+// You only need to make sure that all memory buffers that you used during the graph creation
+// are mapped to the device memory with the wsp_ggml_metal_add_buffer() function. This mapping is
+// used during the graph evaluation to determine the arguments of the compute kernels.
+//
+// Synchronization between device and host memory (for example for input and output tensors)
+// is done with the wsp_ggml_metal_set_tensor() and wsp_ggml_metal_get_tensor() functions.
+//
+
+#pragma once
+
+#include <stddef.h>
+#include <stdbool.h>
+
+// max memory buffers that can be mapped to the device
+#define WSP_GGML_METAL_MAX_BUFFERS 16
+#define WSP_GGML_METAL_MAX_COMMAND_BUFFERS 32
+
+struct wsp_ggml_tensor;
+struct wsp_ggml_cgraph;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct wsp_ggml_metal_context;
+
+// number of command buffers to use
+struct wsp_ggml_metal_context * wsp_ggml_metal_init(int n_cb);
+void wsp_ggml_metal_free(struct wsp_ggml_metal_context * ctx);
+
+void * wsp_ggml_metal_host_malloc(size_t n);
+void   wsp_ggml_metal_host_free  (void * data);
+
+// set the number of command buffers to use
+void wsp_ggml_metal_set_n_cb(struct wsp_ggml_metal_context * ctx, int n_cb);
+
+// creates a mapping between a host memory buffer and a device memory buffer
+// - make sure to map all buffers used in the graph before calling wsp_ggml_metal_graph_compute
+// - the mapping is used during computation to determine the arguments of the compute kernels
+// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
+// - max_size specifies the maximum size of a tensor and is used to create shared views such
+//   that it is guaranteed that the tensor will fit in at least one of the views
+//
+bool wsp_ggml_metal_add_buffer(
+        struct wsp_ggml_metal_context * ctx,
+                           const char * name,
+                                 void * data,
+                               size_t   size,
+                               size_t   max_size);
+
+// set data from host memory into the device
+void wsp_ggml_metal_set_tensor(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_tensor * t);
+
+// get data from the device into host memory
+void wsp_ggml_metal_get_tensor(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_tensor * t);
+
+// try to find operations that can be run concurrently in the graph
+// you should run it again if the topology of your graph changes
+void wsp_ggml_metal_graph_find_concurrency(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_cgraph * gf, bool check_mem);
+
+// if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized
+int wsp_ggml_metal_if_optimized(struct wsp_ggml_metal_context * ctx);
+
+// output the concur_list for wsp_ggml_alloc
+int * wsp_ggml_metal_get_concur_list(struct wsp_ggml_metal_context * ctx);
+
+// same as wsp_ggml_graph_compute but uses Metal
+// creates gf->n_threads command buffers in parallel
+void wsp_ggml_metal_graph_compute(struct wsp_ggml_metal_context * ctx, struct wsp_ggml_cgraph * gf);
+
+#ifdef __cplusplus
+}
+#endif
+