whisper.rn 0.4.0-rc.3 → 0.4.0-rc.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -6
- package/android/build.gradle +4 -0
- package/android/src/main/CMakeLists.txt +7 -0
- package/android/src/main/java/com/rnwhisper/AudioUtils.java +0 -80
- package/android/src/main/java/com/rnwhisper/RNWhisper.java +6 -1
- package/android/src/main/java/com/rnwhisper/WhisperContext.java +53 -135
- package/android/src/main/jni-utils.h +76 -0
- package/android/src/main/jni.cpp +188 -109
- package/cpp/README.md +1 -1
- package/cpp/coreml/whisper-encoder-impl.h +1 -1
- package/cpp/coreml/whisper-encoder.h +4 -0
- package/cpp/coreml/whisper-encoder.mm +4 -2
- package/cpp/ggml-alloc.c +451 -282
- package/cpp/ggml-alloc.h +74 -8
- package/cpp/ggml-backend-impl.h +112 -0
- package/cpp/ggml-backend.c +1357 -0
- package/cpp/ggml-backend.h +181 -0
- package/cpp/ggml-impl.h +243 -0
- package/cpp/{ggml-metal.metal → ggml-metal-whisper.metal} +1556 -329
- package/cpp/ggml-metal.h +28 -1
- package/cpp/ggml-metal.m +1128 -308
- package/cpp/ggml-quants.c +7382 -0
- package/cpp/ggml-quants.h +224 -0
- package/cpp/ggml.c +3848 -5245
- package/cpp/ggml.h +353 -155
- package/cpp/rn-audioutils.cpp +68 -0
- package/cpp/rn-audioutils.h +14 -0
- package/cpp/rn-whisper-log.h +11 -0
- package/cpp/rn-whisper.cpp +141 -59
- package/cpp/rn-whisper.h +47 -15
- package/cpp/whisper.cpp +1750 -964
- package/cpp/whisper.h +97 -15
- package/ios/RNWhisper.mm +15 -9
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/contents.xcworkspacedata +4 -0
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -0
- package/ios/RNWhisper.xcodeproj/project.xcworkspace/xcuserdata/jhen.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
- package/ios/RNWhisper.xcodeproj/xcuserdata/jhen.xcuserdatad/xcschemes/xcschememanagement.plist +19 -0
- package/ios/RNWhisperAudioUtils.h +0 -2
- package/ios/RNWhisperAudioUtils.m +0 -56
- package/ios/RNWhisperContext.h +8 -12
- package/ios/RNWhisperContext.mm +132 -138
- package/jest/mock.js +1 -1
- package/lib/commonjs/NativeRNWhisper.js.map +1 -1
- package/lib/commonjs/index.js +28 -9
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/NativeRNWhisper.js.map +1 -1
- package/lib/module/index.js +28 -9
- package/lib/module/index.js.map +1 -1
- package/lib/module/version.json +1 -1
- package/lib/typescript/NativeRNWhisper.d.ts +7 -1
- package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +7 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +6 -5
- package/src/NativeRNWhisper.ts +8 -1
- package/src/index.ts +29 -17
- package/src/version.json +1 -1
- package/whisper-rn.podspec +1 -2
package/cpp/ggml-backend.h
ADDED
@@ -0,0 +1,181 @@

```c
#pragma once

#include "ggml.h"
#include "ggml-alloc.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct wsp_ggml_backend_buffer_type * wsp_ggml_backend_buffer_type_t;
typedef struct wsp_ggml_backend_buffer * wsp_ggml_backend_buffer_t;
typedef struct wsp_ggml_backend * wsp_ggml_backend_t;
typedef void * wsp_ggml_backend_graph_plan_t;

//
// Backend buffer
//

// buffer type
WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_buft_alloc_buffer(wsp_ggml_backend_buffer_type_t buft, size_t size);
WSP_GGML_API size_t wsp_ggml_backend_buft_get_alignment (wsp_ggml_backend_buffer_type_t buft);
WSP_GGML_API size_t wsp_ggml_backend_buft_get_alloc_size(wsp_ggml_backend_buffer_type_t buft, struct wsp_ggml_tensor * tensor);
WSP_GGML_API bool wsp_ggml_backend_buft_supports_backend(wsp_ggml_backend_buffer_type_t buft, wsp_ggml_backend_t backend);

// buffer
WSP_GGML_API void wsp_ggml_backend_buffer_free (wsp_ggml_backend_buffer_t buffer);
WSP_GGML_API void * wsp_ggml_backend_buffer_get_base (wsp_ggml_backend_buffer_t buffer);
WSP_GGML_API size_t wsp_ggml_backend_buffer_get_size (wsp_ggml_backend_buffer_t buffer);
WSP_GGML_API void wsp_ggml_backend_buffer_init_tensor (wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alignment (wsp_ggml_backend_buffer_t buffer);
WSP_GGML_API size_t wsp_ggml_backend_buffer_get_alloc_size(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);
WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_buffer_type(wsp_ggml_backend_buffer_t buffer);

//
// Backend
//

WSP_GGML_API const char * wsp_ggml_backend_name(wsp_ggml_backend_t backend);
WSP_GGML_API void wsp_ggml_backend_free(wsp_ggml_backend_t backend);

WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_get_default_buffer_type(wsp_ggml_backend_t backend);
WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_alloc_buffer(wsp_ggml_backend_t backend, size_t size);
WSP_GGML_API size_t wsp_ggml_backend_get_alignment(wsp_ggml_backend_t backend);

WSP_GGML_API void wsp_ggml_backend_tensor_set_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
WSP_GGML_API void wsp_ggml_backend_tensor_get_async(wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * tensor, void * data, size_t offset, size_t size);

WSP_GGML_API void wsp_ggml_backend_tensor_set(      struct wsp_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
WSP_GGML_API void wsp_ggml_backend_tensor_get(const struct wsp_ggml_tensor * tensor,       void * data, size_t offset, size_t size);

WSP_GGML_API void wsp_ggml_backend_synchronize(wsp_ggml_backend_t backend);

WSP_GGML_API wsp_ggml_backend_graph_plan_t wsp_ggml_backend_graph_plan_create (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);

WSP_GGML_API void wsp_ggml_backend_graph_plan_free   (wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
WSP_GGML_API void wsp_ggml_backend_graph_plan_compute(wsp_ggml_backend_t backend, wsp_ggml_backend_graph_plan_t plan);
WSP_GGML_API void wsp_ggml_backend_graph_compute     (wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * cgraph);
WSP_GGML_API bool wsp_ggml_backend_supports_op       (wsp_ggml_backend_t backend, const struct wsp_ggml_tensor * op);

// tensor copy between different backends
WSP_GGML_API void wsp_ggml_backend_tensor_copy(struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst);
WSP_GGML_API void wsp_ggml_backend_tensor_copy_async(wsp_ggml_backend_t backend, struct wsp_ggml_tensor * src, struct wsp_ggml_tensor * dst); // automatic fallback to sync copy

//
// CPU backend
//

WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);

WSP_GGML_API bool wsp_ggml_backend_is_cpu(wsp_ggml_backend_t backend);
WSP_GGML_API void wsp_ggml_backend_cpu_set_n_threads(wsp_ggml_backend_t backend_cpu, int n_threads);

// Create a backend buffer from an existing pointer
WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);

WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_cpu_buffer_type(void);

//
// Backend registry
//

// The backend registry is a registry of all the available backends, and allows initializing backends in a generic way

WSP_GGML_API size_t wsp_ggml_backend_reg_get_count(void);
WSP_GGML_API size_t wsp_ggml_backend_reg_find_by_name(const char * name);
WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
WSP_GGML_API const char * wsp_ggml_backend_reg_get_name(size_t i);
WSP_GGML_API wsp_ggml_backend_t wsp_ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
WSP_GGML_API wsp_ggml_backend_buffer_type_t wsp_ggml_backend_reg_get_default_buffer_type(size_t i);
WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_reg_alloc_buffer(size_t i, size_t size);

//
// Backend scheduler
//

// The backend scheduler allows for multiple backends to be used together
// Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
// The backends are selected based on:
// - the backend that supports the operation
// - the location of the pre-allocated tensors (e.g. the weights)
/*
  Example usage:

    sched = wsp_ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
    // sched is initialized with measure allocators and cannot be used until allocated with a measure graph

    // initialize buffers from a measure graph
    measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed

    // in build_graph:
    build_graph(...) {
        // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
        alloc_cpu = wsp_ggml_backend_sched_get_allocr(sched, backend_cpu);
        wsp_ggml_allocr_alloc(alloc_cpu, tensor);

        // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
        struct wsp_ggml_tensor * node = wsp_ggml_mul_mat(ctx, ...);
        wsp_ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
    }

    // allocate backend buffers from measure graph
    wsp_ggml_backend_sched_init_measure(sched, measure_graph);

    // the scheduler is now ready to compute graphs

    // compute
    graph = build_graph(sched);
    wsp_ggml_backend_sched_graph_compute(sched, graph);
*/

struct wsp_ggml_backend_sched;
typedef struct wsp_ggml_backend_sched * wsp_ggml_backend_sched_t;

// Initialize a backend scheduler
WSP_GGML_API wsp_ggml_backend_sched_t wsp_ggml_backend_sched_new(wsp_ggml_backend_t * backends, int n_backends);

WSP_GGML_API void wsp_ggml_backend_sched_free(wsp_ggml_backend_sched_t sched);

// Initialize backend buffers from a measure graph
WSP_GGML_API void wsp_ggml_backend_sched_init_measure(wsp_ggml_backend_sched_t sched, struct wsp_ggml_cgraph * measure_graph);

WSP_GGML_API wsp_ggml_tallocr_t wsp_ggml_backend_sched_get_tallocr(wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);
WSP_GGML_API wsp_ggml_backend_buffer_t wsp_ggml_backend_sched_get_buffer (wsp_ggml_backend_sched_t sched, wsp_ggml_backend_t backend);

WSP_GGML_API void wsp_ggml_backend_sched_set_node_backend(wsp_ggml_backend_sched_t sched, struct wsp_ggml_tensor * node, wsp_ggml_backend_t backend);

// Allocate a graph on the backend scheduler
WSP_GGML_API void wsp_ggml_backend_sched_graph_compute(
        wsp_ggml_backend_sched_t sched,
        struct wsp_ggml_cgraph * graph);

//
// Utils
//

struct wsp_ggml_backend_graph_copy {
    wsp_ggml_backend_buffer_t buffer;
    struct wsp_ggml_context * ctx_allocated;
    struct wsp_ggml_context * ctx_unallocated;
    struct wsp_ggml_cgraph * graph;
};

// Copy a graph to a different backend
WSP_GGML_API struct wsp_ggml_backend_graph_copy wsp_ggml_backend_graph_copy(wsp_ggml_backend_t backend, struct wsp_ggml_cgraph * graph);
WSP_GGML_API void wsp_ggml_backend_graph_copy_free(struct wsp_ggml_backend_graph_copy copy);

typedef bool (*wsp_ggml_backend_eval_callback)(int node_index, struct wsp_ggml_tensor * t1, struct wsp_ggml_tensor * t2, void * user_data);

// Compare the output of two backends
WSP_GGML_API void wsp_ggml_backend_compare_graph_backend(wsp_ggml_backend_t backend1, wsp_ggml_backend_t backend2, struct wsp_ggml_cgraph * graph, wsp_ggml_backend_eval_callback callback, void * user_data);

// Tensor initialization
WSP_GGML_API void wsp_ggml_backend_tensor_alloc(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor, void * addr);
WSP_GGML_API void wsp_ggml_backend_view_init(wsp_ggml_backend_buffer_t buffer, struct wsp_ggml_tensor * tensor);

#ifdef __cplusplus
}
#endif
```
package/cpp/ggml-impl.h
ADDED
@@ -0,0 +1,243 @@

```c
#pragma once

#include "ggml.h"

// GGML internal header

#include <assert.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h> // memcpy
#include <math.h>   // fabsf

#ifdef __cplusplus
extern "C" {
#endif

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define WSP_GGML_FP16_TO_FP32(x) ((float) (x))
#define WSP_GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)

static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register wsp_ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON

// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in wsp_ggml_init()
extern float wsp_ggml_table_f32_f16[1 << 16];

// On ARM NEON, it's quicker to directly convert x -> x instead of calling into wsp_ggml_lookup_fp16_to_fp32,
// so we define WSP_GGML_FP16_TO_FP32 and WSP_GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(WSP_GGML_FP16_TO_FP32) || !defined(WSP_GGML_FP32_TO_FP16)

inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return wsp_ggml_table_f32_f16[s];
}

#define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_lookup_fp16_to_fp32(x)
#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)

#endif

#define WSP_GGML_HASHTABLE_FULL ((size_t)-1)
#define WSP_GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

bool wsp_ggml_hash_contains (const struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);

// returns WSP_GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
size_t wsp_ggml_hash_find (const struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);

// returns WSP_GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
size_t wsp_ggml_hash_insert ( struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);

// return index, asserts if table is full
size_t wsp_ggml_hash_find_or_insert( struct wsp_ggml_hash_set hash_set, struct wsp_ggml_tensor * key);

#ifdef __cplusplus
}
#endif
```
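The FP16 macros above select the fastest conversion path per platform (NEON, F16C, POWER9 inline asm, or the bit-twiddling fallback plus the 256 KB lookup table). A minimal round-trip sketch, assuming `wsp_ggml_fp16_t` is the 16-bit storage type from ggml.h and that `wsp_ggml_init()` has already populated `wsp_ggml_table_f32_f16` (required by the lookup-table path); this is not part of the diff, and note ggml-impl.h is an internal header:

```c
#include <stdio.h>
#include "ggml-impl.h"

// Round-trip a float through half precision using the macros above.
// Assumes wsp_ggml_init() has run so the lookup-table path is valid.
void fp16_roundtrip_demo(void) {
    float x = 0.333f;
    wsp_ggml_fp16_t h = WSP_GGML_FP32_TO_FP16(x);  // narrow to 16-bit
    float y = WSP_GGML_FP16_TO_FP32(h);            // widen back to 32-bit
    // fp16 keeps roughly 3 decimal digits, so y approximates x
    printf("%f -> %f (error %g)\n", x, y, (double)(y - x));
}
```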