whisper.rn 0.4.0-rc.1 → 0.4.0-rc.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -6
- package/android/build.gradle +4 -0
- package/android/src/main/CMakeLists.txt +21 -1
- package/android/src/main/java/com/rnwhisper/AudioUtils.java +27 -92
- package/android/src/main/java/com/rnwhisper/RNWhisper.java +86 -40
- package/android/src/main/java/com/rnwhisper/WhisperContext.java +85 -131
- package/android/src/main/jni-utils.h +76 -0
- package/android/src/main/jni.cpp +226 -109
- package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
- package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +10 -0
- package/cpp/coreml/whisper-encoder-impl.h +1 -1
- package/cpp/coreml/whisper-encoder.h +4 -0
- package/cpp/coreml/whisper-encoder.mm +5 -3
- package/cpp/ggml-alloc.c +797 -400
- package/cpp/ggml-alloc.h +60 -10
- package/cpp/ggml-backend-impl.h +255 -0
- package/cpp/ggml-backend-reg.cpp +582 -0
- package/cpp/ggml-backend.cpp +2002 -0
- package/cpp/ggml-backend.h +354 -0
- package/cpp/ggml-common.h +1851 -0
- package/cpp/ggml-cpp.h +39 -0
- package/cpp/ggml-cpu-aarch64.cpp +4247 -0
- package/cpp/ggml-cpu-aarch64.h +8 -0
- package/cpp/ggml-cpu-impl.h +531 -0
- package/cpp/ggml-cpu-quants.c +12245 -0
- package/cpp/ggml-cpu-quants.h +63 -0
- package/cpp/ggml-cpu-traits.cpp +36 -0
- package/cpp/ggml-cpu-traits.h +38 -0
- package/cpp/ggml-cpu.c +14792 -0
- package/cpp/ggml-cpu.cpp +653 -0
- package/cpp/ggml-cpu.h +137 -0
- package/cpp/ggml-impl.h +567 -0
- package/cpp/ggml-metal-impl.h +288 -0
- package/cpp/ggml-metal.h +24 -43
- package/cpp/ggml-metal.m +4867 -1080
- package/cpp/ggml-opt.cpp +854 -0
- package/cpp/ggml-opt.h +216 -0
- package/cpp/ggml-quants.c +5238 -0
- package/cpp/ggml-quants.h +100 -0
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +14 -0
- package/cpp/ggml-whisper.metallib +0 -0
- package/cpp/ggml.c +5106 -19431
- package/cpp/ggml.h +847 -669
- package/cpp/gguf.cpp +1329 -0
- package/cpp/gguf.h +202 -0
- package/cpp/rn-audioutils.cpp +68 -0
- package/cpp/rn-audioutils.h +14 -0
- package/cpp/rn-whisper-log.h +11 -0
- package/cpp/rn-whisper.cpp +221 -52
- package/cpp/rn-whisper.h +50 -15
- package/cpp/whisper.cpp +3174 -1533
- package/cpp/whisper.h +176 -44
- package/ios/RNWhisper.mm +139 -46
- package/ios/RNWhisperAudioUtils.h +1 -2
- package/ios/RNWhisperAudioUtils.m +18 -67
- package/ios/RNWhisperContext.h +11 -8
- package/ios/RNWhisperContext.mm +195 -150
- package/jest/mock.js +15 -2
- package/lib/commonjs/NativeRNWhisper.js.map +1 -1
- package/lib/commonjs/index.js +76 -28
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/NativeRNWhisper.js.map +1 -1
- package/lib/module/index.js +76 -28
- package/lib/module/index.js.map +1 -1
- package/lib/module/version.json +1 -1
- package/lib/typescript/NativeRNWhisper.d.ts +13 -4
- package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +37 -5
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +9 -7
- package/src/NativeRNWhisper.ts +20 -4
- package/src/index.ts +98 -42
- package/src/version.json +1 -1
- package/whisper-rn.podspec +13 -20
- package/cpp/README.md +0 -4
- package/cpp/ggml-metal.metal +0 -2353
package/cpp/ggml-cpu.h
ADDED
@@ -0,0 +1,137 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// the compute plan that needs to be prepared for wsp_ggml_graph_compute()
+// since https://github.com/ggml-org/ggml/issues/287
+struct wsp_ggml_cplan {
+    size_t    work_size; // size of work buffer, calculated by `wsp_ggml_graph_plan()`
+    uint8_t * work_data; // work buffer, to be allocated by caller before calling to `wsp_ggml_graph_compute()`
+
+    int n_threads;
+    struct wsp_ggml_threadpool * threadpool;
+
+    // abort wsp_ggml_graph_compute when true
+    wsp_ggml_abort_callback abort_callback;
+    void *                  abort_callback_data;
+};
+
+// numa strategies
+enum wsp_ggml_numa_strategy {
+    WSP_GGML_NUMA_STRATEGY_DISABLED   = 0,
+    WSP_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+    WSP_GGML_NUMA_STRATEGY_ISOLATE    = 2,
+    WSP_GGML_NUMA_STRATEGY_NUMACTL    = 3,
+    WSP_GGML_NUMA_STRATEGY_MIRROR     = 4,
+    WSP_GGML_NUMA_STRATEGY_COUNT
+};
+
+WSP_GGML_BACKEND_API void wsp_ggml_numa_init(enum wsp_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+WSP_GGML_BACKEND_API bool wsp_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_i32(struct wsp_ggml_context * ctx, int32_t value);
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_f32(struct wsp_ggml_context * ctx, float value);
+
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_i32 (struct wsp_ggml_tensor * tensor, int32_t value);
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_f32 (struct wsp_ggml_tensor * tensor, float value);
+
+WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_1d(const struct wsp_ggml_tensor * tensor, int i);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_i32_1d(const struct wsp_ggml_tensor * tensor, int i, int32_t value);
+
+WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+WSP_GGML_BACKEND_API float wsp_ggml_get_f32_1d(const struct wsp_ggml_tensor * tensor, int i);
+WSP_GGML_BACKEND_API void  wsp_ggml_set_f32_1d(const struct wsp_ggml_tensor * tensor, int i, float value);
+
+WSP_GGML_BACKEND_API float wsp_ggml_get_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+WSP_GGML_BACKEND_API void  wsp_ggml_set_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+WSP_GGML_BACKEND_API struct wsp_ggml_threadpool * wsp_ggml_threadpool_new          (struct wsp_ggml_threadpool_params * params);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_free         (struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API int                          wsp_ggml_threadpool_get_n_threads(struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_pause        (struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_resume       (struct wsp_ggml_threadpool * threadpool);
+
+// wsp_ggml_graph_plan() has to be called before wsp_ggml_graph_compute()
+// when plan.work_size > 0, caller must allocate memory for plan.work_data
+WSP_GGML_BACKEND_API struct wsp_ggml_cplan wsp_ggml_graph_plan(
+                  const struct wsp_ggml_cgraph * cgraph,
+                                           int   n_threads, /* = WSP_GGML_DEFAULT_N_THREADS */
+                    struct wsp_ggml_threadpool * threadpool /* = NULL */ );
+WSP_GGML_BACKEND_API enum wsp_ggml_status  wsp_ggml_graph_compute(struct wsp_ggml_cgraph * cgraph, struct wsp_ggml_cplan * cplan);
+
+// same as wsp_ggml_graph_compute() but the work data is allocated as a part of the context
+// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+WSP_GGML_BACKEND_API enum wsp_ggml_status  wsp_ggml_graph_compute_with_ctx(struct wsp_ggml_context * ctx, struct wsp_ggml_cgraph * cgraph, int n_threads);
+
+//
+// system info
+//
+
+// x86
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sse3       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_ssse3      (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx_vnni   (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx2       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_f16c       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fma        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512     (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vbmi(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vnni(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_bf16(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_amx_int8   (void);
+// ARM
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_neon       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_arm_fma    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fp16_va    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_dotprod    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_matmul_int8(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sve        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sme        (void);
+// other
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_riscv_v    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_vsx        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_vxe        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_wasm_simd  (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_llamafile  (void);
+
+// Internal types and functions exposed for tests and benchmarks
+
+typedef void (*wsp_ggml_vec_dot_t)(int n, float * WSP_GGML_RESTRICT s, size_t bs, const void * WSP_GGML_RESTRICT x, size_t bx,
+                                   const void * WSP_GGML_RESTRICT y, size_t by, int nrc);
+
+struct wsp_ggml_type_traits_cpu {
+    wsp_ggml_from_float_t from_float;
+    wsp_ggml_vec_dot_t    vec_dot;
+    enum wsp_ggml_type    vec_dot_type;
+    int64_t               nrows; // number of rows to process simultaneously
+};
+
+WSP_GGML_BACKEND_API const struct wsp_ggml_type_traits_cpu * wsp_ggml_get_type_traits_cpu(enum wsp_ggml_type type);
+
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_init(void);
+
+//
+// CPU backend
+//
+
+WSP_GGML_BACKEND_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+
+WSP_GGML_BACKEND_API bool wsp_ggml_backend_is_cpu                (wsp_ggml_backend_t backend);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_n_threads     (wsp_ggml_backend_t backend_cpu, int n_threads);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_threadpool    (wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+WSP_GGML_BACKEND_API wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
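The plan/compute split declared above is the contract the new CPU backend enforces: wsp_ggml_graph_plan() sizes the scratch buffer for a graph, the caller allocates plan.work_data, and wsp_ggml_graph_compute() runs the graph against it. A minimal sketch of a caller honoring that contract in C (the wrapper function is illustrative, not part of the package; WSP_GGML_STATUS_ALLOC_FAILED is assumed to mirror the upstream ggml status enum):

    #include <stdlib.h>
    #include "ggml.h"
    #include "ggml-cpu.h"

    static enum wsp_ggml_status compute_on_cpu(struct wsp_ggml_cgraph * gf, int n_threads) {
        // 1. plan: fills in plan.work_size for this graph
        struct wsp_ggml_cplan plan = wsp_ggml_graph_plan(gf, n_threads, /*threadpool*/ NULL);

        // 2. when plan.work_size > 0, the caller owns the work buffer
        plan.work_data = NULL;
        if (plan.work_size > 0) {
            plan.work_data = malloc(plan.work_size);
            if (plan.work_data == NULL) {
                return WSP_GGML_STATUS_ALLOC_FAILED; // assumed status value, mirrors upstream ggml
            }
        }

        // 3. compute, then release the scratch buffer (free(NULL) is a no-op)
        enum wsp_ggml_status status = wsp_ggml_graph_compute(gf, &plan);
        free(plan.work_data);
        return status;
    }

The same buffer can be reused across calls as long as it stays at least plan.work_size bytes; wsp_ggml_graph_compute_with_ctx() exists for callers that would rather let the context own that allocation.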
package/cpp/ggml-impl.h
ADDED
@@ -0,0 +1,567 @@
+#pragma once
+
+// GGML internal header
+
+#include "ggml.h"
+#include "gguf.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
+#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
+#if defined(__F16C__)
+#include <immintrin.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MIN
+#    define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#    define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+// required for mmap as gguf only guarantees 32-byte alignment
+#define TENSOR_ALIGNMENT 32
+
+// static_assert should be a #define, but if it's not,
+// fall back to the _Static_assert C11 keyword.
+// if C99 - static_assert is noop
+// ref: https://stackoverflow.com/a/53923785/4039976
+#ifndef __cplusplus
+#ifndef static_assert
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+#define static_assert(cond, msg) _Static_assert(cond, msg)
+#else
+#define static_assert(cond, msg) struct global_scope_noop_trick
+#endif
+#endif
+#endif
+
+static inline int wsp_ggml_up32(int n) {
+    return (n + 31) & ~31;
+}
+
+//static inline int wsp_ggml_up64(int n) {
+//    return (n + 63) & ~63;
+//}
+
+static inline int wsp_ggml_up(int n, int m) {
+    // assert m is a power of 2
+    WSP_GGML_ASSERT((m & (m - 1)) == 0);
+    return (n + m - 1) & ~(m - 1);
+}
+
+//
+// logging
+//
+
+WSP_GGML_ATTRIBUTE_FORMAT(2, 3)
+WSP_GGML_API void wsp_ggml_log_internal        (enum wsp_ggml_log_level level, const char * format, ...);
+WSP_GGML_API void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * text, void * user_data);
+
+#define WSP_GGML_LOG(...)       wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define WSP_GGML_LOG_INFO(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define WSP_GGML_LOG_WARN(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define WSP_GGML_LOG_ERROR(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define WSP_GGML_LOG_DEBUG(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define WSP_GGML_LOG_CONT(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
+#define WSP_GGML_DEBUG 0
+
+#if (WSP_GGML_DEBUG >= 1)
+#define WSP_GGML_PRINT_DEBUG(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG(...)
+#endif
+
+#if (WSP_GGML_DEBUG >= 5)
+#define WSP_GGML_PRINT_DEBUG_5(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG_5(...)
+#endif
+
+#if (WSP_GGML_DEBUG >= 10)
+#define WSP_GGML_PRINT_DEBUG_10(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG_10(...)
+#endif
+
+// tensor params
+
+static void wsp_ggml_set_op_params(struct wsp_ggml_tensor * tensor, const void * params, size_t params_size) {
+    WSP_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
+    assert(params_size <= WSP_GGML_MAX_OP_PARAMS);
+    memcpy(tensor->op_params, params, params_size);
+}
+
+static int32_t wsp_ggml_get_op_params_i32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    return ((const int32_t *)(tensor->op_params))[i];
+}
+
+static float wsp_ggml_get_op_params_f32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
+    return ((const float *)(tensor->op_params))[i];
+}
+
+static void wsp_ggml_set_op_params_i32(struct wsp_ggml_tensor * tensor, uint32_t i, int32_t value) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    ((int32_t *)(tensor->op_params))[i] = value;
+}
+
+static void wsp_ggml_set_op_params_f32(struct wsp_ggml_tensor * tensor, uint32_t i, float value) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
+    ((float *)(tensor->op_params))[i] = value;
+}
+
+struct wsp_ggml_map_custom1_op_params {
+    wsp_ggml_custom1_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+struct wsp_ggml_map_custom2_op_params {
+    wsp_ggml_custom2_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+struct wsp_ggml_map_custom3_op_params {
+    wsp_ggml_custom3_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+// bitset
+
+typedef uint32_t wsp_ggml_bitset_t;
+
+static_assert(sizeof(wsp_ggml_bitset_t) == 4, "bitset_t constants must be updated");
+#define BITSET_SHR 5 // log2(sizeof(wsp_ggml_bitset_t)*8)
+#define BITSET_MASK (sizeof(wsp_ggml_bitset_t)*8 - 1)
+
+static size_t wsp_ggml_bitset_size(size_t n) {
+    return (n + BITSET_MASK) >> BITSET_SHR;
+}
+
+static inline bool wsp_ggml_bitset_get(const wsp_ggml_bitset_t * bitset, size_t i) {
+    return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
+}
+
+static inline void wsp_ggml_bitset_set(wsp_ggml_bitset_t * bitset, size_t i) {
+    bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
+}
+
+static inline void wsp_ggml_bitset_clear(wsp_ggml_bitset_t * bitset, size_t i) {
+    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
+}
+
+// hash set
+
+#define WSP_GGML_HASHSET_FULL           ((size_t)-1)
+#define WSP_GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
+
+struct wsp_ggml_hash_set {
+    size_t size;
+    wsp_ggml_bitset_t * used;       // whether or not the keys are in use i.e. set
+    struct wsp_ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if wsp_ggml_bitset_get(used, i)
+};
+
+struct wsp_ggml_hash_set wsp_ggml_hash_set_new(size_t size);
+void                     wsp_ggml_hash_set_free(struct wsp_ggml_hash_set * hash_set);
+
+// returns the minimum size for a hash set that can hold min_sz elements
+size_t wsp_ggml_hash_size(size_t min_sz);
+
+// remove all elements from the hash set
+void wsp_ggml_hash_set_reset(struct wsp_ggml_hash_set * hash_set);
+
+// returns true if key is in the hash set
+static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
+
+// returns WSP_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key);
+
+// returns WSP_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
+static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
+
+// return index, asserts if table is full
+static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
+
+// hash function for wsp_ggml_tensor
+static inline size_t wsp_ggml_hash(const struct wsp_ggml_tensor * p) {
+    // the last 4 bits are always zero due to alignment
+    return (size_t)(uintptr_t)p >> 4;
+}
+
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    while (wsp_ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
+        i = (i + 1) % hash_set->size;
+        if (i == h) {
+            // visited all hash table entries -> not found
+            return WSP_GGML_HASHSET_FULL;
+        }
+    }
+    return i;
+}
+
+static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t i = wsp_ggml_hash_find(hash_set, key);
+    return i != WSP_GGML_HASHSET_FULL && wsp_ggml_bitset_get(hash_set->used, i);
+}
+
+static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    do {
+        if (!wsp_ggml_bitset_get(hash_set->used, i)) {
+            wsp_ggml_bitset_set(hash_set->used, i);
+            hash_set->keys[i] = key;
+            return i;
+        }
+        if (hash_set->keys[i] == key) {
+            return WSP_GGML_HASHSET_ALREADY_EXISTS;
+        }
+        i = (i + 1) % hash_set->size;
+    } while (i != h);
+
+    // visited all hash table entries -> not found
+    WSP_GGML_ABORT("fatal error");
+}
+
+static size_t wsp_ggml_hash_find_or_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+    size_t h = wsp_ggml_hash(key) % hash_set->size;
+
+    // linear probing
+    size_t i = h;
+    do {
+        if (!wsp_ggml_bitset_get(hash_set->used, i)) {
+            wsp_ggml_bitset_set(hash_set->used, i);
+            hash_set->keys[i] = key;
+            return i;
+        }
+        if (hash_set->keys[i] == key) {
+            return i;
+        }
+        i = (i + 1) % hash_set->size;
+    } while (i != h);
+
+    // visited all hash table entries -> not found
+    WSP_GGML_ABORT("fatal error");
+}
+
+// computation graph
+
+enum wsp_ggml_cgraph_eval_order {
+    WSP_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
+    WSP_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
+    WSP_GGML_CGRAPH_EVAL_ORDER_COUNT
+};
+
+struct wsp_ggml_cgraph {
+    int size;    // maximum number of nodes/leafs/grads/grad_accs
+    int n_nodes; // number of nodes currently in use
+    int n_leafs; // number of leafs currently in use
+
+    struct wsp_ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
+    struct wsp_ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
+    struct wsp_ggml_tensor ** grad_accs; // accumulators for node gradients
+    struct wsp_ggml_tensor ** leafs;     // tensors with constant data
+
+    struct wsp_ggml_hash_set visited_hash_set;
+
+    enum wsp_ggml_cgraph_eval_order order;
+};
+
+// returns a slice of cgraph with nodes [i0, i1)
+// the slice does not have leafs or gradients
+// if you need the gradients, get them from the original graph
+struct wsp_ggml_cgraph wsp_ggml_graph_view(struct wsp_ggml_cgraph * cgraph, int i0, int i1);
+
+// Memory allocation
+
+WSP_GGML_API void * wsp_ggml_aligned_malloc(size_t size);
+WSP_GGML_API void   wsp_ggml_aligned_free(void * ptr, size_t size);
+
+// FP16 to FP32 conversion
+
+#if defined(__ARM_NEON)
+#if defined(_MSC_VER) || (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11)
+typedef uint16_t wsp_ggml_fp16_internal_t;
+#else
+typedef __fp16 wsp_ggml_fp16_internal_t;
+#endif
+#endif
+
+#if defined(__ARM_NEON) && !defined(_MSC_VER) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11)
+#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+
+#define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+
+static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+    wsp_ggml_fp16_internal_t tmp;
+    memcpy(&tmp, &h, sizeof(wsp_ggml_fp16_t));
+    return (float)tmp;
+}
+
+static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+    wsp_ggml_fp16_t res;
+    wsp_ggml_fp16_internal_t tmp = f;
+    memcpy(&res, &tmp, sizeof(wsp_ggml_fp16_t));
+    return res;
+}
+
+#elif defined(__F16C__)
+
+#ifdef _MSC_VER
+#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+#else
+#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+#endif
+
+#elif defined(__POWER9_VECTOR__)
+
+#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+/* the inline asm below is about 12% faster than the lookup method */
+#define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
+#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+    register float f;
+    register double d;
+    __asm__(
+        "mtfprd %0,%2\n"
+        "xscvhpdp %0,%0\n"
+        "frsp %1,%0\n" :
+        /* temp */ "=d"(d),
+        /* out */  "=f"(f):
+        /* in */   "r"(h));
+    return f;
+}
+
+static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+    register double d;
+    register wsp_ggml_fp16_t r;
+    __asm__( /* xscvdphp can work on double or single precision */
+        "xscvdphp %0,%2\n"
+        "mffprd %1,%0\n" :
+        /* temp */ "=d"(d),
+        /* out */  "=r"(r):
+        /* in */   "f"(f));
+    return r;
+}
+
+#else
+
+// FP16 <-> FP32
+// ref: https://github.com/Maratyszcza/FP16
+
+static inline float fp32_from_bits(uint32_t w) {
+    union {
+        uint32_t as_bits;
+        float as_value;
+    } fp32;
+    fp32.as_bits = w;
+    return fp32.as_value;
+}
+
+static inline uint32_t fp32_to_bits(float f) {
+    union {
+        float as_value;
+        uint32_t as_bits;
+    } fp32;
+    fp32.as_value = f;
+    return fp32.as_bits;
+}
+
+static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+    const uint32_t w = (uint32_t) h << 16;
+    const uint32_t sign = w & UINT32_C(0x80000000);
+    const uint32_t two_w = w + w;
+
+    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+    const float exp_scale = 0x1.0p-112f;
+#else
+    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+#endif
+    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+    const uint32_t magic_mask = UINT32_C(126) << 23;
+    const float magic_bias = 0.5f;
+    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+    const uint32_t result = sign |
+        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+    return fp32_from_bits(result);
+}
+
+static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+    const float scale_to_inf = 0x1.0p+112f;
+    const float scale_to_zero = 0x1.0p-110f;
+#else
+    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
+    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+#endif
+    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+    const uint32_t w = fp32_to_bits(f);
+    const uint32_t shl1_w = w + w;
+    const uint32_t sign = w & UINT32_C(0x80000000);
+    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+    if (bias < UINT32_C(0x71000000)) {
+        bias = UINT32_C(0x71000000);
+    }
+
+    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+    const uint32_t bits = fp32_to_bits(base);
+    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+    const uint32_t nonsign = exp_bits + mantissa_bits;
+    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+}
+
+#define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+#define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+
+#endif // defined(__ARM_NEON) && (!defined(__MSC_VER)
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in wsp_ggml_init()
+WSP_GGML_API float wsp_ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON, it's quicker to directly convert x -> x instead of calling into wsp_ggml_lookup_fp16_to_fp32,
+// so we define WSP_GGML_FP16_TO_FP32 and WSP_GGML_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
+#if !defined(WSP_GGML_FP16_TO_FP32)
+inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
+    uint16_t s;
+    memcpy(&s, &f, sizeof(uint16_t));
+    return wsp_ggml_table_f32_f16[s];
+}
+
+#define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_lookup_fp16_to_fp32(x)
+#endif
+
+#if !defined(WSP_GGML_FP32_TO_FP16)
+#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
+
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───┐
+ *     0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───────────────────┐
+ *     0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ *       ┌sign
+ *       │
+ *       │  ┌exponent
+ *       │  │
+ *       │  │    ┌mantissa
+ *       │  │    │
+ *       │┌─┴─┐┌─┴──────┐
+ *     0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float wsp_ggml_compute_bf16_to_fp32(wsp_ggml_bf16_t h) {
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = (uint32_t)h.bits << 16;
+    return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This is binary identical with Google Brain float conversion.
+ * Floats shall round to nearest even, and NANs shall be quiet.
+ * Subnormals aren't flushed to zero, except perhaps when used.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline wsp_ggml_bf16_t wsp_ggml_compute_fp32_to_bf16(float s) {
+    wsp_ggml_bf16_t h;
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = s;
+    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+        h.bits = (u.i >> 16) | 64; /* force to quiet */
+        return h;
+    }
+    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+    return h;
+}
+
+#define WSP_GGML_FP32_TO_BF16(x) wsp_ggml_compute_fp32_to_bf16(x)
+#define WSP_GGML_BF16_TO_FP32(x) wsp_ggml_compute_bf16_to_fp32(x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+#include <vector>
+
+// expose GGUF internals for test code
+WSP_GGML_API size_t wsp_gguf_type_size(enum wsp_gguf_type type);
+WSP_GGML_API struct wsp_gguf_context * wsp_gguf_init_from_file_impl(FILE * file, struct wsp_gguf_init_params params);
+WSP_GGML_API void wsp_gguf_write_to_buf(const struct wsp_gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
+#endif // __cplusplus
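Most of the new graph machinery leans on the internal hash set declared above: open addressing with linear probing, a bitset marking which slots are in use, and the tensor pointer itself (shifted right by 4, since tensors are 16-byte aligned) as the hash. A short sketch of the visit-each-node-once pattern this enables, in C (illustrative only; ggml-impl.h is an internal header, and walk_nodes_once is a hypothetical caller):

    #include "ggml-impl.h"

    static void walk_nodes_once(const struct wsp_ggml_cgraph * g) {
        // wsp_ggml_hash_size() rounds the requested capacity up to a usable table size
        struct wsp_ggml_hash_set seen = wsp_ggml_hash_set_new(wsp_ggml_hash_size(g->n_nodes));

        for (int i = 0; i < g->n_nodes; i++) {
            struct wsp_ggml_tensor * node = g->nodes[i];
            // insert reports duplicates instead of storing the key twice
            if (wsp_ggml_hash_insert(&seen, node) == WSP_GGML_HASHSET_ALREADY_EXISTS) {
                continue; // same tensor reachable along two paths -> process once
            }
            // ... per-node work goes here ...
        }

        wsp_ggml_hash_set_free(&seen);
    }

The bf16 helpers at the end of the header are likewise compact: wsp_ggml_compute_fp32_to_bf16() keeps the top 16 bits of the float, where the 0x7fff + ((u.i >> 16) & 1) term implements round-to-nearest-even and the | 64 forces NaNs quiet, exactly as the doc comment promises.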