whisper.rn 0.4.0-rc.9 → 0.4.1
This diff shows the contents of publicly available package versions as released to their respective public registries; it is provided for informational purposes only.
- package/README.md +74 -1
- package/android/build.gradle +12 -3
- package/android/src/main/CMakeLists.txt +43 -13
- package/android/src/main/java/com/rnwhisper/RNWhisper.java +211 -0
- package/android/src/main/java/com/rnwhisper/WhisperContext.java +64 -36
- package/android/src/main/java/com/rnwhisper/WhisperVadContext.java +157 -0
- package/android/src/main/jni.cpp +205 -0
- package/android/src/main/jniLibs/arm64-v8a/librnwhisper.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnwhisper_v8fp16_va_2.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/librnwhisper.so +0 -0
- package/android/src/main/jniLibs/armeabi-v7a/librnwhisper_vfpv4.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnwhisper.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnwhisper_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnwhisper/RNWhisperModule.java +26 -0
- package/android/src/oldarch/java/com/rnwhisper/RNWhisperModule.java +26 -0
- package/cpp/coreml/whisper-compat.h +10 -0
- package/cpp/coreml/whisper-compat.m +35 -0
- package/cpp/coreml/whisper-decoder-impl.h +27 -15
- package/cpp/coreml/whisper-decoder-impl.m +36 -10
- package/cpp/coreml/whisper-encoder-impl.h +21 -9
- package/cpp/coreml/whisper-encoder-impl.m +29 -3
- package/cpp/ggml-alloc.c +39 -37
- package/cpp/ggml-alloc.h +1 -1
- package/cpp/ggml-backend-impl.h +55 -27
- package/cpp/ggml-backend-reg.cpp +591 -0
- package/cpp/ggml-backend.cpp +336 -955
- package/cpp/ggml-backend.h +70 -42
- package/cpp/ggml-common.h +57 -49
- package/cpp/ggml-cpp.h +39 -0
- package/cpp/ggml-cpu/amx/amx.cpp +221 -0
- package/cpp/ggml-cpu/amx/amx.h +8 -0
- package/cpp/ggml-cpu/amx/common.h +91 -0
- package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
- package/cpp/ggml-cpu/amx/mmq.h +10 -0
- package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- package/cpp/ggml-cpu/arch/arm/quants.c +4113 -0
- package/cpp/ggml-cpu/arch/arm/repack.cpp +2162 -0
- package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
- package/cpp/ggml-cpu/arch/x86/quants.c +4310 -0
- package/cpp/ggml-cpu/arch/x86/repack.cpp +3284 -0
- package/cpp/ggml-cpu/arch-fallback.h +184 -0
- package/cpp/ggml-cpu/binary-ops.cpp +158 -0
- package/cpp/ggml-cpu/binary-ops.h +16 -0
- package/cpp/ggml-cpu/common.h +72 -0
- package/cpp/ggml-cpu/ggml-cpu-impl.h +511 -0
- package/cpp/ggml-cpu/ggml-cpu.c +3473 -0
- package/cpp/ggml-cpu/ggml-cpu.cpp +671 -0
- package/cpp/ggml-cpu/ops.cpp +9085 -0
- package/cpp/ggml-cpu/ops.h +111 -0
- package/cpp/ggml-cpu/quants.c +1157 -0
- package/cpp/ggml-cpu/quants.h +89 -0
- package/cpp/ggml-cpu/repack.cpp +1570 -0
- package/cpp/ggml-cpu/repack.h +98 -0
- package/cpp/ggml-cpu/simd-mappings.h +1006 -0
- package/cpp/ggml-cpu/traits.cpp +36 -0
- package/cpp/ggml-cpu/traits.h +38 -0
- package/cpp/ggml-cpu/unary-ops.cpp +186 -0
- package/cpp/ggml-cpu/unary-ops.h +28 -0
- package/cpp/ggml-cpu/vec.cpp +321 -0
- package/cpp/ggml-cpu/vec.h +973 -0
- package/cpp/ggml-cpu.h +143 -0
- package/cpp/ggml-impl.h +417 -23
- package/cpp/ggml-metal-impl.h +622 -0
- package/cpp/ggml-metal.h +9 -9
- package/cpp/ggml-metal.m +3451 -1344
- package/cpp/ggml-opt.cpp +1037 -0
- package/cpp/ggml-opt.h +237 -0
- package/cpp/ggml-quants.c +296 -10818
- package/cpp/ggml-quants.h +78 -125
- package/cpp/ggml-threading.cpp +12 -0
- package/cpp/ggml-threading.h +14 -0
- package/cpp/ggml-whisper-sim.metallib +0 -0
- package/cpp/ggml-whisper.metallib +0 -0
- package/cpp/ggml.c +4633 -21450
- package/cpp/ggml.h +320 -661
- package/cpp/gguf.cpp +1347 -0
- package/cpp/gguf.h +202 -0
- package/cpp/rn-whisper.cpp +4 -11
- package/cpp/whisper-arch.h +197 -0
- package/cpp/whisper.cpp +2022 -495
- package/cpp/whisper.h +75 -18
- package/ios/CMakeLists.txt +95 -0
- package/ios/RNWhisper.h +5 -0
- package/ios/RNWhisper.mm +147 -0
- package/ios/RNWhisperAudioUtils.m +4 -0
- package/ios/RNWhisperContext.h +5 -0
- package/ios/RNWhisperContext.mm +22 -26
- package/ios/RNWhisperVadContext.h +29 -0
- package/ios/RNWhisperVadContext.mm +152 -0
- package/ios/rnwhisper.xcframework/Info.plist +74 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
- package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/rnwhisper +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
- package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
- package/jest/mock.js +24 -0
- package/lib/commonjs/NativeRNWhisper.js.map +1 -1
- package/lib/commonjs/index.js +111 -1
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/version.json +1 -1
- package/lib/module/NativeRNWhisper.js.map +1 -1
- package/lib/module/index.js +112 -0
- package/lib/module/index.js.map +1 -1
- package/lib/module/version.json +1 -1
- package/lib/typescript/NativeRNWhisper.d.ts +35 -0
- package/lib/typescript/NativeRNWhisper.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +39 -3
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +10 -6
- package/src/NativeRNWhisper.ts +48 -0
- package/src/index.ts +132 -1
- package/src/version.json +1 -1
- package/whisper-rn.podspec +11 -18
- package/cpp/README.md +0 -4
- package/cpp/ggml-aarch64.c +0 -3209
- package/cpp/ggml-aarch64.h +0 -39
- package/cpp/ggml-cpu-impl.h +0 -614
package/cpp/ggml-cpu.h
ADDED
@@ -0,0 +1,143 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// the compute plan that needs to be prepared for wsp_ggml_graph_compute()
+// since https://github.com/ggml-org/ggml/issues/287
+struct wsp_ggml_cplan {
+    size_t    work_size; // size of work buffer, calculated by `wsp_ggml_graph_plan()`
+    uint8_t * work_data; // work buffer, to be allocated by caller before calling to `wsp_ggml_graph_compute()`
+
+    int n_threads;
+    struct wsp_ggml_threadpool * threadpool;
+
+    // abort wsp_ggml_graph_compute when true
+    wsp_ggml_abort_callback abort_callback;
+    void *                  abort_callback_data;
+};
+
+// numa strategies
+enum wsp_ggml_numa_strategy {
+    WSP_GGML_NUMA_STRATEGY_DISABLED   = 0,
+    WSP_GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+    WSP_GGML_NUMA_STRATEGY_ISOLATE    = 2,
+    WSP_GGML_NUMA_STRATEGY_NUMACTL    = 3,
+    WSP_GGML_NUMA_STRATEGY_MIRROR     = 4,
+    WSP_GGML_NUMA_STRATEGY_COUNT
+};
+
+WSP_GGML_BACKEND_API void    wsp_ggml_numa_init(enum wsp_ggml_numa_strategy numa); // call once for better performance on NUMA systems
+WSP_GGML_BACKEND_API bool    wsp_ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_i32(struct wsp_ggml_context * ctx, int32_t value);
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_new_f32(struct wsp_ggml_context * ctx, float value);
+
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_i32 (struct wsp_ggml_tensor * tensor, int32_t value);
+WSP_GGML_BACKEND_API struct wsp_ggml_tensor * wsp_ggml_set_f32 (struct wsp_ggml_tensor * tensor, float value);
+
+WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_1d(const struct wsp_ggml_tensor * tensor, int i);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_i32_1d(const struct wsp_ggml_tensor * tensor, int i, int32_t value);
+
+WSP_GGML_BACKEND_API int32_t wsp_ggml_get_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_i32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
+WSP_GGML_BACKEND_API float   wsp_ggml_get_f32_1d(const struct wsp_ggml_tensor * tensor, int i);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_f32_1d(const struct wsp_ggml_tensor * tensor, int i, float value);
+
+WSP_GGML_BACKEND_API float   wsp_ggml_get_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+WSP_GGML_BACKEND_API void    wsp_ggml_set_f32_nd(const struct wsp_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
+WSP_GGML_BACKEND_API struct wsp_ggml_threadpool * wsp_ggml_threadpool_new          (struct wsp_ggml_threadpool_params * params);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_free         (struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API int                          wsp_ggml_threadpool_get_n_threads(struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_pause        (struct wsp_ggml_threadpool * threadpool);
+WSP_GGML_BACKEND_API void                         wsp_ggml_threadpool_resume       (struct wsp_ggml_threadpool * threadpool);
+
+// wsp_ggml_graph_plan() has to be called before wsp_ggml_graph_compute()
+// when plan.work_size > 0, caller must allocate memory for plan.work_data
+WSP_GGML_BACKEND_API struct wsp_ggml_cplan wsp_ggml_graph_plan(
+              const struct wsp_ggml_cgraph * cgraph,
+                                       int   n_threads, /* = WSP_GGML_DEFAULT_N_THREADS */
+                struct wsp_ggml_threadpool * threadpool /* = NULL */ );
+WSP_GGML_BACKEND_API enum wsp_ggml_status  wsp_ggml_graph_compute(struct wsp_ggml_cgraph * cgraph, struct wsp_ggml_cplan * cplan);
+
+// same as wsp_ggml_graph_compute() but the work data is allocated as a part of the context
+// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
+WSP_GGML_BACKEND_API enum wsp_ggml_status  wsp_ggml_graph_compute_with_ctx(struct wsp_ggml_context * ctx, struct wsp_ggml_cgraph * cgraph, int n_threads);
+
+//
+// system info
+//
+
+// x86
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sse3       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_ssse3      (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx_vnni   (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx2       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_bmi2       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_f16c       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fma        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512     (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vbmi(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_vnni(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_avx512_bf16(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_amx_int8   (void);
+// ARM
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_neon       (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_arm_fma    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_fp16_va    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_dotprod    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_matmul_int8(void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sve        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_sme        (void);
+// other
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_riscv_v    (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_vsx        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_vxe        (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_wasm_simd  (void);
+WSP_GGML_BACKEND_API int wsp_ggml_cpu_has_llamafile  (void);
+
+// Internal types and functions exposed for tests and benchmarks
+
+typedef void (*wsp_ggml_vec_dot_t) (int n, float * WSP_GGML_RESTRICT s, size_t bs, const void * WSP_GGML_RESTRICT x, size_t bx,
+                                    const void * WSP_GGML_RESTRICT y, size_t by, int nrc);
+
+struct wsp_ggml_type_traits_cpu {
+    wsp_ggml_from_float_t from_float;
+    wsp_ggml_vec_dot_t    vec_dot;
+    enum wsp_ggml_type    vec_dot_type;
+    int64_t               nrows; // number of rows to process simultaneously
+};
+
+WSP_GGML_BACKEND_API const struct wsp_ggml_type_traits_cpu * wsp_ggml_get_type_traits_cpu(enum wsp_ggml_type type);
+
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_init(void);
+
+//
+// CPU backend
+//
+
+WSP_GGML_BACKEND_API wsp_ggml_backend_t wsp_ggml_backend_cpu_init(void);
+
+WSP_GGML_BACKEND_API bool wsp_ggml_backend_is_cpu                (wsp_ggml_backend_t backend);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_n_threads     (wsp_ggml_backend_t backend_cpu, int n_threads);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_threadpool    (wsp_ggml_backend_t backend_cpu, wsp_ggml_threadpool_t threadpool);
+WSP_GGML_BACKEND_API void wsp_ggml_backend_cpu_set_abort_callback(wsp_ggml_backend_t backend_cpu, wsp_ggml_abort_callback abort_callback, void * abort_callback_data);
+
+WSP_GGML_BACKEND_API wsp_ggml_backend_reg_t wsp_ggml_backend_cpu_reg(void);
+
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_fp32_to_fp16(const float *, wsp_ggml_fp16_t *, int64_t);
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_fp16_to_fp32(const wsp_ggml_fp16_t *, float *, int64_t);
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_fp32_to_bf16(const float *, wsp_ggml_bf16_t *, int64_t);
+WSP_GGML_BACKEND_API void wsp_ggml_cpu_bf16_to_fp32(const wsp_ggml_bf16_t *, float *, int64_t);
+
+#ifdef __cplusplus
+}
+#endif
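Note: the comments on wsp_ggml_graph_plan() above spell out a two-step contract: plan first, then compute, with the work buffer owned by the caller. Below is a minimal sketch of that flow in C; it assumes the compute graph has already been built elsewhere, and the WSP_GGML_STATUS_ALLOC_FAILED value used on allocation failure follows the usual ggml status-enum convention rather than anything stated in this diff.

    #include "ggml.h"
    #include "ggml-cpu.h"
    #include <stdlib.h>

    // Sketch only: `graph` is assumed to be a fully constructed cgraph.
    enum wsp_ggml_status run_graph(struct wsp_ggml_cgraph * graph, int n_threads) {
        // 1. plan first: fills in work_size for this graph/thread count
        struct wsp_ggml_cplan plan = wsp_ggml_graph_plan(graph, n_threads, /*threadpool =*/ NULL);

        // 2. the caller allocates work_data whenever work_size > 0
        if (plan.work_size > 0) {
            plan.work_data = malloc(plan.work_size);
            if (plan.work_data == NULL) {
                return WSP_GGML_STATUS_ALLOC_FAILED;
            }
        }

        // 3. compute with the prepared plan, then release the buffer
        enum wsp_ggml_status status = wsp_ggml_graph_compute(graph, &plan);
        free(plan.work_data); // free(NULL) is a no-op when no buffer was needed
        return status;
    }

wsp_ggml_graph_compute_with_ctx() wraps the same steps but carves the work buffer out of the context, which is why the header warns that the context must have been sized with that in mind.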
package/cpp/ggml-impl.h
CHANGED
@@ -3,21 +3,44 @@
 // GGML internal header
 
 #include "ggml.h"
+#include "gguf.h"
 
 #include <assert.h>
+#include <math.h>
 #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
 #include <stdbool.h>
 #include <stdint.h>
+#include <string.h>
+
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
+#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
+#if defined(__F16C__)
+#include <immintrin.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#undef MIN
-#undef MAX
+void wsp_ggml_print_backtrace(void);
+
+#ifndef MIN
+#    define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
 
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#ifndef MAX
+#    define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
 
 // required for mmap as gguf only guarantees 32-byte alignment
 #define TENSOR_ALIGNMENT 32
@@ -27,22 +50,36 @@ extern "C" {
 // if C99 - static_assert is noop
 // ref: https://stackoverflow.com/a/53923785/4039976
 #ifndef __cplusplus
-#ifndef static_assert
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
-#define static_assert(cond, msg) _Static_assert(cond, msg)
-#else
-#define static_assert(cond, msg) struct global_scope_noop_trick
-#endif
-#endif
+#ifndef static_assert
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+#define static_assert(cond, msg) _Static_assert(cond, msg)
+#else
+#define static_assert(cond, msg) struct global_scope_noop_trick
+#endif
+#endif
 #endif
 
+static inline int wsp_ggml_up32(int n) {
+    return (n + 31) & ~31;
+}
+
+//static inline int wsp_ggml_up64(int n) {
+//    return (n + 63) & ~63;
+//}
+
+static inline int wsp_ggml_up(int n, int m) {
+    // assert m is a power of 2
+    WSP_GGML_ASSERT((m & (m - 1)) == 0);
+    return (n + m - 1) & ~(m - 1);
+}
+
 //
 // logging
 //
 
 WSP_GGML_ATTRIBUTE_FORMAT(2, 3)
-void wsp_ggml_log_internal        (enum wsp_ggml_log_level level, const char * format, ...);
-void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * text, void * user_data);
+WSP_GGML_API void wsp_ggml_log_internal        (enum wsp_ggml_log_level level, const char * format, ...);
+WSP_GGML_API void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * text, void * user_data);
 
 #define WSP_GGML_LOG(...)      wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
 #define WSP_GGML_LOG_INFO(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
@@ -51,6 +88,78 @@ void wsp_ggml_log_callback_default(enum wsp_ggml_log_level level, const char * t
 #define WSP_GGML_LOG_DEBUG(...) wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
 #define WSP_GGML_LOG_CONT(...)  wsp_ggml_log_internal(WSP_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
 
+#define WSP_GGML_DEBUG 0
+
+#if (WSP_GGML_DEBUG >= 1)
+#define WSP_GGML_PRINT_DEBUG(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG(...)
+#endif
+
+#if (WSP_GGML_DEBUG >= 5)
+#define WSP_GGML_PRINT_DEBUG_5(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG_5(...)
+#endif
+
+#if (WSP_GGML_DEBUG >= 10)
+#define WSP_GGML_PRINT_DEBUG_10(...) WSP_GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define WSP_GGML_PRINT_DEBUG_10(...)
+#endif
+
+// tensor params
+
+static void wsp_ggml_set_op_params(struct wsp_ggml_tensor * tensor, const void * params, size_t params_size) {
+    WSP_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
+    assert(params_size <= WSP_GGML_MAX_OP_PARAMS);
+    memcpy(tensor->op_params, params, params_size);
+}
+
+static int32_t wsp_ggml_get_op_params_i32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    return ((const int32_t *)(tensor->op_params))[i];
+}
+
+static float wsp_ggml_get_op_params_f32(const struct wsp_ggml_tensor * tensor, uint32_t i) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
+    return ((const float *)(tensor->op_params))[i];
+}
+
+static void wsp_ggml_set_op_params_i32(struct wsp_ggml_tensor * tensor, uint32_t i, int32_t value) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(int32_t));
+    ((int32_t *)(tensor->op_params))[i] = value;
+}
+
+static void wsp_ggml_set_op_params_f32(struct wsp_ggml_tensor * tensor, uint32_t i, float value) {
+    assert(i < WSP_GGML_MAX_OP_PARAMS / sizeof(float));
+    ((float *)(tensor->op_params))[i] = value;
+}
+
+struct wsp_ggml_map_custom1_op_params {
+    wsp_ggml_custom1_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+struct wsp_ggml_map_custom2_op_params {
+    wsp_ggml_custom2_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+struct wsp_ggml_map_custom3_op_params {
+    wsp_ggml_custom3_op_t  fun;
+    int                    n_tasks;
+    void                 * userdata;
+};
+
+struct wsp_ggml_custom_op_params {
+    wsp_ggml_custom_op_t fun;
+    int                  n_tasks;
+    void               * userdata;
+};
+
 // bitset
 
 typedef uint32_t wsp_ggml_bitset_t;
@@ -99,7 +208,7 @@ void wsp_ggml_hash_set_reset(struct wsp_ggml_hash_set * hash_set);
 static bool wsp_ggml_hash_contains(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
 
 // returns WSP_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
-static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key);
 
 // returns WSP_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
 static size_t wsp_ggml_hash_insert(struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key);
@@ -113,7 +222,7 @@ static inline size_t wsp_ggml_hash(const struct wsp_ggml_tensor * p) {
     return (size_t)(uintptr_t)p >> 4;
 }
 
-static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, struct wsp_ggml_tensor * key) {
+static size_t wsp_ggml_hash_find(const struct wsp_ggml_hash_set * hash_set, const struct wsp_ggml_tensor * key) {
     size_t h = wsp_ggml_hash(key) % hash_set->size;
 
     // linear probing
@@ -184,26 +293,311 @@ enum wsp_ggml_cgraph_eval_order {
 };
 
 struct wsp_ggml_cgraph {
-    int size;
-    int n_nodes;
-    int n_leafs;
+    int size;    // maximum number of nodes/leafs/grads/grad_accs
+    int n_nodes; // number of nodes currently in use
+    int n_leafs; // number of leafs currently in use
 
-    struct wsp_ggml_tensor ** nodes;
-    struct wsp_ggml_tensor ** grads;
-    struct wsp_ggml_tensor ** leafs;
+    struct wsp_ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
+    struct wsp_ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
+    struct wsp_ggml_tensor ** grad_accs; // accumulators for node gradients
+    struct wsp_ggml_tensor ** leafs;     // tensors with constant data
 
     struct wsp_ggml_hash_set visited_hash_set;
 
     enum wsp_ggml_cgraph_eval_order order;
 };
 
+// returns a slice of cgraph with nodes [i0, i1)
+// the slice does not have leafs or gradients
+// if you need the gradients, get them from the original graph
 struct wsp_ggml_cgraph wsp_ggml_graph_view(struct wsp_ggml_cgraph * cgraph, int i0, int i1);
 
 // Memory allocation
 
-void * wsp_ggml_aligned_malloc(size_t size);
-void wsp_ggml_aligned_free(void * ptr, size_t size);
+WSP_GGML_API void * wsp_ggml_aligned_malloc(size_t size);
+WSP_GGML_API void wsp_ggml_aligned_free(void * ptr, size_t size);
+
+// FP16 to FP32 conversion
+
+// 16-bit float
+// on Arm, we use __fp16
+// on x86, we use uint16_t
+//
+// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
+// for     MUSA compilers        , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
+//
+#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
+    #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+    #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+
+    #define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+
+    static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+        __fp16 tmp;
+        memcpy(&tmp, &h, sizeof(wsp_ggml_fp16_t));
+        return (float)tmp;
+    }
+
+    static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+        wsp_ggml_fp16_t res;
+        __fp16 tmp = f;
+        memcpy(&res, &tmp, sizeof(wsp_ggml_fp16_t));
+        return res;
+    }
+
+#elif defined(__F16C__)
+
+    #ifdef _MSC_VER
+        #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+        #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+    #else
+        #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+        #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+    #endif
+
+#elif defined(__POWER9_VECTOR__)
+
+    #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+    #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+    /* the inline asm below is about 12% faster than the lookup method */
+    #define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
+    #define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+
+    static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+        float f;
+        double d;
+        __asm__(
+            "mtfprd %0,%2\n"
+            "xscvhpdp %0,%0\n"
+            "frsp %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=f"(f):
+            /* in */   "r"(h));
+        return f;
+    }
+
+    static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+        double d;
+        wsp_ggml_fp16_t r;
+        __asm__( /* xscvdphp can work on double or single precision */
+            "xscvdphp %0,%2\n"
+            "mffprd %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=r"(r):
+            /* in */   "f"(f));
+        return r;
+    }
+
+#elif defined(__riscv) && defined(__riscv_zfhmin)
+
+    static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+        float f;
+        __asm__(
+            "fmv.h.x %[f], %[h]\n\t"
+            "fcvt.s.h %[f], %[f]"
+            : [f] "=&f" (f)
+            : [h] "r" (h)
+        );
+        return f;
+    }
+
+    static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+        wsp_ggml_fp16_t res;
+        __asm__(
+            "fcvt.h.s %[f], %[f]\n\t"
+            "fmv.x.h %[h], %[f]"
+            : [h] "=&r" (res)
+            : [f] "f" (f)
+        );
+        return res;
+    }
+
+    #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+    #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+    #define WSP_GGML_FP16_TO_FP32(x) WSP_GGML_COMPUTE_FP16_TO_FP32(x)
+    #define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+
+#else
+
+    // FP16 <-> FP32
+    // ref: https://github.com/Maratyszcza/FP16
+
+    static inline float fp32_from_bits(uint32_t w) {
+        union {
+            uint32_t as_bits;
+            float    as_value;
+        } fp32;
+        fp32.as_bits = w;
+        return fp32.as_value;
+    }
+
+    static inline uint32_t fp32_to_bits(float f) {
+        union {
+            float    as_value;
+            uint32_t as_bits;
+        } fp32;
+        fp32.as_value = f;
+        return fp32.as_bits;
+    }
+
+    static inline float wsp_ggml_compute_fp16_to_fp32(wsp_ggml_fp16_t h) {
+        const uint32_t w = (uint32_t) h << 16;
+        const uint32_t sign = w & UINT32_C(0x80000000);
+        const uint32_t two_w = w + w;
+
+        const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+    #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+        const float exp_scale = 0x1.0p-112f;
+    #else
+        const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+    #endif
+        const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+        const uint32_t magic_mask = UINT32_C(126) << 23;
+        const float magic_bias = 0.5f;
+        const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+        const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+        const uint32_t result = sign |
+            (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+        return fp32_from_bits(result);
+    }
+
+    static inline wsp_ggml_fp16_t wsp_ggml_compute_fp32_to_fp16(float f) {
+    #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
+        const float scale_to_inf  = 0x1.0p+112f;
+        const float scale_to_zero = 0x1.0p-110f;
+    #else
+        const float scale_to_inf  = fp32_from_bits(UINT32_C(0x77800000));
+        const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+    #endif
+        float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+        const uint32_t w = fp32_to_bits(f);
+        const uint32_t shl1_w = w + w;
+        const uint32_t sign = w & UINT32_C(0x80000000);
+        uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+        if (bias < UINT32_C(0x71000000)) {
+            bias = UINT32_C(0x71000000);
+        }
+
+        base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+        const uint32_t bits = fp32_to_bits(base);
+        const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+        const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+        const uint32_t nonsign = exp_bits + mantissa_bits;
+        return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+    }
+
+    #define WSP_GGML_COMPUTE_FP16_TO_FP32(x) wsp_ggml_compute_fp16_to_fp32(x)
+    #define WSP_GGML_COMPUTE_FP32_TO_FP16(x) wsp_ggml_compute_fp32_to_fp16(x)
+
+#endif // defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in wsp_ggml_init()
+WSP_GGML_API float wsp_ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON, it's quicker to directly convert x -> x instead of calling into wsp_ggml_lookup_fp16_to_fp32,
+// so we define WSP_GGML_FP16_TO_FP32 and WSP_GGML_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
+#if !defined(WSP_GGML_FP16_TO_FP32)
+inline static float wsp_ggml_lookup_fp16_to_fp32(wsp_ggml_fp16_t f) {
+    uint16_t s;
+    memcpy(&s, &f, sizeof(uint16_t));
+    return wsp_ggml_table_f32_f16[s];
+}
+
+#define WSP_GGML_FP16_TO_FP32(x) wsp_ggml_lookup_fp16_to_fp32(x)
+#endif
+
+#if !defined(WSP_GGML_FP32_TO_FP16)
+#define WSP_GGML_FP32_TO_FP16(x) WSP_GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
+
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───┐
+ *     0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───────────────────┐
+ *     0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ *       ┌sign
+ *       │
+ *       │  ┌exponent
+ *       │  │
+ *       │  │    ┌mantissa
+ *       │  │    │
+ *       │┌─┴─┐┌─┴──────┐
+ *     0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float wsp_ggml_compute_bf16_to_fp32(wsp_ggml_bf16_t h) {
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = (uint32_t)h.bits << 16;
+    return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This is binary identical with Google Brain float conversion.
+ * Floats shall round to nearest even, and NANs shall be quiet.
+ * Subnormals aren't flushed to zero, except perhaps when used.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline wsp_ggml_bf16_t wsp_ggml_compute_fp32_to_bf16(float s) {
+    wsp_ggml_bf16_t h;
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = s;
+    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+        h.bits = (u.i >> 16) | 64; /* force to quiet */
+        return h;
+    }
+    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+    return h;
+}
+
+#define WSP_GGML_FP32_TO_BF16(x) wsp_ggml_compute_fp32_to_bf16(x)
+#define WSP_GGML_BF16_TO_FP32(x) wsp_ggml_compute_bf16_to_fp32(x)
 
 #ifdef __cplusplus
 }
 #endif
+
+#ifdef __cplusplus
+#include <vector>
+
+// expose GGUF internals for test code
+WSP_GGML_API size_t wsp_gguf_type_size(enum wsp_gguf_type type);
+WSP_GGML_API struct wsp_gguf_context * wsp_gguf_init_from_file_impl(FILE * file, struct wsp_gguf_init_params params);
+WSP_GGML_API void wsp_gguf_write_to_buf(const struct wsp_gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
+#endif // __cplusplus