whisper.rn 0.4.0-rc.9 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. package/README.md +5 -1
  2. package/android/build.gradle +12 -3
  3. package/android/src/main/CMakeLists.txt +43 -13
  4. package/android/src/main/java/com/rnwhisper/WhisperContext.java +33 -35
  5. package/android/src/main/jni.cpp +9 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnwhisper.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnwhisper_v8fp16_va_2.so +0 -0
  8. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper.so +0 -0
  9. package/android/src/main/jniLibs/armeabi-v7a/librnwhisper_vfpv4.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnwhisper.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnwhisper_x86_64.so +0 -0
  12. package/cpp/coreml/whisper-compat.h +10 -0
  13. package/cpp/coreml/whisper-compat.m +35 -0
  14. package/cpp/coreml/whisper-decoder-impl.h +27 -15
  15. package/cpp/coreml/whisper-decoder-impl.m +36 -10
  16. package/cpp/coreml/whisper-encoder-impl.h +21 -9
  17. package/cpp/coreml/whisper-encoder-impl.m +29 -3
  18. package/cpp/ggml-alloc.c +39 -37
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +55 -27
  21. package/cpp/ggml-backend-reg.cpp +591 -0
  22. package/cpp/ggml-backend.cpp +336 -955
  23. package/cpp/ggml-backend.h +70 -42
  24. package/cpp/ggml-common.h +57 -49
  25. package/cpp/ggml-cpp.h +39 -0
  26. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  27. package/cpp/ggml-cpu/amx/amx.h +8 -0
  28. package/cpp/ggml-cpu/amx/common.h +91 -0
  29. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  30. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  31. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  32. package/cpp/ggml-cpu/arch/arm/quants.c +4113 -0
  33. package/cpp/ggml-cpu/arch/arm/repack.cpp +2162 -0
  34. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  35. package/cpp/ggml-cpu/arch/x86/quants.c +4310 -0
  36. package/cpp/ggml-cpu/arch/x86/repack.cpp +3284 -0
  37. package/cpp/ggml-cpu/arch-fallback.h +184 -0
  38. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  39. package/cpp/ggml-cpu/binary-ops.h +16 -0
  40. package/cpp/ggml-cpu/common.h +72 -0
  41. package/cpp/ggml-cpu/ggml-cpu-impl.h +511 -0
  42. package/cpp/ggml-cpu/ggml-cpu.c +3473 -0
  43. package/cpp/ggml-cpu/ggml-cpu.cpp +671 -0
  44. package/cpp/ggml-cpu/ops.cpp +9085 -0
  45. package/cpp/ggml-cpu/ops.h +111 -0
  46. package/cpp/ggml-cpu/quants.c +1157 -0
  47. package/cpp/ggml-cpu/quants.h +89 -0
  48. package/cpp/ggml-cpu/repack.cpp +1570 -0
  49. package/cpp/ggml-cpu/repack.h +98 -0
  50. package/cpp/ggml-cpu/simd-mappings.h +1006 -0
  51. package/cpp/ggml-cpu/traits.cpp +36 -0
  52. package/cpp/ggml-cpu/traits.h +38 -0
  53. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  54. package/cpp/ggml-cpu/unary-ops.h +28 -0
  55. package/cpp/ggml-cpu/vec.cpp +321 -0
  56. package/cpp/ggml-cpu/vec.h +973 -0
  57. package/cpp/ggml-cpu.h +143 -0
  58. package/cpp/ggml-impl.h +417 -23
  59. package/cpp/ggml-metal-impl.h +622 -0
  60. package/cpp/ggml-metal.h +9 -9
  61. package/cpp/ggml-metal.m +3451 -1344
  62. package/cpp/ggml-opt.cpp +1037 -0
  63. package/cpp/ggml-opt.h +237 -0
  64. package/cpp/ggml-quants.c +296 -10818
  65. package/cpp/ggml-quants.h +78 -125
  66. package/cpp/ggml-threading.cpp +12 -0
  67. package/cpp/ggml-threading.h +14 -0
  68. package/cpp/ggml-whisper-sim.metallib +0 -0
  69. package/cpp/ggml-whisper.metallib +0 -0
  70. package/cpp/ggml.c +4633 -21450
  71. package/cpp/ggml.h +320 -661
  72. package/cpp/gguf.cpp +1347 -0
  73. package/cpp/gguf.h +202 -0
  74. package/cpp/rn-whisper.cpp +4 -11
  75. package/cpp/whisper-arch.h +197 -0
  76. package/cpp/whisper.cpp +2022 -495
  77. package/cpp/whisper.h +75 -18
  78. package/ios/CMakeLists.txt +95 -0
  79. package/ios/RNWhisper.h +5 -0
  80. package/ios/RNWhisperAudioUtils.m +4 -0
  81. package/ios/RNWhisperContext.h +5 -0
  82. package/ios/RNWhisperContext.mm +4 -2
  83. package/ios/rnwhisper.xcframework/Info.plist +74 -0
  84. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  85. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  86. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  87. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  88. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  89. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  90. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  91. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  92. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  93. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  94. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  95. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  96. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
  97. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
  98. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  99. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  100. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  101. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  102. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
  103. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/Info.plist +0 -0
  104. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
  105. package/ios/rnwhisper.xcframework/ios-arm64/rnwhisper.framework/rnwhisper +0 -0
  106. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  107. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  108. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  109. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  110. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  111. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  112. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  113. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  114. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  115. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  116. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  117. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  118. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
  119. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
  120. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  121. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  122. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  123. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  124. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
  125. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  126. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
  127. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
  128. package/ios/rnwhisper.xcframework/ios-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  129. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  130. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  131. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  132. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  133. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  134. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  135. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  136. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  137. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  138. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  139. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  140. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  141. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/ggml.h +2221 -0
  142. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/gguf.h +202 -0
  143. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  144. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  145. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  146. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  147. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Headers/whisper.h +739 -0
  148. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/Info.plist +0 -0
  149. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/ggml-whisper.metallib +0 -0
  150. package/ios/rnwhisper.xcframework/tvos-arm64/rnwhisper.framework/rnwhisper +0 -0
  151. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-alloc.h +76 -0
  152. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend-impl.h +255 -0
  153. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-backend.h +354 -0
  154. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-common.h +1861 -0
  155. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpp.h +39 -0
  156. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-cpu.h +143 -0
  157. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-impl.h +603 -0
  158. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal-impl.h +622 -0
  159. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-metal.h +66 -0
  160. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-opt.h +237 -0
  161. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-quants.h +100 -0
  162. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml-threading.h +14 -0
  163. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/ggml.h +2221 -0
  164. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/gguf.h +202 -0
  165. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-audioutils.h +14 -0
  166. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper-log.h +11 -0
  167. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/rn-whisper.h +52 -0
  168. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper-arch.h +197 -0
  169. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Headers/whisper.h +739 -0
  170. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/Info.plist +0 -0
  171. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/_CodeSignature/CodeResources +101 -0
  172. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/ggml-whisper-sim.metallib +0 -0
  173. package/ios/rnwhisper.xcframework/tvos-arm64_x86_64-simulator/rnwhisper.framework/rnwhisper +0 -0
  174. package/jest/mock.js +5 -0
  175. package/lib/commonjs/version.json +1 -1
  176. package/lib/module/version.json +1 -1
  177. package/package.json +10 -6
  178. package/src/version.json +1 -1
  179. package/whisper-rn.podspec +11 -18
  180. package/cpp/README.md +0 -4
  181. package/cpp/ggml-aarch64.c +0 -3209
  182. package/cpp/ggml-aarch64.h +0 -39
  183. package/cpp/ggml-cpu-impl.h +0 -614
package/cpp/ggml-cpu/simd-mappings.h (new file; the only +1006 -0 entry above)
@@ -0,0 +1,1006 @@
+ #pragma once
+
+ #include "ggml-cpu-impl.h"
+
+ //
+ // simd mappings
+ //
+
+ // we define a common set of C macros which map to specific intrinsics based on the current architecture
+ // we then implement the fundamental computation operations below using only these macros
+ // adding support for new architectures requires to define the corresponding SIMD macros
+ //
+ // WSP_GGML_F32_STEP / WSP_GGML_F16_STEP
+ // number of elements to process in a single step
+ //
+ // WSP_GGML_F32_EPR / WSP_GGML_F16_EPR
+ // number of elements to fit in a single register
+ //
+
+ #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA)
+
+ #define WSP_GGML_SIMD
+
+ // F32 SVE
+ #define WSP_GGML_F32_EPR 8
+ #define DEFAULT_PG svptrue_b32()
+
+ #define WSP_GGML_F32xt svfloat32_t
+ #define WSP_GGML_F32xt_ZERO svdup_n_f32(0.0f)
+ #define WSP_GGML_F32xt_SET1(x) svdup_n_f32(x)
+ #define WSP_GGML_F32xt_LOAD_IMPL(pg, a, ...) svld1_f32(pg, a)
+ #define WSP_GGML_F32xt_LOAD(...) WSP_GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_STORE_IMPL(pg,a,b) svst1_f32(pg, a, b)
+ #define WSP_GGML_F32xt_STORE(...) WSP_GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, a, b, c)
+ #define WSP_GGML_F32xt_FMA(...) WSP_GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_ADD_IMPL(pg, a, b) svadd_f32_m(pg, a, b)
+ #define WSP_GGML_F32xt_ADD(...) WSP_GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_MUL_IMPL(pg, a, b) svmul_f32_m(pg, a, b)
+ #define WSP_GGML_F32xt_MUL(...) WSP_GGML_F32xt_MUL_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a)
+ #define WSP_GGML_F32xt_REDUCE_ONE(...) WSP_GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, __VA_ARGS__)
+ #define WSP_GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \
+ { \
+     sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2); \
+     sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4); \
+     sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6); \
+     sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8); \
+     sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3); \
+     sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7); \
+     sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5); \
+     (res) = (wsp_ggml_float) WSP_GGML_F32xt_REDUCE_ONE(sum1); \
+ }
+ #define WSP_GGML_F32xt_REDUCE(...) WSP_GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, __VA_ARGS__)
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32xt
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32xt_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32xt_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32xt_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32xt_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32xt_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32xt_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32xt_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32xt_REDUCE
+
+ // F16 NEON
+
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 8
+
+ #define WSP_GGML_F16x8 float16x8_t
+ #define WSP_GGML_F16x8_ZERO vdupq_n_f16(0.0f)
+ #define WSP_GGML_F16x8_SET1(x) vdupq_n_f16(x)
+ #define WSP_GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x))
+ #define WSP_GGML_F16x8_STORE vst1q_f16
+ #define WSP_GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
+ #define WSP_GGML_F16x8_ADD vaddq_f16
+ #define WSP_GGML_F16x8_MUL vmulq_f16
+ #define WSP_GGML_F16x8_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F16_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \
+     const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \
+     (res) = (wsp_ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
+ } while (0)
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F16x8
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F16x8_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F16x8_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F16x8_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F16x8_STORE((__fp16 *)(p), (r)[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F16x8_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F16x8_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F16x8_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F16x8_REDUCE
+ #else
+ // if FP16 vector arithmetic is not supported, we use FP32 instead
+ // and take advantage of the vcvt_ functions to convert to/from FP16
+
+ #define WSP_GGML_F16_STEP 16
+ #define WSP_GGML_F16_EPR 4
+
+ #define WSP_GGML_F32Cx4 float32x4_t
+ #define WSP_GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
+ #define WSP_GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
+ #define WSP_GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x)))
+ #define WSP_GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
+ #define WSP_GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
+ #define WSP_GGML_F32Cx4_ADD vaddq_f32
+ #define WSP_GGML_F32Cx4_MUL vmulq_f32
+ #define WSP_GGML_F32Cx4_REDUCE WSP_GGML_F32x4_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx4_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx4_STORE((__fp16 *)(p), r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx4_REDUCE
+ #endif
+
+ #elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
+
+ #define WSP_GGML_SIMD
+
+ // F32 NEON
+
+ #define WSP_GGML_F32_STEP 16
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 float32x4_t
+ #define WSP_GGML_F32x4_ZERO vdupq_n_f32(0.0f)
+ #define WSP_GGML_F32x4_SET1(x) vdupq_n_f32(x)
+ #define WSP_GGML_F32x4_LOAD vld1q_f32
+ #define WSP_GGML_F32x4_STORE vst1q_f32
+ #define WSP_GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
+ #define WSP_GGML_F32x4_ADD vaddq_f32
+ #define WSP_GGML_F32x4_MUL vmulq_f32
+ #define WSP_GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
+     } \
+     (res) = (wsp_ggml_float) WSP_GGML_F32x4_REDUCE_ONE((x)[0]); \
+ }
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 NEON
+
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 8
+
+ #define WSP_GGML_F16x8 float16x8_t
+ #define WSP_GGML_F16x8_ZERO vdupq_n_f16(0.0f)
+ #define WSP_GGML_F16x8_SET1(x) vdupq_n_f16(x)
+ #define WSP_GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x))
+ #define WSP_GGML_F16x8_STORE vst1q_f16
+ #define WSP_GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
+ #define WSP_GGML_F16x8_ADD vaddq_f16
+ #define WSP_GGML_F16x8_MUL vmulq_f16
+ #define WSP_GGML_F16x8_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F16_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
+     } \
+     const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \
+     const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \
+     (res) = (wsp_ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
+ } while (0)
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F16x8
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F16x8_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F16x8_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F16x8_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F16x8_STORE((__fp16 *)(p), (r)[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F16x8_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F16x8_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F16x8_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F16x8_REDUCE
+ #else
+ // if FP16 vector arithmetic is not supported, we use FP32 instead
+ // and take advantage of the vcvt_ functions to convert to/from FP16
+
+ #define WSP_GGML_F16_STEP 16
+ #define WSP_GGML_F16_EPR 4
+
+ #define WSP_GGML_F32Cx4 float32x4_t
+ #define WSP_GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
+ #define WSP_GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
+ #define WSP_GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x)))
+ #define WSP_GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
+ #define WSP_GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
+ #define WSP_GGML_F32Cx4_ADD vaddq_f32
+ #define WSP_GGML_F32Cx4_MUL vmulq_f32
+ #define WSP_GGML_F32Cx4_REDUCE WSP_GGML_F32x4_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx4_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx4_STORE((__fp16 *)(p), r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx4_REDUCE
+ #endif
+
+ #elif defined(__AVX512F__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 AVX512
+
+ #define WSP_GGML_F32_STEP 64
+ #define WSP_GGML_F32_EPR 16
+
+ #define WSP_GGML_F32x16 __m512
+ #define WSP_GGML_F32x16_ZERO _mm512_setzero_ps()
+ #define WSP_GGML_F32x16_SET1(x) _mm512_set1_ps(x)
+ #define WSP_GGML_F32x16_LOAD _mm512_loadu_ps
+ #define WSP_GGML_F32x16_STORE _mm512_storeu_ps
+ // _mm512_fmadd_ps is defined in AVX512F so no guard is required
+ #define WSP_GGML_F32x16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
+ #define WSP_GGML_F32x16_ADD _mm512_add_ps
+ #define WSP_GGML_F32x16_MUL _mm512_mul_ps
+ #define WSP_GGML_F32x16_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     res = (wsp_ggml_float) _mm512_reduce_add_ps(x[0]); \
+ } while (0)
+
+ // TODO: is this optimal ?
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x16
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x16_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x16_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x16_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x16_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x16_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x16_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x16_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x16_REDUCE
+
+ // F16 AVX512
+
+ // F16 AVX
+
+ #define WSP_GGML_F16_STEP 64
+ #define WSP_GGML_F16_EPR 16
+
+ // AVX512 has FP16 extension (AVX512_FP16) but I don't have it on my machine so I use FP32 instead
+
+ #define WSP_GGML_F32Cx16 __m512
+ #define WSP_GGML_F32Cx16_ZERO _mm512_setzero_ps()
+ #define WSP_GGML_F32Cx16_SET1(x) _mm512_set1_ps(x)
+
+ // unlike _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F
+ // so F16C guard isn't required
+ #define WSP_GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x)))
+ #define WSP_GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0))
+
+ #define WSP_GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
+ #define WSP_GGML_F32Cx16_ADD _mm512_add_ps
+ #define WSP_GGML_F32Cx16_MUL _mm512_mul_ps
+ #define WSP_GGML_F32Cx16_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm512_add_ps(x[i], x[offset+i]); \
+     } \
+     res = (wsp_ggml_float) _mm512_reduce_add_ps(x[0]); \
+ } while (0)
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx16
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx16_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx16_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx16_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx16_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx16_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx16_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx16_MUL
+
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx16_REDUCE
+ #elif defined(__AVX__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 AVX
+
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 8
+
+ #define WSP_GGML_F32x8 __m256
+ #define WSP_GGML_F32x8_ZERO _mm256_setzero_ps()
+ #define WSP_GGML_F32x8_SET1(x) _mm256_set1_ps(x)
+ #define WSP_GGML_F32x8_LOAD _mm256_loadu_ps
+ #define WSP_GGML_F32x8_STORE _mm256_storeu_ps
+ #if defined(__FMA__)
+ #define WSP_GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
+ #else
+ #define WSP_GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
+ #endif
+ #define WSP_GGML_F32x8_ADD _mm256_add_ps
+ #define WSP_GGML_F32x8_MUL _mm256_mul_ps
+ #define WSP_GGML_F32x8_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm256_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm256_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm256_add_ps(x[i], x[offset+i]); \
+     } \
+     const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
+                                  _mm256_extractf128_ps(x[0], 1)); \
+     const __m128 t1 = _mm_hadd_ps(t0, t0); \
+     res = (wsp_ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
+ } while (0)
+ // TODO: is this optimal ?
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x8
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x8_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x8_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x8_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x8_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x8_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x8_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x8_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x8_REDUCE
+
+ // F16 AVX
+
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 8
+
+ // F16 arithmetic is not supported by AVX, so we use F32 instead
+
+ #define WSP_GGML_F32Cx8 __m256
+ #define WSP_GGML_F32Cx8_ZERO _mm256_setzero_ps()
+ #define WSP_GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
+
+ #if defined(__F16C__)
+ // the _mm256_cvt intrinsics require F16C
+ #define WSP_GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
+ #define WSP_GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
+ #else
+ static inline __m256 __avx_f32cx8_load(const wsp_ggml_fp16_t * x) {
+     float tmp[8];
+
+     for (int i = 0; i < 8; i++) {
+         tmp[i] = WSP_GGML_FP16_TO_FP32(x[i]);
+     }
+
+     return _mm256_loadu_ps(tmp);
+ }
+ static inline void __avx_f32cx8_store(wsp_ggml_fp16_t *x, __m256 y) {
+     float arr[8];
+
+     _mm256_storeu_ps(arr, y);
+
+     for (int i = 0; i < 8; i++)
+         x[i] = WSP_GGML_FP32_TO_FP16(arr[i]);
+ }
+ #define WSP_GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
+ #define WSP_GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
+ #endif
+
+ #define WSP_GGML_F32Cx8_FMA WSP_GGML_F32x8_FMA
+ #define WSP_GGML_F32Cx8_ADD _mm256_add_ps
+ #define WSP_GGML_F32Cx8_MUL _mm256_mul_ps
+ #define WSP_GGML_F32Cx8_REDUCE WSP_GGML_F32x8_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx8
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx8_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx8_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx8_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx8_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx8_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx8_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx8_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx8_REDUCE
+
+ #elif defined(__POWER9_VECTOR__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 POWER9
+
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 vector float
+ #define WSP_GGML_F32x4_ZERO {0.0f}
+ #define WSP_GGML_F32x4_SET1 vec_splats
+ #define WSP_GGML_F32x4_LOAD(p) vec_xl(0, p)
+ #define WSP_GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
+ #define WSP_GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
+ #define WSP_GGML_F32x4_ADD vec_add
+ #define WSP_GGML_F32x4_MUL vec_mul
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset+i]); \
+     } \
+     res = vec_extract(x[0], 0) + \
+           vec_extract(x[0], 1) + \
+           vec_extract(x[0], 2) + \
+           vec_extract(x[0], 3); \
+ }
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 POWER9
+ #define WSP_GGML_F16_STEP WSP_GGML_F32_STEP
+ #define WSP_GGML_F16_EPR WSP_GGML_F32_EPR
+ #define WSP_GGML_F16_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+ // Use vec_xl, not vec_ld, in case the load address is not aligned.
+ #define WSP_GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
+     vec_extract_fp32_from_shorth(vec_xl(0, p - WSP_GGML_F16_EPR)) : \
+     vec_extract_fp32_from_shortl(vec_xl(0, p))
+ static inline unsigned char wsp_ggml_endian_byte(int i) {
+     uint16_t tmp_val = 1;
+     return ((unsigned char *)&tmp_val)[i];
+ }
+ #define WSP_GGML_ENDIAN_BYTE(i) wsp_ggml_endian_byte(i)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) \
+     if (i & 0x1) \
+         vec_xst(vec_pack_to_short_fp32(r[i - WSP_GGML_ENDIAN_BYTE(1)], \
+                                        r[i - WSP_GGML_ENDIAN_BYTE(0)]), \
+                 0, p - WSP_GGML_F16_EPR)
+
+ #elif defined(__wasm_simd128__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 WASM
+
+ #define WSP_GGML_F32_STEP 16
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 v128_t
+ #define WSP_GGML_F32x4_ZERO wasm_f32x4_splat(0.0f)
+ #define WSP_GGML_F32x4_SET1(x) wasm_f32x4_splat(x)
+ #define WSP_GGML_F32x4_LOAD wasm_v128_load
+ #define WSP_GGML_F32x4_STORE wasm_v128_store
+ #define WSP_GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
+ #define WSP_GGML_F32x4_ADD wasm_f32x4_add
+ #define WSP_GGML_F32x4_MUL wasm_f32x4_mul
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     res = wasm_f32x4_extract_lane(x[0], 0) + \
+           wasm_f32x4_extract_lane(x[0], 1) + \
+           wasm_f32x4_extract_lane(x[0], 2) + \
+           wasm_f32x4_extract_lane(x[0], 3); \
+ }
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 WASM
+
+ #define WSP_GGML_F16_STEP 16
+ #define WSP_GGML_F16_EPR 4
+
+ inline static v128_t __wasm_f16x4_load(const wsp_ggml_fp16_t * p) {
+     float tmp[4];
+
+     tmp[0] = WSP_GGML_FP16_TO_FP32(p[0]);
+     tmp[1] = WSP_GGML_FP16_TO_FP32(p[1]);
+     tmp[2] = WSP_GGML_FP16_TO_FP32(p[2]);
+     tmp[3] = WSP_GGML_FP16_TO_FP32(p[3]);
+
+     return wasm_v128_load(tmp);
+ }
+
+ inline static void __wasm_f16x4_store(wsp_ggml_fp16_t * p, v128_t x) {
+     float tmp[4];
+
+     wasm_v128_store(tmp, x);
+
+     p[0] = WSP_GGML_FP32_TO_FP16(tmp[0]);
+     p[1] = WSP_GGML_FP32_TO_FP16(tmp[1]);
+     p[2] = WSP_GGML_FP32_TO_FP16(tmp[2]);
+     p[3] = WSP_GGML_FP32_TO_FP16(tmp[3]);
+ }
+
+ #define WSP_GGML_F16x4 v128_t
+ #define WSP_GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
+ #define WSP_GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
+ #define WSP_GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
+ #define WSP_GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
+ #define WSP_GGML_F16x4_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F16x4_ADD wasm_f32x4_add
+ #define WSP_GGML_F16x4_MUL wasm_f32x4_mul
+ #define WSP_GGML_F16x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F16_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
+     } \
+     res = (wsp_ggml_float) (wasm_f32x4_extract_lane(x[0], 0) + \
+                             wasm_f32x4_extract_lane(x[0], 1) + \
+                             wasm_f32x4_extract_lane(x[0], 2) + \
+                             wasm_f32x4_extract_lane(x[0], 3)); \
+ }
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F16x4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F16x4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F16x4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F16x4_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F16x4_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F16x4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F16x4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F16x4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F16x4_REDUCE
+
+ #elif defined(__SSE3__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 SSE
+
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 __m128
+ #define WSP_GGML_F32x4_ZERO _mm_setzero_ps()
+ #define WSP_GGML_F32x4_SET1(x) _mm_set1_ps(x)
+ #define WSP_GGML_F32x4_LOAD _mm_loadu_ps
+ #define WSP_GGML_F32x4_STORE _mm_storeu_ps
+ #if defined(__FMA__)
+ // TODO: Does this work?
+ #define WSP_GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
+ #else
+ #define WSP_GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
+ #endif
+ #define WSP_GGML_F32x4_ADD _mm_add_ps
+ #define WSP_GGML_F32x4_MUL _mm_mul_ps
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm_add_ps(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = _mm_add_ps(x[i], x[offset+i]); \
+     } \
+     const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
+     res = (wsp_ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
+ }
+ // TODO: is this optimal ?
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 SSE
+
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 4
+
+ static inline __m128 __sse_f16x4_load(const wsp_ggml_fp16_t * x) {
+     float tmp[4];
+
+     tmp[0] = WSP_GGML_FP16_TO_FP32(x[0]);
+     tmp[1] = WSP_GGML_FP16_TO_FP32(x[1]);
+     tmp[2] = WSP_GGML_FP16_TO_FP32(x[2]);
+     tmp[3] = WSP_GGML_FP16_TO_FP32(x[3]);
+
+     return _mm_loadu_ps(tmp);
+ }
+
+ static inline void __sse_f16x4_store(wsp_ggml_fp16_t * x, __m128 y) {
+     float arr[4];
+
+     _mm_storeu_ps(arr, y);
+
+     x[0] = WSP_GGML_FP32_TO_FP16(arr[0]);
+     x[1] = WSP_GGML_FP32_TO_FP16(arr[1]);
+     x[2] = WSP_GGML_FP32_TO_FP16(arr[2]);
+     x[3] = WSP_GGML_FP32_TO_FP16(arr[3]);
+ }
+
+ #define WSP_GGML_F32Cx4 __m128
+ #define WSP_GGML_F32Cx4_ZERO _mm_setzero_ps()
+ #define WSP_GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
+ #define WSP_GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
+ #define WSP_GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
+ #define WSP_GGML_F32Cx4_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32Cx4_ADD _mm_add_ps
+ #define WSP_GGML_F32Cx4_MUL _mm_mul_ps
+ #define WSP_GGML_F32Cx4_REDUCE WSP_GGML_F32x4_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx4_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx4_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx4_REDUCE
+
+ #elif defined(__loongarch_asx)
+
+ #define WSP_GGML_SIMD
+
+ // F32 LASX
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 8
+
+ #define WSP_GGML_F32x8 __m256
+ #define WSP_GGML_F32x8_ZERO (__m256)__lasx_xvldi(0)
+ #define WSP_GGML_F32x8_SET1(x) (__m256)__lasx_xvreplfr2vr_s((x))
+ #define WSP_GGML_F32x8_LOAD(x) (__m256)__lasx_xvld((x), 0)
+ #define WSP_GGML_F32x8_STORE(x,y) __lasx_xvst((y), (x), 0)
+ #define WSP_GGML_F32x8_FMA(a, b, c) __lasx_xvfmadd_s(b, c, a)
+ #define WSP_GGML_F32x8_ADD __lasx_xvfadd_s
+ #define WSP_GGML_F32x8_MUL __lasx_xvfmul_s
+ #define WSP_GGML_F32x8_REDUCE(res, x) \
+ do { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lasx_xvfadd_s(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lasx_xvfadd_s(x[i], x[offset+i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lasx_xvfadd_s(x[i], x[offset+i]); \
+     } \
+     float *tmp_p = (float *)&x[0]; \
+     res = tmp_p[0] + tmp_p[1] + tmp_p[2] + tmp_p[3] + tmp_p[4] + tmp_p[5] + tmp_p[6] + tmp_p[7]; \
+ } while (0)
+ // TODO: is this optimal ?
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x8
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x8_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x8_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x8_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x8_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x8_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x8_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x8_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x8_REDUCE
+
+ // F16 LASX
+
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 8
+
+ // F16 arithmetic is not supported by LASX, so we use F32 instead
+
+ #define WSP_GGML_F32Cx8 __m256
+ #define WSP_GGML_F32Cx8_ZERO (__m256)__lasx_xvldi(0)
+ #define WSP_GGML_F32Cx8_SET1(x) (__m256)__lasx_xvreplgr2vr_w((x))
+
+ static inline __m256 __lasx_f32cx8_load(const wsp_ggml_fp16_t * x) {
+     __m256i a;
+     memcpy(&a, x, sizeof(wsp_ggml_fp16_t) * 8);
+     a = __lasx_xvpermi_d(a, 0 | (1 << 4));
+     return __lasx_xvfcvtl_s_h(a);
+ }
+
+ static inline void __lasx_f32cx8_store(wsp_ggml_fp16_t * x, __m256 y) {
+     __m256i a = __lasx_xvfcvt_h_s(y, y);
+     a = __lasx_xvpermi_d(a, 0 | (2 << 2));
+     memcpy(x, &a, sizeof(wsp_ggml_fp16_t) * 8);
+ }
+ #define WSP_GGML_F32Cx8_LOAD(x) __lasx_f32cx8_load(x)
+ #define WSP_GGML_F32Cx8_STORE(x, y) __lasx_f32cx8_store(x, y)
+
+ #define WSP_GGML_F32Cx8_FMA WSP_GGML_F32x8_FMA
+ #define WSP_GGML_F32Cx8_ADD __lasx_xvfadd_s
+ #define WSP_GGML_F32Cx8_MUL __lasx_xvfmul_s
+ #define WSP_GGML_F32Cx8_REDUCE WSP_GGML_F32x8_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx8
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx8_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx8_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx8_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx8_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx8_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx8_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx8_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx8_REDUCE
+
+ #elif defined(__loongarch_sx)
+
+ #define WSP_GGML_SIMD
+
+ // F32 LSX
+
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 __m128
+ #define WSP_GGML_F32x4_ZERO __lsx_vldi(0)
+ #define WSP_GGML_F32x4_SET1(x) __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0)
+ #define WSP_GGML_F32x4_LOAD(x) __lsx_vld((x), 0)
+ #define WSP_GGML_F32x4_STORE((x),(y)) __lsx_vst((y), (x), 0)
+ #define WSP_GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a)
+ #define WSP_GGML_F32x4_ADD __lsx_vfadd_s
+ #define WSP_GGML_F32x4_MUL __lsx_vfmul_s
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+     } \
+     __m128i tmp = __lsx_vsrli_d((__m128i) x[0], 32); \
+     tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, x[0]); \
+     tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
+     const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88); \
+     tmp = __lsx_vsrli_d((__m128i) t0, 32); \
+     tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, t0); \
+     tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
+     res = (wsp_ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
+ }
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 LSX
+
+ #define WSP_GGML_F16_STEP 32
+ #define WSP_GGML_F16_EPR 4
+
+ static inline __m128 __lsx_f16x4_load(const wsp_ggml_fp16_t * x) {
+     float tmp[4];
+
+     tmp[0] = WSP_GGML_FP16_TO_FP32(x[0]);
+     tmp[1] = WSP_GGML_FP16_TO_FP32(x[1]);
+     tmp[2] = WSP_GGML_FP16_TO_FP32(x[2]);
+     tmp[3] = WSP_GGML_FP16_TO_FP32(x[3]);
+
+     return __lsx_vld(tmp, 0);
+ }
+
+ static inline void __lsx_f16x4_store(wsp_ggml_fp16_t * x, __m128 y) {
+     float arr[4];
+
+     __lsx_vst(y, arr, 0);
+
+     x[0] = WSP_GGML_FP32_TO_FP16(arr[0]);
+     x[1] = WSP_GGML_FP32_TO_FP16(arr[1]);
+     x[2] = WSP_GGML_FP32_TO_FP16(arr[2]);
+     x[3] = WSP_GGML_FP32_TO_FP16(arr[3]);
+ }
+
+ #define WSP_GGML_F32Cx4 __m128
+ #define WSP_GGML_F32Cx4_ZERO __lsx_vldi(0)
+ #define WSP_GGML_F32Cx4_SET1(x) __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0)
+ #define WSP_GGML_F32Cx4_LOAD(x) __lsx_f16x4_load(x)
+ #define WSP_GGML_F32Cx4_STORE(x, y) __lsx_f16x4_store(x, y)
+ #define WSP_GGML_F32Cx4_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32Cx4_ADD __lsx_vfadd_s
+ #define WSP_GGML_F32Cx4_MUL __lsx_vfmul_s
+ #define WSP_GGML_F32Cx4_REDUCE WSP_GGML_F32x4_REDUCE
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32Cx4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32Cx4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32Cx4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) WSP_GGML_F32Cx4_LOAD(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) WSP_GGML_F32Cx4_STORE(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32Cx4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32Cx4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32Cx4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32Cx4_REDUCE
+
+ #elif defined(__VXE__) || defined(__VXE2__)
+
+ #define WSP_GGML_SIMD
+
+ // F32 s390x
+
+ #define WSP_GGML_F32_STEP 32
+ #define WSP_GGML_F32_EPR 4
+
+ #define WSP_GGML_F32x4 __vector float
+ #define WSP_GGML_F32x4_ZERO vec_splats(0.0f)
+ #define WSP_GGML_F32x4_SET1 vec_splats
+ #define WSP_GGML_F32x4_LOAD(p) vec_xl(0, p)
+ #define WSP_GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
+ #define WSP_GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
+ #define WSP_GGML_F32x4_ADD vec_add
+ #define WSP_GGML_F32x4_MUL vec_mul
+ #define WSP_GGML_F32x4_REDUCE(res, x) \
+ { \
+     int offset = WSP_GGML_F32_ARR >> 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset + i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset + i]); \
+     } \
+     offset >>= 1; \
+     for (int i = 0; i < offset; ++i) { \
+         x[i] = vec_add(x[i], x[offset + i]); \
+     } \
+     float32x4_t tmp = x[0] + vec_reve(x[0]); \
+     res = tmp[0] + tmp[1]; \
+ }
+
+ #define WSP_GGML_F32_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F32_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F32_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F32_VEC_LOAD WSP_GGML_F32x4_LOAD
+ #define WSP_GGML_F32_VEC_STORE WSP_GGML_F32x4_STORE
+ #define WSP_GGML_F32_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F32_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F32_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F32_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ // F16 s390x
+ #define WSP_GGML_F16_STEP WSP_GGML_F32_STEP
+ #define WSP_GGML_F16_EPR WSP_GGML_F32_EPR
+
+ static inline __vector float __lzs_f16cx4_load(const wsp_ggml_fp16_t * x) {
+     float tmp[4];
+
+     for (int i = 0; i < 4; i++) {
+         tmp[i] = WSP_GGML_FP16_TO_FP32(x[i]);
+     }
+
+     // note: keep type-cast here to prevent compiler bugs
+     // see: https://github.com/ggml-org/llama.cpp/issues/12846
+     return vec_xl(0, (const float *)(tmp));
+ }
+
+ static inline void __lzs_f16cx4_store(wsp_ggml_fp16_t * x, __vector float y) {
+     float arr[4];
+
+     // note: keep type-cast here to prevent compiler bugs
+     // see: https://github.com/ggml-org/llama.cpp/issues/12846
+     vec_xst(y, 0, (float *)(arr));
+
+     for (int i = 0; i < 4; i++) {
+         x[i] = WSP_GGML_FP32_TO_FP16(arr[i]);
+     }
+ }
+
+ #define WSP_GGML_F16_VEC WSP_GGML_F32x4
+ #define WSP_GGML_F16_VEC_ZERO WSP_GGML_F32x4_ZERO
+ #define WSP_GGML_F16_VEC_SET1 WSP_GGML_F32x4_SET1
+ #define WSP_GGML_F16_VEC_LOAD(p, i) __lzs_f16cx4_load(p)
+ #define WSP_GGML_F16_VEC_STORE(p, r, i) __lzs_f16cx4_store(p, r[i])
+ #define WSP_GGML_F16_VEC_FMA WSP_GGML_F32x4_FMA
+ #define WSP_GGML_F16_VEC_ADD WSP_GGML_F32x4_ADD
+ #define WSP_GGML_F16_VEC_MUL WSP_GGML_F32x4_MUL
+ #define WSP_GGML_F16_VEC_REDUCE WSP_GGML_F32x4_REDUCE
+
+ #endif
+
+ // WSP_GGML_F32_ARR / WSP_GGML_F16_ARR
+ // number of registers to use per step
+ #ifdef WSP_GGML_SIMD
+ #define WSP_GGML_F32_ARR (WSP_GGML_F32_STEP/WSP_GGML_F32_EPR)
+ #define WSP_GGML_F16_ARR (WSP_GGML_F16_STEP/WSP_GGML_F16_EPR)
+ #endif
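
Editor's note (not part of the diff): for orientation, the sketch below shows how ggml-style kernels typically consume these mappings on the fixed-width paths, assuming WSP_GGML_SIMD is defined and wsp_ggml_float comes from ggml-impl.h. On the AVX branch, for example, WSP_GGML_F32_STEP 32 and WSP_GGML_F32_EPR 8 give WSP_GGML_F32_ARR = 32/8 = 4 accumulator registers per step. The function name example_vec_dot_f32 is hypothetical; the macros are the ones defined in the header above.

// illustrative sketch only; mirrors the usual ggml vec_dot pattern
static void example_vec_dot_f32(int n, float * s, const float * x, const float * y) {
    wsp_ggml_float sumf = 0.0;

    const int np = (n & ~(WSP_GGML_F32_STEP - 1)); // largest multiple of the step size

    WSP_GGML_F32_VEC sum[WSP_GGML_F32_ARR] = { WSP_GGML_F32_VEC_ZERO };
    WSP_GGML_F32_VEC ax[WSP_GGML_F32_ARR];
    WSP_GGML_F32_VEC ay[WSP_GGML_F32_ARR];

    for (int i = 0; i < np; i += WSP_GGML_F32_STEP) {
        for (int j = 0; j < WSP_GGML_F32_ARR; j++) {
            // one register's worth per accumulator: sum[j] += x*y
            ax[j]  = WSP_GGML_F32_VEC_LOAD(x + i + j*WSP_GGML_F32_EPR);
            ay[j]  = WSP_GGML_F32_VEC_LOAD(y + i + j*WSP_GGML_F32_EPR);
            sum[j] = WSP_GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
        }
    }

    // collapse the WSP_GGML_F32_ARR partial sums into a scalar
    WSP_GGML_F32_VEC_REDUCE(sumf, sum);

    // leftover elements that do not fill a whole step
    for (int i = np; i < n; ++i) {
        sumf += (wsp_ggml_float)(x[i]*y[i]);
    }

    *s = (float) sumf;
}

Note that the SVE branch is the exception: it defines no WSP_GGML_F32_STEP and exposes variadic WSP_GGML_F32xt_* forms with an explicit predicate (DEFAULT_PG), so kernels handle that path separately.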