llama-cpp-capacitor 0.0.6 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. package/android/src/main/CMakeLists.txt +9 -9
  2. package/cpp/LICENSE +21 -0
  3. package/cpp/README.md +4 -0
  4. package/cpp/anyascii.c +22223 -0
  5. package/cpp/anyascii.h +42 -0
  6. package/cpp/chat-parser.cpp +393 -0
  7. package/cpp/chat-parser.h +120 -0
  8. package/cpp/chat.cpp +2315 -0
  9. package/cpp/chat.h +221 -0
  10. package/cpp/common.cpp +1619 -0
  11. package/cpp/common.h +744 -0
  12. package/cpp/ggml-alloc.c +1028 -0
  13. package/cpp/ggml-alloc.h +76 -0
  14. package/cpp/ggml-backend-impl.h +255 -0
  15. package/cpp/ggml-backend-reg.cpp +600 -0
  16. package/cpp/ggml-backend.cpp +2118 -0
  17. package/cpp/ggml-backend.h +354 -0
  18. package/cpp/ggml-common.h +1878 -0
  19. package/cpp/ggml-cpp.h +39 -0
  20. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  21. package/cpp/ggml-cpu/amx/amx.h +8 -0
  22. package/cpp/ggml-cpu/amx/common.h +91 -0
  23. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  24. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  25. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  26. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  27. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  28. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  29. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  30. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  31. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  32. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  33. package/cpp/ggml-cpu/binary-ops.h +16 -0
  34. package/cpp/ggml-cpu/common.h +73 -0
  35. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  36. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  37. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  38. package/cpp/ggml-cpu/ops.cpp +10587 -0
  39. package/cpp/ggml-cpu/ops.h +114 -0
  40. package/cpp/ggml-cpu/quants.c +1193 -0
  41. package/cpp/ggml-cpu/quants.h +97 -0
  42. package/cpp/ggml-cpu/repack.cpp +1982 -0
  43. package/cpp/ggml-cpu/repack.h +120 -0
  44. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  45. package/cpp/ggml-cpu/traits.cpp +36 -0
  46. package/cpp/ggml-cpu/traits.h +38 -0
  47. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  48. package/cpp/ggml-cpu/unary-ops.h +28 -0
  49. package/cpp/ggml-cpu/vec.cpp +348 -0
  50. package/cpp/ggml-cpu/vec.h +1121 -0
  51. package/cpp/ggml-cpu.h +145 -0
  52. package/cpp/ggml-impl.h +622 -0
  53. package/cpp/ggml-metal-impl.h +688 -0
  54. package/cpp/ggml-metal.h +66 -0
  55. package/cpp/ggml-metal.m +6833 -0
  56. package/cpp/ggml-opt.cpp +1093 -0
  57. package/cpp/ggml-opt.h +256 -0
  58. package/cpp/ggml-quants.c +5324 -0
  59. package/cpp/ggml-quants.h +106 -0
  60. package/cpp/ggml-threading.cpp +12 -0
  61. package/cpp/ggml-threading.h +14 -0
  62. package/cpp/ggml.c +7108 -0
  63. package/cpp/ggml.h +2492 -0
  64. package/cpp/gguf.cpp +1358 -0
  65. package/cpp/gguf.h +202 -0
  66. package/cpp/json-partial.cpp +256 -0
  67. package/cpp/json-partial.h +38 -0
  68. package/cpp/json-schema-to-grammar.cpp +985 -0
  69. package/cpp/json-schema-to-grammar.h +21 -0
  70. package/cpp/llama-adapter.cpp +388 -0
  71. package/cpp/llama-adapter.h +76 -0
  72. package/cpp/llama-arch.cpp +2355 -0
  73. package/cpp/llama-arch.h +499 -0
  74. package/cpp/llama-batch.cpp +875 -0
  75. package/cpp/llama-batch.h +160 -0
  76. package/cpp/llama-chat.cpp +783 -0
  77. package/cpp/llama-chat.h +65 -0
  78. package/cpp/llama-context.cpp +2748 -0
  79. package/cpp/llama-context.h +306 -0
  80. package/cpp/llama-cparams.cpp +5 -0
  81. package/cpp/llama-cparams.h +41 -0
  82. package/cpp/llama-cpp.h +30 -0
  83. package/cpp/llama-grammar.cpp +1229 -0
  84. package/cpp/llama-grammar.h +173 -0
  85. package/cpp/llama-graph.cpp +1891 -0
  86. package/cpp/llama-graph.h +810 -0
  87. package/cpp/llama-hparams.cpp +180 -0
  88. package/cpp/llama-hparams.h +233 -0
  89. package/cpp/llama-impl.cpp +167 -0
  90. package/cpp/llama-impl.h +61 -0
  91. package/cpp/llama-io.cpp +15 -0
  92. package/cpp/llama-io.h +35 -0
  93. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  94. package/cpp/llama-kv-cache-iswa.h +135 -0
  95. package/cpp/llama-kv-cache.cpp +2059 -0
  96. package/cpp/llama-kv-cache.h +374 -0
  97. package/cpp/llama-kv-cells.h +491 -0
  98. package/cpp/llama-memory-hybrid.cpp +258 -0
  99. package/cpp/llama-memory-hybrid.h +137 -0
  100. package/cpp/llama-memory-recurrent.cpp +1146 -0
  101. package/cpp/llama-memory-recurrent.h +179 -0
  102. package/cpp/llama-memory.cpp +59 -0
  103. package/cpp/llama-memory.h +119 -0
  104. package/cpp/llama-mmap.cpp +600 -0
  105. package/cpp/llama-mmap.h +68 -0
  106. package/cpp/llama-model-loader.cpp +1164 -0
  107. package/cpp/llama-model-loader.h +170 -0
  108. package/cpp/llama-model-saver.cpp +282 -0
  109. package/cpp/llama-model-saver.h +37 -0
  110. package/cpp/llama-model.cpp +19042 -0
  111. package/cpp/llama-model.h +491 -0
  112. package/cpp/llama-sampling.cpp +2575 -0
  113. package/cpp/llama-sampling.h +32 -0
  114. package/cpp/llama-vocab.cpp +3792 -0
  115. package/cpp/llama-vocab.h +176 -0
  116. package/cpp/llama.cpp +358 -0
  117. package/cpp/llama.h +1373 -0
  118. package/cpp/log.cpp +427 -0
  119. package/cpp/log.h +103 -0
  120. package/cpp/minja/chat-template.hpp +550 -0
  121. package/cpp/minja/minja.hpp +3009 -0
  122. package/cpp/nlohmann/json.hpp +25526 -0
  123. package/cpp/nlohmann/json_fwd.hpp +187 -0
  124. package/cpp/regex-partial.cpp +204 -0
  125. package/cpp/regex-partial.h +56 -0
  126. package/cpp/rn-completion.cpp +681 -0
  127. package/cpp/rn-completion.h +116 -0
  128. package/cpp/rn-llama.cpp +345 -0
  129. package/cpp/rn-llama.h +149 -0
  130. package/cpp/rn-mtmd.hpp +602 -0
  131. package/cpp/rn-tts.cpp +591 -0
  132. package/cpp/rn-tts.h +59 -0
  133. package/cpp/sampling.cpp +579 -0
  134. package/cpp/sampling.h +107 -0
  135. package/cpp/tools/mtmd/clip-impl.h +473 -0
  136. package/cpp/tools/mtmd/clip.cpp +4322 -0
  137. package/cpp/tools/mtmd/clip.h +106 -0
  138. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  139. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  140. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  141. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  142. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  143. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  144. package/cpp/tools/mtmd/mtmd.h +298 -0
  145. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  146. package/cpp/unicode-data.cpp +7034 -0
  147. package/cpp/unicode-data.h +20 -0
  148. package/cpp/unicode.cpp +1061 -0
  149. package/cpp/unicode.h +68 -0
  150. package/package.json +2 -1
@@ -0,0 +1,1184 @@
1
+ #pragma once
2
+
3
+ #include "ggml-cpu-impl.h"
4
+
5
+ #ifdef __ARM_FEATURE_SVE
6
+ #include <arm_sve.h>
7
+ #endif // __ARM_FEATURE_SVE
8
+
9
+ #if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
10
+ // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
11
+ //
12
+ // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
13
+ //
14
+ #include <arm_neon.h>
15
+ #endif
16
+
17
+ #if defined(__F16C__)
18
+ #include <immintrin.h>
19
+ #endif
20
+
21
+ #ifdef __cplusplus
22
+ extern "C" {
23
+ #endif
24
+
25
+ //
26
+ // simd mappings
27
+ //
28
+
29
+ // FP16 to FP32 conversion
30
+
31
+ // 16-bit float
32
+ // on Arm, we use __fp16
33
+ // on x86, we use uint16_t
34
+ //
35
+ // for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
36
+ // for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
37
+ //
38
+ #if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
39
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) neon_compute_fp16_to_fp32(x)
40
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) neon_compute_fp32_to_fp16(x)
41
+
42
+ #define LM_GGML_CPU_FP16_TO_FP32(x) LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x)
43
+
44
+ static inline float neon_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
45
+ __fp16 tmp;
46
+ memcpy(&tmp, &h, sizeof(lm_ggml_fp16_t));
47
+ return (float)tmp;
48
+ }
49
+
50
+ static inline lm_ggml_fp16_t neon_compute_fp32_to_fp16(float f) {
51
+ lm_ggml_fp16_t res;
52
+ __fp16 tmp = f;
53
+ memcpy(&res, &tmp, sizeof(lm_ggml_fp16_t));
54
+ return res;
55
+ }
56
+ #elif defined(__F16C__)
57
+ #ifdef _MSC_VER
58
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
59
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
60
+ #else
61
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
62
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
63
+ #endif
64
+ #elif defined(__POWER9_VECTOR__)
65
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) power_compute_fp16_to_fp32(x)
66
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) power_compute_fp32_to_fp16(x)
67
+ /* the inline asm below is about 12% faster than the lookup method */
68
+ #define LM_GGML_CPU_FP16_TO_FP32(x) LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x)
69
+ #define LM_GGML_CPU_FP32_TO_FP16(x) LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x)
70
+
71
+ static inline float power_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
72
+ float f;
73
+ double d;
74
+ __asm__(
75
+ "mtfprd %0,%2\n"
76
+ "xscvhpdp %0,%0\n"
77
+ "frsp %1,%0\n" :
78
+ /* temp */ "=d"(d),
79
+ /* out */ "=f"(f):
80
+ /* in */ "r"(h));
81
+ return f;
82
+ }
83
+
84
+ static inline lm_ggml_fp16_t power_compute_fp32_to_fp16(float f) {
85
+ double d;
86
+ lm_ggml_fp16_t r;
87
+ __asm__( /* xscvdphp can work on double or single precision */
88
+ "xscvdphp %0,%2\n"
89
+ "mffprd %1,%0\n" :
90
+ /* temp */ "=d"(d),
91
+ /* out */ "=r"(r):
92
+ /* in */ "f"(f));
93
+ return r;
94
+ }
95
+ #elif defined(__riscv) && defined(__riscv_zfhmin)
96
+ static inline float riscv_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
97
+ float f;
98
+ __asm__(
99
+ "fmv.h.x %[f], %[h]\n\t"
100
+ "fcvt.s.h %[f], %[f]"
101
+ : [f] "=&f" (f)
102
+ : [h] "r" (h)
103
+ );
104
+ return f;
105
+ }
106
+
107
+ static inline lm_ggml_fp16_t riscv_compute_fp32_to_fp16(float f) {
108
+ lm_ggml_fp16_t res;
109
+ __asm__(
110
+ "fcvt.h.s %[f], %[f]\n\t"
111
+ "fmv.x.h %[h], %[f]"
112
+ : [h] "=&r" (res)
113
+ : [f] "f" (f)
114
+ );
115
+ return res;
116
+ }
117
+
118
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) riscv_compute_fp16_to_fp32(x)
119
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) riscv_compute_fp32_to_fp16(x)
120
+ #define LM_GGML_CPU_FP16_TO_FP32(x) LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x)
121
+ #define LM_GGML_CPU_FP32_TO_FP16(x) LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x)
122
+ #elif defined(__NNPA__)
123
+ #define LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x) nnpa_compute_fp16_to_fp32(x)
124
+ #define LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x) nnpa_compute_fp32_to_fp16(x)
125
+
126
+ #define LM_GGML_CPU_FP16_TO_FP32(x) LM_GGML_CPU_COMPUTE_FP16_TO_FP32(x)
127
+ #define LM_GGML_CPU_FP32_TO_FP16(x) LM_GGML_CPU_COMPUTE_FP32_TO_FP16(x)
128
+
129
+ static inline float nnpa_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
130
+ uint16x8_t v_h = vec_splats(h);
131
+ uint16x8_t v_hd = vec_convert_from_fp16(v_h, 0);
132
+ return vec_extend_to_fp32_hi(v_hd, 0)[0];
133
+ }
134
+
135
+ static inline lm_ggml_fp16_t nnpa_compute_fp32_to_fp16(float f) {
136
+ float32x4_t v_f = vec_splats(f);
137
+ float32x4_t v_zero = vec_splats(0.0f);
138
+ uint16x8_t v_hd = vec_round_from_fp32(v_f, v_zero, 0);
139
+ uint16x8_t v_h = vec_convert_to_fp16(v_hd, 0);
140
+ return vec_extract(v_h, 0);
141
+ }
142
+ #endif
143
+
144
+ // precomputed f32 table for f16 (256 KB)
145
+ // defined in ggml-cpu.c, initialized in lm_ggml_cpu_init()
146
+ extern float lm_ggml_table_f32_f16[1 << 16];
147
+
148
+ // On ARM NEON, it's quicker to directly convert x -> x instead of calling into lm_ggml_lookup_fp16_to_fp32,
149
+ // so we define LM_GGML_CPU_FP16_TO_FP32 and LM_GGML_CPU_FP32_TO_FP16 elsewhere for NEON.
150
+ // This is also true for POWER9.
151
+ #if !defined(LM_GGML_CPU_FP16_TO_FP32)
152
+ inline static float lm_ggml_lookup_fp16_to_fp32(lm_ggml_fp16_t f) {
153
+ uint16_t s;
154
+ memcpy(&s, &f, sizeof(uint16_t));
155
+ return lm_ggml_table_f32_f16[s];
156
+ }
157
+
158
+ #define LM_GGML_CPU_FP16_TO_FP32(x) lm_ggml_lookup_fp16_to_fp32(x)
159
+ #endif
160
+
161
+ #if !defined(LM_GGML_CPU_FP32_TO_FP16)
162
+ #define LM_GGML_CPU_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
163
+ #endif
164
+
165
+
166
+ // we define a common set of C macros which map to specific intrinsics based on the current architecture
167
+ // we then implement the fundamental computation operations below using only these macros
168
+ // adding support for new architectures requires to define the corresponding SIMD macros
169
+ //
170
+ // LM_GGML_F32_STEP / LM_GGML_F16_STEP
171
+ // number of elements to process in a single step
172
+ //
173
+ // LM_GGML_F32_EPR / LM_GGML_F16_EPR
174
+ // number of elements to fit in a single register
175
+ //
176
+
177
+ #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA)
178
+
179
+ #define LM_GGML_SIMD
180
+
181
+ // F32 SVE
182
+ #define LM_GGML_F32_EPR 8
183
+ #define DEFAULT_PG svptrue_b32()
184
+
185
+ #define LM_GGML_F32xt svfloat32_t
186
+ #define LM_GGML_F32xt_ZERO svdup_n_f32(0.0f)
187
+ #define LM_GGML_F32xt_SET1(x) svdup_n_f32(x)
188
+ #define LM_GGML_F32xt_LOAD_IMPL(pg, a, ...) svld1_f32(pg, a)
189
+ #define LM_GGML_F32xt_LOAD(...) LM_GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__)
190
+ #define LM_GGML_F32xt_STORE_IMPL(pg,a,b) svst1_f32(pg, a, b)
191
+ #define LM_GGML_F32xt_STORE(...) LM_GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__)
192
+ #define LM_GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, b, c, a)
193
+ #define LM_GGML_F32xt_FMA(...) LM_GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__)
194
+ #define LM_GGML_F32xt_ADD_IMPL(pg, a, b) svadd_f32_m(pg, a, b)
195
+ #define LM_GGML_F32xt_ADD(...) LM_GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__)
196
+ #define LM_GGML_F32xt_MUL_IMPL(pg, a, b) svmul_f32_m(pg, a, b)
197
+ #define LM_GGML_F32xt_MUL(...) LM_GGML_F32xt_MUL_IMPL(DEFAULT_PG, __VA_ARGS__)
198
+ #define LM_GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a)
199
+ #define LM_GGML_F32xt_REDUCE_ONE(...) LM_GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, __VA_ARGS__)
200
+ #define LM_GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \
201
+ { \
202
+ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2); \
203
+ sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4); \
204
+ sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6); \
205
+ sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8); \
206
+ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3); \
207
+ sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7); \
208
+ sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5); \
209
+ (res) = (lm_ggml_float) LM_GGML_F32xt_REDUCE_ONE(sum1); \
210
+ }
211
+ #define LM_GGML_F32xt_REDUCE(...) LM_GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, __VA_ARGS__)
212
+
213
+ #define LM_GGML_F32_VEC LM_GGML_F32xt
214
+ #define LM_GGML_F32_VEC_ZERO LM_GGML_F32xt_ZERO
215
+ #define LM_GGML_F32_VEC_SET1 LM_GGML_F32xt_SET1
216
+ #define LM_GGML_F32_VEC_LOAD LM_GGML_F32xt_LOAD
217
+ #define LM_GGML_F32_VEC_STORE LM_GGML_F32xt_STORE
218
+ #define LM_GGML_F32_VEC_FMA LM_GGML_F32xt_FMA
219
+ #define LM_GGML_F32_VEC_ADD LM_GGML_F32xt_ADD
220
+ #define LM_GGML_F32_VEC_MUL LM_GGML_F32xt_MUL
221
+ #define LM_GGML_F32_VEC_REDUCE LM_GGML_F32xt_REDUCE
222
+
223
+ // F16 NEON
224
+
225
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
226
+ #define LM_GGML_F16_STEP 32
227
+ #define LM_GGML_F16_EPR 8
228
+
229
+ #define LM_GGML_F16x8 float16x8_t
230
+ #define LM_GGML_F16x8_ZERO vdupq_n_f16(0.0f)
231
+ #define LM_GGML_F16x8_SET1(x) vdupq_n_f16(x)
232
+ #define LM_GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x))
233
+ #define LM_GGML_F16x8_STORE vst1q_f16
234
+ #define LM_GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
235
+ #define LM_GGML_F16x8_ADD vaddq_f16
236
+ #define LM_GGML_F16x8_MUL vmulq_f16
237
+ #define LM_GGML_F16x8_REDUCE(res, x) \
238
+ do { \
239
+ int offset = LM_GGML_F16_ARR >> 1; \
240
+ for (int i = 0; i < offset; ++i) { \
241
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
242
+ } \
243
+ offset >>= 1; \
244
+ for (int i = 0; i < offset; ++i) { \
245
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
246
+ } \
247
+ offset >>= 1; \
248
+ for (int i = 0; i < offset; ++i) { \
249
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
250
+ } \
251
+ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \
252
+ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \
253
+ (res) = (lm_ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
254
+ } while (0)
255
+
256
+ #define LM_GGML_F16_VEC LM_GGML_F16x8
257
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F16x8_ZERO
258
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F16x8_SET1
259
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F16x8_LOAD(p)
260
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F16x8_STORE((__fp16 *)(p), (r)[i])
261
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F16x8_FMA
262
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F16x8_ADD
263
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F16x8_MUL
264
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F16x8_REDUCE
265
+ #else
266
+ // if FP16 vector arithmetic is not supported, we use FP32 instead
267
+ // and take advantage of the vcvt_ functions to convert to/from FP16
268
+
269
+ #define LM_GGML_F16_STEP 16
270
+ #define LM_GGML_F16_EPR 4
271
+
272
+ #define LM_GGML_F32Cx4 float32x4_t
273
+ #define LM_GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
274
+ #define LM_GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
275
+ #define LM_GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x)))
276
+ #define LM_GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
277
+ #define LM_GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
278
+ #define LM_GGML_F32Cx4_ADD vaddq_f32
279
+ #define LM_GGML_F32Cx4_MUL vmulq_f32
280
+ #define LM_GGML_F32Cx4_REDUCE LM_GGML_F32x4_REDUCE
281
+
282
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx4
283
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx4_ZERO
284
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx4_SET1
285
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx4_LOAD(p)
286
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx4_STORE((__fp16 *)(p), r[i])
287
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx4_FMA
288
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx4_ADD
289
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx4_MUL
290
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx4_REDUCE
291
+ #endif
292
+
293
+ #elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
294
+
295
+ #define LM_GGML_SIMD
296
+
297
+ // F32 NEON
298
+
299
+ #define LM_GGML_F32_STEP 16
300
+ #define LM_GGML_F32_EPR 4
301
+
302
+ #define LM_GGML_F32x4 float32x4_t
303
+ #define LM_GGML_F32x4_ZERO vdupq_n_f32(0.0f)
304
+ #define LM_GGML_F32x4_SET1(x) vdupq_n_f32(x)
305
+ #define LM_GGML_F32x4_LOAD vld1q_f32
306
+ #define LM_GGML_F32x4_STORE vst1q_f32
307
+ #define LM_GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c)
308
+ #define LM_GGML_F32x4_ADD vaddq_f32
309
+ #define LM_GGML_F32x4_MUL vmulq_f32
310
+ #define LM_GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
311
+ #define LM_GGML_F32x4_REDUCE(res, x) \
312
+ { \
313
+ int offset = LM_GGML_F32_ARR >> 1; \
314
+ for (int i = 0; i < offset; ++i) { \
315
+ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
316
+ } \
317
+ offset >>= 1; \
318
+ for (int i = 0; i < offset; ++i) { \
319
+ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
320
+ } \
321
+ offset >>= 1; \
322
+ for (int i = 0; i < offset; ++i) { \
323
+ (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
324
+ } \
325
+ (res) = (lm_ggml_float) LM_GGML_F32x4_REDUCE_ONE((x)[0]); \
326
+ }
327
+
328
+ #define LM_GGML_F32_VEC LM_GGML_F32x4
329
+ #define LM_GGML_F32_VEC_ZERO LM_GGML_F32x4_ZERO
330
+ #define LM_GGML_F32_VEC_SET1 LM_GGML_F32x4_SET1
331
+ #define LM_GGML_F32_VEC_LOAD LM_GGML_F32x4_LOAD
332
+ #define LM_GGML_F32_VEC_STORE LM_GGML_F32x4_STORE
333
+ #define LM_GGML_F32_VEC_FMA LM_GGML_F32x4_FMA
334
+ #define LM_GGML_F32_VEC_ADD LM_GGML_F32x4_ADD
335
+ #define LM_GGML_F32_VEC_MUL LM_GGML_F32x4_MUL
336
+ #define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE
337
+
338
+ // F16 NEON
339
+
340
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
341
+ #define LM_GGML_F16_STEP 32
342
+ #define LM_GGML_F16_EPR 8
343
+
344
+ #define LM_GGML_F16x8 float16x8_t
345
+ #define LM_GGML_F16x8_ZERO vdupq_n_f16(0.0f)
346
+ #define LM_GGML_F16x8_SET1(x) vdupq_n_f16(x)
347
+ #define LM_GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x))
348
+ #define LM_GGML_F16x8_STORE vst1q_f16
349
+ #define LM_GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
350
+ #define LM_GGML_F16x8_ADD vaddq_f16
351
+ #define LM_GGML_F16x8_MUL vmulq_f16
352
+ #define LM_GGML_F16x8_REDUCE(res, x) \
353
+ do { \
354
+ int offset = LM_GGML_F16_ARR >> 1; \
355
+ for (int i = 0; i < offset; ++i) { \
356
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
357
+ } \
358
+ offset >>= 1; \
359
+ for (int i = 0; i < offset; ++i) { \
360
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
361
+ } \
362
+ offset >>= 1; \
363
+ for (int i = 0; i < offset; ++i) { \
364
+ (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \
365
+ } \
366
+ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \
367
+ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \
368
+ (res) = (lm_ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
369
+ } while (0)
370
+
371
+ #define LM_GGML_F16_VEC LM_GGML_F16x8
372
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F16x8_ZERO
373
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F16x8_SET1
374
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F16x8_LOAD(p)
375
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F16x8_STORE((__fp16 *)(p), (r)[i])
376
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F16x8_FMA
377
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F16x8_ADD
378
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F16x8_MUL
379
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F16x8_REDUCE
380
+ #else
381
+ // if FP16 vector arithmetic is not supported, we use FP32 instead
382
+ // and take advantage of the vcvt_ functions to convert to/from FP16
383
+
384
+ #define LM_GGML_F16_STEP 16
385
+ #define LM_GGML_F16_EPR 4
386
+
387
+ #define LM_GGML_F32Cx4 float32x4_t
388
+ #define LM_GGML_F32Cx4_ZERO vdupq_n_f32(0.0f)
389
+ #define LM_GGML_F32Cx4_SET1(x) vdupq_n_f32(x)
390
+ #define LM_GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x)))
391
+ #define LM_GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y))
392
+ #define LM_GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
393
+ #define LM_GGML_F32Cx4_ADD vaddq_f32
394
+ #define LM_GGML_F32Cx4_MUL vmulq_f32
395
+ #define LM_GGML_F32Cx4_REDUCE LM_GGML_F32x4_REDUCE
396
+
397
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx4
398
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx4_ZERO
399
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx4_SET1
400
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx4_LOAD(p)
401
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx4_STORE((__fp16 *)(p), r[i])
402
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx4_FMA
403
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx4_ADD
404
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx4_MUL
405
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx4_REDUCE
406
+ #endif
407
+
408
+ #elif defined(__AVX512F__)
409
+
410
+ #define LM_GGML_SIMD
411
+
412
+ // F32 AVX512
413
+
414
+ #define LM_GGML_F32_STEP 64
415
+ #define LM_GGML_F32_EPR 16
416
+
417
+ #define LM_GGML_F32x16 __m512
418
+ #define LM_GGML_F32x16_ZERO _mm512_setzero_ps()
419
+ #define LM_GGML_F32x16_SET1(x) _mm512_set1_ps(x)
420
+ #define LM_GGML_F32x16_LOAD _mm512_loadu_ps
421
+ #define LM_GGML_F32x16_STORE _mm512_storeu_ps
422
+ // _mm512_fmadd_ps is defined in AVX512F so no guard is required
423
+ #define LM_GGML_F32x16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
424
+ #define LM_GGML_F32x16_ADD _mm512_add_ps
425
+ #define LM_GGML_F32x16_MUL _mm512_mul_ps
426
+ #define LM_GGML_F32x16_REDUCE(res, x) \
427
+ do { \
428
+ int offset = LM_GGML_F32_ARR >> 1; \
429
+ for (int i = 0; i < offset; ++i) { \
430
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
431
+ } \
432
+ offset >>= 1; \
433
+ for (int i = 0; i < offset; ++i) { \
434
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
435
+ } \
436
+ offset >>= 1; \
437
+ for (int i = 0; i < offset; ++i) { \
438
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
439
+ } \
440
+ res = (lm_ggml_float) _mm512_reduce_add_ps(x[0]); \
441
+ } while (0)
442
+
443
+ // TODO: is this optimal ?
444
+
445
+ #define LM_GGML_F32_VEC LM_GGML_F32x16
446
+ #define LM_GGML_F32_VEC_ZERO LM_GGML_F32x16_ZERO
447
+ #define LM_GGML_F32_VEC_SET1 LM_GGML_F32x16_SET1
448
+ #define LM_GGML_F32_VEC_LOAD LM_GGML_F32x16_LOAD
449
+ #define LM_GGML_F32_VEC_STORE LM_GGML_F32x16_STORE
450
+ #define LM_GGML_F32_VEC_FMA LM_GGML_F32x16_FMA
451
+ #define LM_GGML_F32_VEC_ADD LM_GGML_F32x16_ADD
452
+ #define LM_GGML_F32_VEC_MUL LM_GGML_F32x16_MUL
453
+ #define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x16_REDUCE
454
+
455
+ // F16 AVX512
456
+
457
+ // F16 AVX
458
+
459
+ #define LM_GGML_F16_STEP 64
460
+ #define LM_GGML_F16_EPR 16
461
+
462
+ // AVX512 has FP16 extension (AVX512_FP16) but I don't have it on my machine so I use FP32 instead
463
+
464
+ #define LM_GGML_F32Cx16 __m512
465
+ #define LM_GGML_F32Cx16_ZERO _mm512_setzero_ps()
466
+ #define LM_GGML_F32Cx16_SET1(x) _mm512_set1_ps(x)
467
+
468
+ // unlike _mm256_cvt intrinsics that require F16C, _mm512_cvt is defined in AVX512F
469
+ // so F16C guard isn't required
470
+ #define LM_GGML_F32Cx16_LOAD(x) _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(x)))
471
+ #define LM_GGML_F32Cx16_STORE(x, y) _mm256_storeu_si256((__m256i *)(x), _mm512_cvtps_ph(y, 0))
472
+
473
+ #define LM_GGML_F32Cx16_FMA(a, b, c) _mm512_fmadd_ps(b, c, a)
474
+ #define LM_GGML_F32Cx16_ADD _mm512_add_ps
475
+ #define LM_GGML_F32Cx16_MUL _mm512_mul_ps
476
+ #define LM_GGML_F32Cx16_REDUCE(res, x) \
477
+ do { \
478
+ int offset = LM_GGML_F32_ARR >> 1; \
479
+ for (int i = 0; i < offset; ++i) { \
480
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
481
+ } \
482
+ offset >>= 1; \
483
+ for (int i = 0; i < offset; ++i) { \
484
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
485
+ } \
486
+ offset >>= 1; \
487
+ for (int i = 0; i < offset; ++i) { \
488
+ x[i] = _mm512_add_ps(x[i], x[offset+i]); \
489
+ } \
490
+ res = (lm_ggml_float) _mm512_reduce_add_ps(x[0]); \
491
+ } while (0)
492
+
493
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx16
494
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx16_ZERO
495
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx16_SET1
496
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx16_LOAD(p)
497
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx16_STORE(p, r[i])
498
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx16_FMA
499
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx16_ADD
500
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx16_MUL
501
+
502
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx16_REDUCE
503
+ #elif defined(__AVX__)
504
+
505
+ #define LM_GGML_SIMD
506
+
507
+ // F32 AVX
508
+
509
+ #define LM_GGML_F32_STEP 32
510
+ #define LM_GGML_F32_EPR 8
511
+
512
+ #define LM_GGML_F32x8 __m256
513
+ #define LM_GGML_F32x8_ZERO _mm256_setzero_ps()
514
+ #define LM_GGML_F32x8_SET1(x) _mm256_set1_ps(x)
515
+ #define LM_GGML_F32x8_LOAD _mm256_loadu_ps
516
+ #define LM_GGML_F32x8_STORE _mm256_storeu_ps
517
+ #if defined(__FMA__)
518
+ #define LM_GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a)
519
+ #else
520
+ #define LM_GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a)
521
+ #endif
522
+ #define LM_GGML_F32x8_ADD _mm256_add_ps
523
+ #define LM_GGML_F32x8_MUL _mm256_mul_ps
524
+ #define LM_GGML_F32x8_REDUCE(res, x) \
525
+ do { \
526
+ int offset = LM_GGML_F32_ARR >> 1; \
527
+ for (int i = 0; i < offset; ++i) { \
528
+ x[i] = _mm256_add_ps(x[i], x[offset+i]); \
529
+ } \
530
+ offset >>= 1; \
531
+ for (int i = 0; i < offset; ++i) { \
532
+ x[i] = _mm256_add_ps(x[i], x[offset+i]); \
533
+ } \
534
+ offset >>= 1; \
535
+ for (int i = 0; i < offset; ++i) { \
536
+ x[i] = _mm256_add_ps(x[i], x[offset+i]); \
537
+ } \
538
+ const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
539
+ _mm256_extractf128_ps(x[0], 1)); \
540
+ const __m128 t1 = _mm_hadd_ps(t0, t0); \
541
+ res = (lm_ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
542
+ } while (0)
543
+ // TODO: is this optimal ?
544
+
545
+ #define LM_GGML_F32_VEC LM_GGML_F32x8
546
+ #define LM_GGML_F32_VEC_ZERO LM_GGML_F32x8_ZERO
547
+ #define LM_GGML_F32_VEC_SET1 LM_GGML_F32x8_SET1
548
+ #define LM_GGML_F32_VEC_LOAD LM_GGML_F32x8_LOAD
549
+ #define LM_GGML_F32_VEC_STORE LM_GGML_F32x8_STORE
550
+ #define LM_GGML_F32_VEC_FMA LM_GGML_F32x8_FMA
551
+ #define LM_GGML_F32_VEC_ADD LM_GGML_F32x8_ADD
552
+ #define LM_GGML_F32_VEC_MUL LM_GGML_F32x8_MUL
553
+ #define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x8_REDUCE
554
+
555
+ // F16 AVX
556
+
557
+ #define LM_GGML_F16_STEP 32
558
+ #define LM_GGML_F16_EPR 8
559
+
560
+ // F16 arithmetic is not supported by AVX, so we use F32 instead
561
+
562
+ #define LM_GGML_F32Cx8 __m256
563
+ #define LM_GGML_F32Cx8_ZERO _mm256_setzero_ps()
564
+ #define LM_GGML_F32Cx8_SET1(x) _mm256_set1_ps(x)
565
+
566
+ #if defined(__F16C__)
567
+ // the _mm256_cvt intrinsics require F16C
568
+ #define LM_GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
569
+ #define LM_GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
570
+ #else
571
+ static inline __m256 __avx_f32cx8_load(const lm_ggml_fp16_t * x) {
572
+ float tmp[8];
573
+
574
+ for (int i = 0; i < 8; i++) {
575
+ tmp[i] = LM_GGML_CPU_FP16_TO_FP32(x[i]);
576
+ }
577
+
578
+ return _mm256_loadu_ps(tmp);
579
+ }
580
+ static inline void __avx_f32cx8_store(lm_ggml_fp16_t *x, __m256 y) {
581
+ float arr[8];
582
+
583
+ _mm256_storeu_ps(arr, y);
584
+
585
+ for (int i = 0; i < 8; i++)
586
+ x[i] = LM_GGML_CPU_FP32_TO_FP16(arr[i]);
587
+ }
588
+ #define LM_GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x)
589
+ #define LM_GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y)
590
+ #endif
591
+
592
+ #define LM_GGML_F32Cx8_FMA LM_GGML_F32x8_FMA
593
+ #define LM_GGML_F32Cx8_ADD _mm256_add_ps
594
+ #define LM_GGML_F32Cx8_MUL _mm256_mul_ps
595
+ #define LM_GGML_F32Cx8_REDUCE LM_GGML_F32x8_REDUCE
596
+
597
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx8
598
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx8_ZERO
599
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx8_SET1
600
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx8_LOAD(p)
601
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx8_STORE(p, r[i])
602
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx8_FMA
603
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx8_ADD
604
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx8_MUL
605
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx8_REDUCE
606
+
607
#elif defined(__POWER9_VECTOR__)

#define LM_GGML_SIMD

// F32 POWER9 (VSX): 4 floats per 128-bit register, 32 floats per
// unrolled step (LM_GGML_F32_ARR = 8 accumulator registers).

#define LM_GGML_F32_STEP 32
#define LM_GGML_F32_EPR  4

#define LM_GGML_F32x4              vector float
#define LM_GGML_F32x4_ZERO         {0.0f}
#define LM_GGML_F32x4_SET1         vec_splats
#define LM_GGML_F32x4_LOAD(p)      vec_xl(0, p)
#define LM_GGML_F32x4_STORE(p, r)  vec_xst(r, 0, p)
// fused multiply-add: a + b*c
#define LM_GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
#define LM_GGML_F32x4_ADD          vec_add
#define LM_GGML_F32x4_MUL          vec_mul
// Pairwise tree reduction of the LM_GGML_F32_ARR accumulators into x[0],
// then a horizontal sum of its 4 lanes into the scalar `res`.
#define LM_GGML_F32x4_REDUCE(res, x)              \
{                                                 \
    int offset = LM_GGML_F32_ARR >> 1;            \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = vec_add(x[i], x[offset+i]);        \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = vec_add(x[i], x[offset+i]);        \
    }                                             \
    offset >>= 1;                                 \
    for (int i = 0; i < offset; ++i) {            \
        x[i] = vec_add(x[i], x[offset+i]);        \
    }                                             \
    res = vec_extract(x[0], 0) +                  \
          vec_extract(x[0], 1) +                  \
          vec_extract(x[0], 2) +                  \
          vec_extract(x[0], 3);                   \
}

#define LM_GGML_F32_VEC        LM_GGML_F32x4
#define LM_GGML_F32_VEC_ZERO   LM_GGML_F32x4_ZERO
#define LM_GGML_F32_VEC_SET1   LM_GGML_F32x4_SET1
#define LM_GGML_F32_VEC_LOAD   LM_GGML_F32x4_LOAD
#define LM_GGML_F32_VEC_STORE  LM_GGML_F32x4_STORE
#define LM_GGML_F32_VEC_FMA    LM_GGML_F32x4_FMA
#define LM_GGML_F32_VEC_ADD    LM_GGML_F32x4_ADD
#define LM_GGML_F32_VEC_MUL    LM_GGML_F32x4_MUL
#define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE

// F16 POWER9: fp16 data is widened to fp32 vectors for arithmetic.
#define LM_GGML_F16_STEP       LM_GGML_F32_STEP
#define LM_GGML_F16_EPR        LM_GGML_F32_EPR
#define LM_GGML_F16_VEC        LM_GGML_F32x4
#define LM_GGML_F16_VEC_ZERO   LM_GGML_F32x4_ZERO
#define LM_GGML_F16_VEC_SET1   LM_GGML_F32x4_SET1
#define LM_GGML_F16_VEC_FMA    LM_GGML_F32x4_FMA
#define LM_GGML_F16_VEC_ADD    LM_GGML_F32x4_ADD
#define LM_GGML_F16_VEC_MUL    LM_GGML_F32x4_MUL
#define LM_GGML_F16_VEC_REDUCE LM_GGML_F32x4_REDUCE
// Use vec_xl, not vec_ld, in case the load address is not aligned.
// Even/odd register index selects the low/high 4 halfwords of the same
// 8-halfword load (odd i re-loads at p - EPR to cover the pair).
// NOTE(review): the ternary expansion is unparenthesized — do not embed
// this macro inside a larger expression.
#define LM_GGML_F16_VEC_LOAD(p, i) (i & 0x1) ?                   \
  vec_extract_fp32_from_shorth(vec_xl(0, p - LM_GGML_F16_EPR)) : \
  vec_extract_fp32_from_shortl(vec_xl(0, p))
// Runtime endianness probe: returns byte `i` of the uint16_t value 1,
// i.e. 1 for the byte holding the low-order bits (index 0 on
// little-endian, index 1 on big-endian) and 0 for the other byte.
static inline unsigned char lm_ggml_endian_byte(int i) {
    const uint16_t probe = 1;
    const unsigned char *bytes = (const unsigned char *)&probe;
    return bytes[i];
}
// Endianness-dependent lane order when packing two fp32 vectors back to
// fp16 (see lm_ggml_endian_byte above).
#define LM_GGML_ENDIAN_BYTE(i) lm_ggml_endian_byte(i)
// Stores happen only on odd i: both fp32 vectors of the pair
// (r[i-1], r[i]) are packed into 8 halfwords and written at once.
// NOTE(review): braceless `if` in a statement macro — a following
// `else` at the call site would bind to it.
#define LM_GGML_F16_VEC_STORE(p, r, i)                             \
  if (i & 0x1)                                                     \
    vec_xst(vec_pack_to_short_fp32(r[i - LM_GGML_ENDIAN_BYTE(1)],  \
                                   r[i - LM_GGML_ENDIAN_BYTE(0)]), \
            0, p - LM_GGML_F16_EPR)
#elif defined(__wasm_simd128__)

#define LM_GGML_SIMD

// F32 WASM SIMD128: 4 floats per v128 register, 16 per step
// (LM_GGML_F32_ARR = 4 accumulator registers).

#define LM_GGML_F32_STEP 16
#define LM_GGML_F32_EPR  4

#define LM_GGML_F32x4              v128_t
#define LM_GGML_F32x4_ZERO         wasm_f32x4_splat(0.0f)
#define LM_GGML_F32x4_SET1(x)      wasm_f32x4_splat(x)
#define LM_GGML_F32x4_LOAD         wasm_v128_load
#define LM_GGML_F32x4_STORE        wasm_v128_store
// a + b*c (composed from mul+add; no fused instruction used here)
#define LM_GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a)
#define LM_GGML_F32x4_ADD          wasm_f32x4_add
#define LM_GGML_F32x4_MUL          wasm_f32x4_mul
// Tree-reduce the accumulator array into x[0], then sum its 4 lanes.
#define LM_GGML_F32x4_REDUCE(res, x)                  \
{                                                     \
    int offset = LM_GGML_F32_ARR >> 1;                \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    offset >>= 1;                                     \
    for (int i = 0; i < offset; ++i) {                \
        x[i] = wasm_f32x4_add(x[i], x[offset+i]);     \
    }                                                 \
    res = wasm_f32x4_extract_lane(x[0], 0) +          \
          wasm_f32x4_extract_lane(x[0], 1) +          \
          wasm_f32x4_extract_lane(x[0], 2) +          \
          wasm_f32x4_extract_lane(x[0], 3);           \
}

#define LM_GGML_F32_VEC        LM_GGML_F32x4
#define LM_GGML_F32_VEC_ZERO   LM_GGML_F32x4_ZERO
#define LM_GGML_F32_VEC_SET1   LM_GGML_F32x4_SET1
#define LM_GGML_F32_VEC_LOAD   LM_GGML_F32x4_LOAD
#define LM_GGML_F32_VEC_STORE  LM_GGML_F32x4_STORE
#define LM_GGML_F32_VEC_FMA    LM_GGML_F32x4_FMA
#define LM_GGML_F32_VEC_ADD    LM_GGML_F32x4_ADD
#define LM_GGML_F32_VEC_MUL    LM_GGML_F32x4_MUL
#define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE

// F16 WASM: fp16 storage, fp32 arithmetic (conversion helpers below).

#define LM_GGML_F16_STEP 16
#define LM_GGML_F16_EPR  4
+ inline static v128_t __wasm_f16x4_load(const lm_ggml_fp16_t * p) {
732
+ float tmp[4];
733
+
734
+ tmp[0] = LM_GGML_CPU_FP16_TO_FP32(p[0]);
735
+ tmp[1] = LM_GGML_CPU_FP16_TO_FP32(p[1]);
736
+ tmp[2] = LM_GGML_CPU_FP16_TO_FP32(p[2]);
737
+ tmp[3] = LM_GGML_CPU_FP16_TO_FP32(p[3]);
738
+
739
+ return wasm_v128_load(tmp);
740
+ }
741
+
742
+ inline static void __wasm_f16x4_store(lm_ggml_fp16_t * p, v128_t x) {
743
+ float tmp[4];
744
+
745
+ wasm_v128_store(tmp, x);
746
+
747
+ p[0] = LM_GGML_CPU_FP32_TO_FP16(tmp[0]);
748
+ p[1] = LM_GGML_CPU_FP32_TO_FP16(tmp[1]);
749
+ p[2] = LM_GGML_CPU_FP32_TO_FP16(tmp[2]);
750
+ p[3] = LM_GGML_CPU_FP32_TO_FP16(tmp[3]);
751
+ }
752
+
753
+ #define LM_GGML_F16x4 v128_t
754
+ #define LM_GGML_F16x4_ZERO wasm_f32x4_splat(0.0f)
755
+ #define LM_GGML_F16x4_SET1(x) wasm_f32x4_splat(x)
756
+ #define LM_GGML_F16x4_LOAD(x) __wasm_f16x4_load(x)
757
+ #define LM_GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y)
758
+ #define LM_GGML_F16x4_FMA LM_GGML_F32x4_FMA
759
+ #define LM_GGML_F16x4_ADD wasm_f32x4_add
760
+ #define LM_GGML_F16x4_MUL wasm_f32x4_mul
761
+ #define LM_GGML_F16x4_REDUCE(res, x) \
762
+ { \
763
+ int offset = LM_GGML_F16_ARR >> 1; \
764
+ for (int i = 0; i < offset; ++i) { \
765
+ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
766
+ } \
767
+ offset >>= 1; \
768
+ for (int i = 0; i < offset; ++i) { \
769
+ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
770
+ } \
771
+ offset >>= 1; \
772
+ for (int i = 0; i < offset; ++i) { \
773
+ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \
774
+ } \
775
+ res = (lm_ggml_float) (wasm_f32x4_extract_lane(x[0], 0) + \
776
+ wasm_f32x4_extract_lane(x[0], 1) + \
777
+ wasm_f32x4_extract_lane(x[0], 2) + \
778
+ wasm_f32x4_extract_lane(x[0], 3)); \
779
+ }
780
+
781
+ #define LM_GGML_F16_VEC LM_GGML_F16x4
782
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F16x4_ZERO
783
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F16x4_SET1
784
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F16x4_LOAD(p)
785
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F16x4_STORE(p, r[i])
786
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F16x4_FMA
787
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F16x4_ADD
788
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F16x4_MUL
789
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F16x4_REDUCE
790
+
791
#elif defined(__SSE3__)

#define LM_GGML_SIMD

// F32 SSE: 4 floats per __m128, 32 per step (8 accumulator registers).

#define LM_GGML_F32_STEP 32
#define LM_GGML_F32_EPR  4

#define LM_GGML_F32x4         __m128
#define LM_GGML_F32x4_ZERO    _mm_setzero_ps()
#define LM_GGML_F32x4_SET1(x) _mm_set1_ps(x)
#define LM_GGML_F32x4_LOAD    _mm_loadu_ps
#define LM_GGML_F32x4_STORE   _mm_storeu_ps
#if defined(__FMA__)
    // TODO: Does this work?
    // fused multiply-add: a + b*c
    #define LM_GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a)
#else
    #define LM_GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a)
#endif
#define LM_GGML_F32x4_ADD _mm_add_ps
#define LM_GGML_F32x4_MUL _mm_mul_ps
// Tree-reduce the accumulators into x[0], then horizontally sum its 4
// lanes with two hadd instructions.
#define LM_GGML_F32x4_REDUCE(res, x)                          \
{                                                             \
    int offset = LM_GGML_F32_ARR >> 1;                        \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = _mm_add_ps(x[i], x[offset+i]);                 \
    }                                                         \
    offset >>= 1;                                             \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = _mm_add_ps(x[i], x[offset+i]);                 \
    }                                                         \
    offset >>= 1;                                             \
    for (int i = 0; i < offset; ++i) {                        \
        x[i] = _mm_add_ps(x[i], x[offset+i]);                 \
    }                                                         \
    const __m128 t0 = _mm_hadd_ps(x[0], x[0]);                \
    res = (lm_ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
}
// TODO: is this optimal ?

#define LM_GGML_F32_VEC        LM_GGML_F32x4
#define LM_GGML_F32_VEC_ZERO   LM_GGML_F32x4_ZERO
#define LM_GGML_F32_VEC_SET1   LM_GGML_F32x4_SET1
#define LM_GGML_F32_VEC_LOAD   LM_GGML_F32x4_LOAD
#define LM_GGML_F32_VEC_STORE  LM_GGML_F32x4_STORE
#define LM_GGML_F32_VEC_FMA    LM_GGML_F32x4_FMA
#define LM_GGML_F32_VEC_ADD    LM_GGML_F32x4_ADD
#define LM_GGML_F32_VEC_MUL    LM_GGML_F32x4_MUL
#define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE

// F16 SSE: fp16 storage, fp32 arithmetic (conversion helpers below).

#define LM_GGML_F16_STEP 32
#define LM_GGML_F16_EPR  4
+ static inline __m128 __sse_f16x4_load(const lm_ggml_fp16_t * x) {
848
+ float tmp[4];
849
+
850
+ tmp[0] = LM_GGML_CPU_FP16_TO_FP32(x[0]);
851
+ tmp[1] = LM_GGML_CPU_FP16_TO_FP32(x[1]);
852
+ tmp[2] = LM_GGML_CPU_FP16_TO_FP32(x[2]);
853
+ tmp[3] = LM_GGML_CPU_FP16_TO_FP32(x[3]);
854
+
855
+ return _mm_loadu_ps(tmp);
856
+ }
857
+
858
+ static inline void __sse_f16x4_store(lm_ggml_fp16_t * x, __m128 y) {
859
+ float arr[4];
860
+
861
+ _mm_storeu_ps(arr, y);
862
+
863
+ x[0] = LM_GGML_CPU_FP32_TO_FP16(arr[0]);
864
+ x[1] = LM_GGML_CPU_FP32_TO_FP16(arr[1]);
865
+ x[2] = LM_GGML_CPU_FP32_TO_FP16(arr[2]);
866
+ x[3] = LM_GGML_CPU_FP32_TO_FP16(arr[3]);
867
+ }
868
+
869
+ #define LM_GGML_F32Cx4 __m128
870
+ #define LM_GGML_F32Cx4_ZERO _mm_setzero_ps()
871
+ #define LM_GGML_F32Cx4_SET1(x) _mm_set1_ps(x)
872
+ #define LM_GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x)
873
+ #define LM_GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y)
874
+ #define LM_GGML_F32Cx4_FMA LM_GGML_F32x4_FMA
875
+ #define LM_GGML_F32Cx4_ADD _mm_add_ps
876
+ #define LM_GGML_F32Cx4_MUL _mm_mul_ps
877
+ #define LM_GGML_F32Cx4_REDUCE LM_GGML_F32x4_REDUCE
878
+
879
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx4
880
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx4_ZERO
881
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx4_SET1
882
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx4_LOAD(p)
883
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx4_STORE(p, r[i])
884
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx4_FMA
885
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx4_ADD
886
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx4_MUL
887
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx4_REDUCE
888
+
889
#elif defined(__loongarch_asx)

#define LM_GGML_SIMD

// F32 LASX (256-bit): 8 floats per register, 32 per step (4 registers).
#define LM_GGML_F32_STEP 32
#define LM_GGML_F32_EPR  8

#define LM_GGML_F32x8            __m256
#define LM_GGML_F32x8_ZERO       (__m256)__lasx_xvldi(0)
#define LM_GGML_F32x8_SET1(x)    (__m256)__lasx_xvreplfr2vr_s((x))
#define LM_GGML_F32x8_LOAD(x)    (__m256)__lasx_xvld((x), 0)
#define LM_GGML_F32x8_STORE(x,y)   __lasx_xvst((y), (x), 0)
// fused multiply-add: a + b*c
#define LM_GGML_F32x8_FMA(a, b, c) __lasx_xvfmadd_s(b, c, a)
#define LM_GGML_F32x8_ADD        __lasx_xvfadd_s
#define LM_GGML_F32x8_MUL        __lasx_xvfmul_s
// Tree-reduce the accumulators into x[0], then sum its 8 lanes through
// a scalar float view of the register.
#define LM_GGML_F32x8_REDUCE(res, x)                                                             \
do {                                                                                             \
    int offset = LM_GGML_F32_ARR >> 1;                                                           \
    for (int i = 0; i < offset; ++i) {                                                           \
        x[i] = __lasx_xvfadd_s(x[i], x[offset+i]);                                               \
    }                                                                                            \
    offset >>= 1;                                                                                \
    for (int i = 0; i < offset; ++i) {                                                           \
        x[i] = __lasx_xvfadd_s(x[i], x[offset+i]);                                               \
    }                                                                                            \
    offset >>= 1;                                                                                \
    for (int i = 0; i < offset; ++i) {                                                           \
        x[i] = __lasx_xvfadd_s(x[i], x[offset+i]);                                               \
    }                                                                                            \
    float *tmp_p = (float *)&x[0];                                                               \
    res = tmp_p[0] + tmp_p[1] + tmp_p[2] + tmp_p[3] + tmp_p[4] + tmp_p[5] + tmp_p[6] + tmp_p[7]; \
} while (0)
// TODO: is this optimal ?

#define LM_GGML_F32_VEC        LM_GGML_F32x8
#define LM_GGML_F32_VEC_ZERO   LM_GGML_F32x8_ZERO
#define LM_GGML_F32_VEC_SET1   LM_GGML_F32x8_SET1
#define LM_GGML_F32_VEC_LOAD   LM_GGML_F32x8_LOAD
#define LM_GGML_F32_VEC_STORE  LM_GGML_F32x8_STORE
#define LM_GGML_F32_VEC_FMA    LM_GGML_F32x8_FMA
#define LM_GGML_F32_VEC_ADD    LM_GGML_F32x8_ADD
#define LM_GGML_F32_VEC_MUL    LM_GGML_F32x8_MUL
#define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x8_REDUCE

// F16 LASX

#define LM_GGML_F16_STEP 32
#define LM_GGML_F16_EPR  8

// F16 arithmetic is not supported by LASX, so we use F32 instead

#define LM_GGML_F32Cx8          __m256
#define LM_GGML_F32Cx8_ZERO     (__m256)__lasx_xvldi(0)
// Broadcast a float across all 8 lanes using the float-register splat,
// consistent with LM_GGML_F32x8_SET1 above. The previous
// __lasx_xvreplgr2vr_w replicates a *general-purpose* (integer)
// register, so passing the float argument through it would not
// broadcast the float's bit pattern as intended.
#define LM_GGML_F32Cx8_SET1(x)  (__m256)__lasx_xvreplfr2vr_s((x))
// Load 8 fp16 values from x and widen them to a vector of 8 fp32 lanes.
static inline __m256 __lasx_f32cx8_load(const lm_ggml_fp16_t * x) {
    __m256i a;
    // only the low 16 bytes of `a` receive the fp16 data
    memcpy(&a, x, sizeof(lm_ggml_fp16_t) * 8);
    // rearrange 64-bit lanes before widening; presumably positions each
    // group of 4 halfwords where xvfcvtl.s.h reads them — TODO confirm
    // against the LASX intrinsics reference
    a = __lasx_xvpermi_d(a, 0 | (1 << 4));
    return __lasx_xvfcvtl_s_h(a);
}
// Narrow a vector of 8 fp32 lanes to 8 fp16 values and store them at x.
static inline void __lasx_f32cx8_store(lm_ggml_fp16_t * x, __m256 y) {
    // convert fp32 -> fp16 (halfwords land in both 128-bit halves)
    __m256i a = __lasx_xvfcvt_h_s(y, y);
    // gather the converted halfwords into the low 128 bits; the permute
    // constant mirrors the load above — TODO confirm exact lane mapping
    a = __lasx_xvpermi_d(a, 0 | (2 << 2));
    memcpy(x, &a, sizeof(lm_ggml_fp16_t) * 8);
}
// fp16 load/store go through the fp32 conversion helpers above.
#define LM_GGML_F32Cx8_LOAD(x)     __lasx_f32cx8_load(x)
#define LM_GGML_F32Cx8_STORE(x, y) __lasx_f32cx8_store(x, y)

#define LM_GGML_F32Cx8_FMA    LM_GGML_F32x8_FMA
#define LM_GGML_F32Cx8_ADD    __lasx_xvfadd_s
#define LM_GGML_F32Cx8_MUL    __lasx_xvfmul_s
#define LM_GGML_F32Cx8_REDUCE LM_GGML_F32x8_REDUCE

// Generic F16 API; the index argument i is used only by STORE.
#define LM_GGML_F16_VEC                LM_GGML_F32Cx8
#define LM_GGML_F16_VEC_ZERO           LM_GGML_F32Cx8_ZERO
#define LM_GGML_F16_VEC_SET1           LM_GGML_F32Cx8_SET1
#define LM_GGML_F16_VEC_LOAD(p, i)     LM_GGML_F32Cx8_LOAD(p)
#define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx8_STORE(p, r[i])
#define LM_GGML_F16_VEC_FMA            LM_GGML_F32Cx8_FMA
#define LM_GGML_F16_VEC_ADD            LM_GGML_F32Cx8_ADD
#define LM_GGML_F16_VEC_MUL            LM_GGML_F32Cx8_MUL
#define LM_GGML_F16_VEC_REDUCE         LM_GGML_F32Cx8_REDUCE

#elif defined(__loongarch_sx)

#define LM_GGML_SIMD

// F32 LSX (128-bit): 4 floats per register, 32 per step (8 registers).

#define LM_GGML_F32_STEP 32
#define LM_GGML_F32_EPR  4

#define LM_GGML_F32x4             __m128
#define LM_GGML_F32x4_ZERO        __lsx_vldi(0)
// NOTE(review): this inserts x into lane 0 of a zero vector only — it
// is not a 4-lane broadcast like the other backends' SET1; verify
// against callers before relying on it.
#define LM_GGML_F32x4_SET1(x)     __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0)
#define LM_GGML_F32x4_LOAD(x)     __lsx_vld((x), 0)
#define LM_GGML_F32x4_STORE(x, y) __lsx_vst(y, x, 0)
// fused multiply-add: a + b*c
#define LM_GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a)
#define LM_GGML_F32x4_ADD          __lsx_vfadd_s
#define LM_GGML_F32x4_MUL          __lsx_vfmul_s
// Tree-reduce the accumulators into x[0], then horizontally add its 4
// lanes via shift/shuffle pairs; the total ends up in lane 0.
#define LM_GGML_F32x4_REDUCE(res, x)                                         \
{                                                                            \
    int offset = LM_GGML_F32_ARR >> 1;                                       \
    for (int i = 0; i < offset; ++i) {                                       \
        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                           \
    }                                                                        \
    offset >>= 1;                                                            \
    for (int i = 0; i < offset; ++i) {                                       \
        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                           \
    }                                                                        \
    offset >>= 1;                                                            \
    for (int i = 0; i < offset; ++i) {                                       \
        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                           \
    }                                                                        \
    __m128i tmp = __lsx_vsrli_d((__m128i) x[0], 32);                         \
    tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, x[0]);                       \
    tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp);                               \
    const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88);                            \
    tmp = __lsx_vsrli_d((__m128i) t0, 32);                                   \
    tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, t0);                         \
    tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp);                               \
    res = (lm_ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
}

#define LM_GGML_F32_VEC        LM_GGML_F32x4
#define LM_GGML_F32_VEC_ZERO   LM_GGML_F32x4_ZERO
#define LM_GGML_F32_VEC_SET1   LM_GGML_F32x4_SET1
#define LM_GGML_F32_VEC_LOAD   LM_GGML_F32x4_LOAD
#define LM_GGML_F32_VEC_STORE  LM_GGML_F32x4_STORE
#define LM_GGML_F32_VEC_FMA    LM_GGML_F32x4_FMA
#define LM_GGML_F32_VEC_ADD    LM_GGML_F32x4_ADD
#define LM_GGML_F32_VEC_MUL    LM_GGML_F32x4_MUL
#define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE

// F16 LSX: fp16 storage, fp32 arithmetic (conversion helpers below).

#define LM_GGML_F16_STEP 32
#define LM_GGML_F16_EPR  4
+ static inline __m128 __lsx_f16x4_load(const lm_ggml_fp16_t * x) {
1032
+ float tmp[4];
1033
+
1034
+ tmp[0] = LM_GGML_CPU_FP16_TO_FP32(x[0]);
1035
+ tmp[1] = LM_GGML_CPU_FP16_TO_FP32(x[1]);
1036
+ tmp[2] = LM_GGML_CPU_FP16_TO_FP32(x[2]);
1037
+ tmp[3] = LM_GGML_CPU_FP16_TO_FP32(x[3]);
1038
+
1039
+ return __lsx_vld(tmp, 0);
1040
+ }
1041
+
1042
+ static inline void __lsx_f16x4_store(lm_ggml_fp16_t * x, __m128 y) {
1043
+ float arr[4];
1044
+
1045
+ __lsx_vst(y, arr, 0);
1046
+
1047
+ x[0] = LM_GGML_CPU_FP32_TO_FP16(arr[0]);
1048
+ x[1] = LM_GGML_CPU_FP32_TO_FP16(arr[1]);
1049
+ x[2] = LM_GGML_CPU_FP32_TO_FP16(arr[2]);
1050
+ x[3] = LM_GGML_CPU_FP32_TO_FP16(arr[3]);
1051
+ }
1052
+
1053
+ #define LM_GGML_F32Cx4 __m128
1054
+ #define LM_GGML_F32Cx4_ZERO __lsx_vldi(0)
1055
+ #define LM_GGML_F32Cx4_SET1(x) __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0)
1056
+ #define LM_GGML_F32Cx4_LOAD(x) __lsx_f16x4_load(x)
1057
+ #define LM_GGML_F32Cx4_STORE(x, y) __lsx_f16x4_store(x, y)
1058
+ #define LM_GGML_F32Cx4_FMA LM_GGML_F32x4_FMA
1059
+ #define LM_GGML_F32Cx4_ADD __lsx_vfadd_s
1060
+ #define LM_GGML_F32Cx4_MUL __lsx_vfmul_s
1061
+ #define LM_GGML_F32Cx4_REDUCE LM_GGML_F32x4_REDUCE
1062
+
1063
+ #define LM_GGML_F16_VEC LM_GGML_F32Cx4
1064
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32Cx4_ZERO
1065
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32Cx4_SET1
1066
+ #define LM_GGML_F16_VEC_LOAD(p, i) LM_GGML_F32Cx4_LOAD(p)
1067
+ #define LM_GGML_F16_VEC_STORE(p, r, i) LM_GGML_F32Cx4_STORE(p, r[i])
1068
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32Cx4_FMA
1069
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32Cx4_ADD
1070
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32Cx4_MUL
1071
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32Cx4_REDUCE
1072
+
1073
+ #elif defined(__VXE__) || defined(__VXE2__)
1074
+
1075
+ #define LM_GGML_SIMD
1076
+
1077
+ // F32 s390x
1078
+
1079
+ #define LM_GGML_F32_STEP 32
1080
+ #define LM_GGML_F32_EPR 4
1081
+
1082
+ #define LM_GGML_F32x4 float32x4_t
1083
+ #define LM_GGML_F32x4_ZERO vec_splats(0.0f)
1084
+ #define LM_GGML_F32x4_SET1 vec_splats
1085
+ #define LM_GGML_F32x4_LOAD(p) vec_xl(0, p)
1086
+ #define LM_GGML_F32x4_STORE(p, r) vec_xst(r, 0, p)
1087
+ #define LM_GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a)
1088
+ #define LM_GGML_F32x4_ADD vec_add
1089
+ #define LM_GGML_F32x4_MUL vec_mul
1090
+ #define LM_GGML_F32x4_REDUCE(res, x) \
1091
+ { \
1092
+ int offset = LM_GGML_F32_ARR >> 1; \
1093
+ for (int i = 0; i < offset; ++i) { \
1094
+ x[i] = vec_add(x[i], x[offset + i]); \
1095
+ } \
1096
+ offset >>= 1; \
1097
+ for (int i = 0; i < offset; ++i) { \
1098
+ x[i] = vec_add(x[i], x[offset + i]); \
1099
+ } \
1100
+ offset >>= 1; \
1101
+ for (int i = 0; i < offset; ++i) { \
1102
+ x[i] = vec_add(x[i], x[offset + i]); \
1103
+ } \
1104
+ float32x4_t tmp = x[0] + vec_reve(x[0]); \
1105
+ res = tmp[0] + tmp[1]; \
1106
+ }
1107
+
1108
+ #define LM_GGML_F32_VEC LM_GGML_F32x4
1109
+ #define LM_GGML_F32_VEC_ZERO LM_GGML_F32x4_ZERO
1110
+ #define LM_GGML_F32_VEC_SET1 LM_GGML_F32x4_SET1
1111
+ #define LM_GGML_F32_VEC_LOAD LM_GGML_F32x4_LOAD
1112
+ #define LM_GGML_F32_VEC_STORE LM_GGML_F32x4_STORE
1113
+ #define LM_GGML_F32_VEC_FMA LM_GGML_F32x4_FMA
1114
+ #define LM_GGML_F32_VEC_ADD LM_GGML_F32x4_ADD
1115
+ #define LM_GGML_F32_VEC_MUL LM_GGML_F32x4_MUL
1116
+ #define LM_GGML_F32_VEC_REDUCE LM_GGML_F32x4_REDUCE
1117
+
1118
+ // F16 s390x
1119
+ #define LM_GGML_F16_STEP LM_GGML_F32_STEP
1120
+ #define LM_GGML_F16_EPR LM_GGML_F32_EPR
1121
+
1122
// Load 4 fp16 values and widen them to a fp32 vector (s390x).
static inline float32x4_t __lzs_f16cx4_load(const lm_ggml_fp16_t * x) {
#if defined(__NNPA__)
    // NNPA hardware path: vector fp16 -> fp32 conversion.
    uint16x8_t v_x = vec_xl(0, (const lm_ggml_fp16_t *)x);
    uint16x8_t v_xd = vec_convert_from_fp16(v_x, 0);
    // presumably extends the appropriate 4 halfwords to fp32 — TODO
    // confirm lane selection against the z/Arch NNPA built-ins docs
    return vec_extend_to_fp32_hi(v_xd, 0);
#else
    // Scalar fallback: widen each half through a staging buffer.
    float tmp[4];

    for (int i = 0; i < 4; i++) {
        tmp[i] = LM_GGML_CPU_FP16_TO_FP32(x[i]);
    }

    // note: keep type-cast here to prevent compiler bugs
    // see: https://github.com/ggml-org/llama.cpp/issues/12846
    return vec_xl(0, (const float *)(tmp));
#endif
}
// Narrow a fp32 vector to 4 fp16 values and store them at x (s390x).
static inline void __lzs_f16cx4_store(lm_ggml_fp16_t * x, float32x4_t v_y) {
#if defined(__NNPA__)
    // NNPA hardware path: round fp32 down, then convert to fp16 and
    // extract the 4 result halfwords.
    float32x4_t v_zero = vec_splats(0.0f);
    uint16x8_t v_xd = vec_round_from_fp32(v_y, v_zero, 0);
    uint16x8_t v_x = vec_convert_to_fp16(v_xd, 0);

    x[0] = vec_extract(v_x, 0);
    x[1] = vec_extract(v_x, 1);
    x[2] = vec_extract(v_x, 2);
    x[3] = vec_extract(v_x, 3);
#else
    // Scalar fallback: spill to a staging buffer, narrow each lane.
    float arr[4];

    // note: keep type-cast here to prevent compiler bugs
    // see: https://github.com/ggml-org/llama.cpp/issues/12846
    vec_xst(v_y, 0, (float *)(arr));

    for (int i = 0; i < 4; i++) {
        x[i] = LM_GGML_CPU_FP32_TO_FP16(arr[i]);
    }
#endif
}
+
1163
+ #define LM_GGML_F16_VEC LM_GGML_F32x4
1164
+ #define LM_GGML_F16_VEC_ZERO LM_GGML_F32x4_ZERO
1165
+ #define LM_GGML_F16_VEC_SET1 LM_GGML_F32x4_SET1
1166
+ #define LM_GGML_F16_VEC_LOAD(p, i) __lzs_f16cx4_load(p)
1167
+ #define LM_GGML_F16_VEC_STORE(p, r, i) __lzs_f16cx4_store(p, r[i])
1168
+ #define LM_GGML_F16_VEC_FMA LM_GGML_F32x4_FMA
1169
+ #define LM_GGML_F16_VEC_ADD LM_GGML_F32x4_ADD
1170
+ #define LM_GGML_F16_VEC_MUL LM_GGML_F32x4_MUL
1171
+ #define LM_GGML_F16_VEC_REDUCE LM_GGML_F32x4_REDUCE
1172
+
1173
+ #endif
1174
+
1175
// LM_GGML_F32_ARR / LM_GGML_F16_ARR
// number of vector registers accumulated per unrolled step
// (referenced by every *_REDUCE macro above)
#ifdef LM_GGML_SIMD
#define LM_GGML_F32_ARR (LM_GGML_F32_STEP/LM_GGML_F32_EPR)
#define LM_GGML_F16_ARR (LM_GGML_F16_STEP/LM_GGML_F16_EPR)
#endif

+
1182
+ #ifdef __cplusplus
1183
+ }
1184
+ #endif