numkong 7.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (294)
  1. package/LICENSE +201 -0
  2. package/README.md +495 -0
  3. package/binding.gyp +540 -0
  4. package/c/dispatch.h +512 -0
  5. package/c/dispatch_bf16.c +389 -0
  6. package/c/dispatch_bf16c.c +52 -0
  7. package/c/dispatch_e2m3.c +263 -0
  8. package/c/dispatch_e3m2.c +243 -0
  9. package/c/dispatch_e4m3.c +276 -0
  10. package/c/dispatch_e5m2.c +272 -0
  11. package/c/dispatch_f16.c +376 -0
  12. package/c/dispatch_f16c.c +58 -0
  13. package/c/dispatch_f32.c +378 -0
  14. package/c/dispatch_f32c.c +99 -0
  15. package/c/dispatch_f64.c +296 -0
  16. package/c/dispatch_f64c.c +98 -0
  17. package/c/dispatch_i16.c +96 -0
  18. package/c/dispatch_i32.c +89 -0
  19. package/c/dispatch_i4.c +150 -0
  20. package/c/dispatch_i64.c +86 -0
  21. package/c/dispatch_i8.c +289 -0
  22. package/c/dispatch_other.c +330 -0
  23. package/c/dispatch_u1.c +148 -0
  24. package/c/dispatch_u16.c +124 -0
  25. package/c/dispatch_u32.c +118 -0
  26. package/c/dispatch_u4.c +150 -0
  27. package/c/dispatch_u64.c +102 -0
  28. package/c/dispatch_u8.c +303 -0
  29. package/c/numkong.c +950 -0
  30. package/include/README.md +573 -0
  31. package/include/module.modulemap +129 -0
  32. package/include/numkong/attention/sapphireamx.h +1361 -0
  33. package/include/numkong/attention/sme.h +2066 -0
  34. package/include/numkong/attention.h +49 -0
  35. package/include/numkong/capabilities.h +748 -0
  36. package/include/numkong/cast/README.md +262 -0
  37. package/include/numkong/cast/haswell.h +975 -0
  38. package/include/numkong/cast/icelake.h +470 -0
  39. package/include/numkong/cast/neon.h +1192 -0
  40. package/include/numkong/cast/rvv.h +1021 -0
  41. package/include/numkong/cast/sapphire.h +262 -0
  42. package/include/numkong/cast/serial.h +2262 -0
  43. package/include/numkong/cast/skylake.h +856 -0
  44. package/include/numkong/cast/v128relaxed.h +180 -0
  45. package/include/numkong/cast.h +230 -0
  46. package/include/numkong/curved/README.md +223 -0
  47. package/include/numkong/curved/genoa.h +182 -0
  48. package/include/numkong/curved/haswell.h +276 -0
  49. package/include/numkong/curved/neon.h +205 -0
  50. package/include/numkong/curved/neonbfdot.h +212 -0
  51. package/include/numkong/curved/neonhalf.h +212 -0
  52. package/include/numkong/curved/rvv.h +305 -0
  53. package/include/numkong/curved/serial.h +207 -0
  54. package/include/numkong/curved/skylake.h +457 -0
  55. package/include/numkong/curved/smef64.h +506 -0
  56. package/include/numkong/curved.h +517 -0
  57. package/include/numkong/curved.hpp +144 -0
  58. package/include/numkong/dot/README.md +425 -0
  59. package/include/numkong/dot/alder.h +563 -0
  60. package/include/numkong/dot/genoa.h +315 -0
  61. package/include/numkong/dot/haswell.h +1688 -0
  62. package/include/numkong/dot/icelake.h +883 -0
  63. package/include/numkong/dot/neon.h +818 -0
  64. package/include/numkong/dot/neonbfdot.h +244 -0
  65. package/include/numkong/dot/neonfhm.h +360 -0
  66. package/include/numkong/dot/neonhalf.h +198 -0
  67. package/include/numkong/dot/neonsdot.h +508 -0
  68. package/include/numkong/dot/rvv.h +714 -0
  69. package/include/numkong/dot/rvvbb.h +72 -0
  70. package/include/numkong/dot/rvvbf16.h +123 -0
  71. package/include/numkong/dot/rvvhalf.h +129 -0
  72. package/include/numkong/dot/sapphire.h +141 -0
  73. package/include/numkong/dot/serial.h +838 -0
  74. package/include/numkong/dot/sierra.h +405 -0
  75. package/include/numkong/dot/skylake.h +1084 -0
  76. package/include/numkong/dot/sve.h +379 -0
  77. package/include/numkong/dot/svebfdot.h +74 -0
  78. package/include/numkong/dot/svehalf.h +123 -0
  79. package/include/numkong/dot/v128relaxed.h +1258 -0
  80. package/include/numkong/dot.h +1070 -0
  81. package/include/numkong/dot.hpp +94 -0
  82. package/include/numkong/dots/README.md +496 -0
  83. package/include/numkong/dots/alder.h +114 -0
  84. package/include/numkong/dots/genoa.h +94 -0
  85. package/include/numkong/dots/haswell.h +295 -0
  86. package/include/numkong/dots/icelake.h +171 -0
  87. package/include/numkong/dots/neon.h +120 -0
  88. package/include/numkong/dots/neonbfdot.h +58 -0
  89. package/include/numkong/dots/neonfhm.h +94 -0
  90. package/include/numkong/dots/neonhalf.h +57 -0
  91. package/include/numkong/dots/neonsdot.h +108 -0
  92. package/include/numkong/dots/rvv.h +2486 -0
  93. package/include/numkong/dots/sapphireamx.h +3973 -0
  94. package/include/numkong/dots/serial.h +2844 -0
  95. package/include/numkong/dots/sierra.h +97 -0
  96. package/include/numkong/dots/skylake.h +196 -0
  97. package/include/numkong/dots/sme.h +5372 -0
  98. package/include/numkong/dots/smebi32.h +461 -0
  99. package/include/numkong/dots/smef64.h +1318 -0
  100. package/include/numkong/dots/smehalf.h +47 -0
  101. package/include/numkong/dots/v128relaxed.h +294 -0
  102. package/include/numkong/dots.h +2804 -0
  103. package/include/numkong/dots.hpp +639 -0
  104. package/include/numkong/each/README.md +469 -0
  105. package/include/numkong/each/haswell.h +1658 -0
  106. package/include/numkong/each/icelake.h +272 -0
  107. package/include/numkong/each/neon.h +1104 -0
  108. package/include/numkong/each/neonbfdot.h +212 -0
  109. package/include/numkong/each/neonhalf.h +410 -0
  110. package/include/numkong/each/rvv.h +1121 -0
  111. package/include/numkong/each/sapphire.h +477 -0
  112. package/include/numkong/each/serial.h +260 -0
  113. package/include/numkong/each/skylake.h +1562 -0
  114. package/include/numkong/each.h +2146 -0
  115. package/include/numkong/each.hpp +434 -0
  116. package/include/numkong/geospatial/README.md +147 -0
  117. package/include/numkong/geospatial/haswell.h +593 -0
  118. package/include/numkong/geospatial/neon.h +571 -0
  119. package/include/numkong/geospatial/rvv.h +701 -0
  120. package/include/numkong/geospatial/serial.h +309 -0
  121. package/include/numkong/geospatial/skylake.h +577 -0
  122. package/include/numkong/geospatial/v128relaxed.h +613 -0
  123. package/include/numkong/geospatial.h +453 -0
  124. package/include/numkong/geospatial.hpp +235 -0
  125. package/include/numkong/matrix.hpp +336 -0
  126. package/include/numkong/maxsim/README.md +187 -0
  127. package/include/numkong/maxsim/alder.h +511 -0
  128. package/include/numkong/maxsim/genoa.h +115 -0
  129. package/include/numkong/maxsim/haswell.h +553 -0
  130. package/include/numkong/maxsim/icelake.h +480 -0
  131. package/include/numkong/maxsim/neonsdot.h +394 -0
  132. package/include/numkong/maxsim/sapphireamx.h +877 -0
  133. package/include/numkong/maxsim/serial.h +490 -0
  134. package/include/numkong/maxsim/sme.h +929 -0
  135. package/include/numkong/maxsim/v128relaxed.h +280 -0
  136. package/include/numkong/maxsim.h +571 -0
  137. package/include/numkong/maxsim.hpp +133 -0
  138. package/include/numkong/mesh/README.md +227 -0
  139. package/include/numkong/mesh/haswell.h +2235 -0
  140. package/include/numkong/mesh/neon.h +1329 -0
  141. package/include/numkong/mesh/neonbfdot.h +842 -0
  142. package/include/numkong/mesh/neonhalf.h +616 -0
  143. package/include/numkong/mesh/rvv.h +916 -0
  144. package/include/numkong/mesh/serial.h +742 -0
  145. package/include/numkong/mesh/skylake.h +1135 -0
  146. package/include/numkong/mesh/v128relaxed.h +1052 -0
  147. package/include/numkong/mesh.h +652 -0
  148. package/include/numkong/mesh.hpp +762 -0
  149. package/include/numkong/numkong.h +78 -0
  150. package/include/numkong/numkong.hpp +57 -0
  151. package/include/numkong/probability/README.md +173 -0
  152. package/include/numkong/probability/haswell.h +267 -0
  153. package/include/numkong/probability/neon.h +225 -0
  154. package/include/numkong/probability/rvv.h +409 -0
  155. package/include/numkong/probability/serial.h +169 -0
  156. package/include/numkong/probability/skylake.h +324 -0
  157. package/include/numkong/probability.h +383 -0
  158. package/include/numkong/probability.hpp +120 -0
  159. package/include/numkong/random.h +50 -0
  160. package/include/numkong/random.hpp +285 -0
  161. package/include/numkong/reduce/README.md +547 -0
  162. package/include/numkong/reduce/alder.h +632 -0
  163. package/include/numkong/reduce/genoa.h +201 -0
  164. package/include/numkong/reduce/haswell.h +3783 -0
  165. package/include/numkong/reduce/icelake.h +549 -0
  166. package/include/numkong/reduce/neon.h +3841 -0
  167. package/include/numkong/reduce/neonbfdot.h +353 -0
  168. package/include/numkong/reduce/neonfhm.h +665 -0
  169. package/include/numkong/reduce/neonhalf.h +157 -0
  170. package/include/numkong/reduce/neonsdot.h +357 -0
  171. package/include/numkong/reduce/rvv.h +3407 -0
  172. package/include/numkong/reduce/serial.h +757 -0
  173. package/include/numkong/reduce/sierra.h +338 -0
  174. package/include/numkong/reduce/skylake.h +3792 -0
  175. package/include/numkong/reduce/v128relaxed.h +2302 -0
  176. package/include/numkong/reduce.h +1597 -0
  177. package/include/numkong/reduce.hpp +633 -0
  178. package/include/numkong/scalar/README.md +89 -0
  179. package/include/numkong/scalar/haswell.h +113 -0
  180. package/include/numkong/scalar/neon.h +122 -0
  181. package/include/numkong/scalar/neonhalf.h +70 -0
  182. package/include/numkong/scalar/rvv.h +211 -0
  183. package/include/numkong/scalar/sapphire.h +63 -0
  184. package/include/numkong/scalar/serial.h +332 -0
  185. package/include/numkong/scalar/v128relaxed.h +56 -0
  186. package/include/numkong/scalar.h +683 -0
  187. package/include/numkong/set/README.md +179 -0
  188. package/include/numkong/set/haswell.h +334 -0
  189. package/include/numkong/set/icelake.h +485 -0
  190. package/include/numkong/set/neon.h +364 -0
  191. package/include/numkong/set/rvv.h +226 -0
  192. package/include/numkong/set/rvvbb.h +117 -0
  193. package/include/numkong/set/serial.h +174 -0
  194. package/include/numkong/set/sve.h +185 -0
  195. package/include/numkong/set/v128relaxed.h +240 -0
  196. package/include/numkong/set.h +457 -0
  197. package/include/numkong/set.hpp +114 -0
  198. package/include/numkong/sets/README.md +149 -0
  199. package/include/numkong/sets/haswell.h +63 -0
  200. package/include/numkong/sets/icelake.h +66 -0
  201. package/include/numkong/sets/neon.h +61 -0
  202. package/include/numkong/sets/serial.h +43 -0
  203. package/include/numkong/sets/smebi32.h +1099 -0
  204. package/include/numkong/sets/v128relaxed.h +58 -0
  205. package/include/numkong/sets.h +339 -0
  206. package/include/numkong/sparse/README.md +156 -0
  207. package/include/numkong/sparse/icelake.h +463 -0
  208. package/include/numkong/sparse/neon.h +288 -0
  209. package/include/numkong/sparse/serial.h +117 -0
  210. package/include/numkong/sparse/sve2.h +507 -0
  211. package/include/numkong/sparse/turin.h +322 -0
  212. package/include/numkong/sparse.h +363 -0
  213. package/include/numkong/sparse.hpp +113 -0
  214. package/include/numkong/spatial/README.md +435 -0
  215. package/include/numkong/spatial/alder.h +607 -0
  216. package/include/numkong/spatial/genoa.h +290 -0
  217. package/include/numkong/spatial/haswell.h +960 -0
  218. package/include/numkong/spatial/icelake.h +586 -0
  219. package/include/numkong/spatial/neon.h +773 -0
  220. package/include/numkong/spatial/neonbfdot.h +165 -0
  221. package/include/numkong/spatial/neonhalf.h +118 -0
  222. package/include/numkong/spatial/neonsdot.h +261 -0
  223. package/include/numkong/spatial/rvv.h +984 -0
  224. package/include/numkong/spatial/rvvbf16.h +123 -0
  225. package/include/numkong/spatial/rvvhalf.h +117 -0
  226. package/include/numkong/spatial/sapphire.h +343 -0
  227. package/include/numkong/spatial/serial.h +346 -0
  228. package/include/numkong/spatial/sierra.h +323 -0
  229. package/include/numkong/spatial/skylake.h +606 -0
  230. package/include/numkong/spatial/sve.h +224 -0
  231. package/include/numkong/spatial/svebfdot.h +122 -0
  232. package/include/numkong/spatial/svehalf.h +109 -0
  233. package/include/numkong/spatial/v128relaxed.h +717 -0
  234. package/include/numkong/spatial.h +1425 -0
  235. package/include/numkong/spatial.hpp +183 -0
  236. package/include/numkong/spatials/README.md +580 -0
  237. package/include/numkong/spatials/alder.h +94 -0
  238. package/include/numkong/spatials/genoa.h +94 -0
  239. package/include/numkong/spatials/haswell.h +219 -0
  240. package/include/numkong/spatials/icelake.h +113 -0
  241. package/include/numkong/spatials/neon.h +109 -0
  242. package/include/numkong/spatials/neonbfdot.h +60 -0
  243. package/include/numkong/spatials/neonfhm.h +92 -0
  244. package/include/numkong/spatials/neonhalf.h +58 -0
  245. package/include/numkong/spatials/neonsdot.h +109 -0
  246. package/include/numkong/spatials/rvv.h +1960 -0
  247. package/include/numkong/spatials/sapphireamx.h +1149 -0
  248. package/include/numkong/spatials/serial.h +226 -0
  249. package/include/numkong/spatials/sierra.h +96 -0
  250. package/include/numkong/spatials/skylake.h +184 -0
  251. package/include/numkong/spatials/sme.h +1901 -0
  252. package/include/numkong/spatials/smef64.h +465 -0
  253. package/include/numkong/spatials/v128relaxed.h +240 -0
  254. package/include/numkong/spatials.h +3021 -0
  255. package/include/numkong/spatials.hpp +508 -0
  256. package/include/numkong/tensor.hpp +1592 -0
  257. package/include/numkong/trigonometry/README.md +184 -0
  258. package/include/numkong/trigonometry/haswell.h +652 -0
  259. package/include/numkong/trigonometry/neon.h +639 -0
  260. package/include/numkong/trigonometry/rvv.h +699 -0
  261. package/include/numkong/trigonometry/serial.h +703 -0
  262. package/include/numkong/trigonometry/skylake.h +721 -0
  263. package/include/numkong/trigonometry/v128relaxed.h +666 -0
  264. package/include/numkong/trigonometry.h +467 -0
  265. package/include/numkong/trigonometry.hpp +166 -0
  266. package/include/numkong/types.h +1384 -0
  267. package/include/numkong/types.hpp +5603 -0
  268. package/include/numkong/vector.hpp +698 -0
  269. package/javascript/README.md +246 -0
  270. package/javascript/dist/cjs/numkong-wasm.d.ts +166 -0
  271. package/javascript/dist/cjs/numkong-wasm.js +617 -0
  272. package/javascript/dist/cjs/numkong.d.ts +343 -0
  273. package/javascript/dist/cjs/numkong.js +523 -0
  274. package/javascript/dist/cjs/package.json +3 -0
  275. package/javascript/dist/cjs/types.d.ts +284 -0
  276. package/javascript/dist/cjs/types.js +653 -0
  277. package/javascript/dist/esm/numkong-wasm.d.ts +166 -0
  278. package/javascript/dist/esm/numkong-wasm.js +595 -0
  279. package/javascript/dist/esm/numkong.d.ts +343 -0
  280. package/javascript/dist/esm/numkong.js +452 -0
  281. package/javascript/dist/esm/package.json +3 -0
  282. package/javascript/dist/esm/types.d.ts +284 -0
  283. package/javascript/dist/esm/types.js +630 -0
  284. package/javascript/dist-package-cjs.json +3 -0
  285. package/javascript/dist-package-esm.json +3 -0
  286. package/javascript/node-gyp-build.d.ts +1 -0
  287. package/javascript/numkong-wasm.ts +756 -0
  288. package/javascript/numkong.c +689 -0
  289. package/javascript/numkong.ts +575 -0
  290. package/javascript/tsconfig-base.json +39 -0
  291. package/javascript/tsconfig-cjs.json +8 -0
  292. package/javascript/tsconfig-esm.json +8 -0
  293. package/javascript/types.ts +674 -0
  294. package/package.json +87 -0
@@ -0,0 +1,2302 @@
+ /**
+  * @brief SIMD-accelerated Reductions for WASM.
+  * @file include/numkong/reduce/v128relaxed.h
+  * @author Ash Vardanian
+  * @date February 13, 2026
+  *
+  * @sa include/numkong/reduce.h
+  */
+ #ifndef NK_REDUCE_V128RELAXED_H
+ #define NK_REDUCE_V128RELAXED_H
+
+ #if NK_TARGET_V128RELAXED
+
+ #include "numkong/types.h"
+ #include "numkong/reduce/serial.h"
+ #include "numkong/cast/v128relaxed.h" // `nk_bf16x4_to_f32x4_v128relaxed_`
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ #if defined(__clang__)
+ #pragma clang attribute push(__attribute__((target("relaxed-simd"))), apply_to = function)
+ #endif
+
+ /** @brief Horizontal sum of 4 floats using shuffle tree. */
+ NK_INTERNAL nk_f32_t nk_reduce_add_f32x4_v128relaxed_(v128_t vec_f32x4) {
+     v128_t high_f32x4 = wasm_i32x4_shuffle(vec_f32x4, vec_f32x4, 2, 3, 0, 0);
+     v128_t sum1_f32x4 = wasm_f32x4_add(vec_f32x4, high_f32x4);
+     v128_t high2_f32x4 = wasm_i32x4_shuffle(sum1_f32x4, sum1_f32x4, 1, 0, 0, 0);
+     v128_t sum2_f32x4 = wasm_f32x4_add(sum1_f32x4, high2_f32x4);
+     return wasm_f32x4_extract_lane(sum2_f32x4, 0);
+ }
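
// A scalar sketch of the reduction the shuffle tree above performs (illustrative
// only, not part of the shipped header): lanes fold pairwise, so the summation
// order is (v0 + v2) + (v1 + v3) rather than strict left-to-right.
static inline float nk_hsum_f32x4_sketch_(float const v[4]) {
    return (v[0] + v[2]) + (v[1] + v[3]);
}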
+
+ /** @brief Horizontal sum of 2 doubles using single shuffle. */
+ NK_INTERNAL nk_f64_t nk_reduce_add_f64x2_v128relaxed_(v128_t vec_f64x2) {
+     v128_t high_f64x2 = wasm_i64x2_shuffle(vec_f64x2, vec_f64x2, 1, 0);
+     v128_t sum_f64x2 = wasm_f64x2_add(vec_f64x2, high_f64x2);
+     return wasm_f64x2_extract_lane(sum_f64x2, 0);
+ }
+
+ /** @brief Horizontal sum of 4 signed 32-bit integers using shuffle tree. */
+ NK_INTERNAL nk_i32_t nk_reduce_add_i32x4_v128relaxed_(v128_t vec_i32x4) {
+     v128_t high_i32x4 = wasm_i32x4_shuffle(vec_i32x4, vec_i32x4, 2, 3, 0, 0);
+     v128_t sum1_i32x4 = wasm_i32x4_add(vec_i32x4, high_i32x4);
+     v128_t high2_i32x4 = wasm_i32x4_shuffle(sum1_i32x4, sum1_i32x4, 1, 0, 0, 0);
+     v128_t sum2_i32x4 = wasm_i32x4_add(sum1_i32x4, high2_i32x4);
+     return wasm_i32x4_extract_lane(sum2_i32x4, 0);
+ }
+
+ /** @brief Horizontal sum of 4 unsigned 32-bit integers using shuffle tree. */
+ NK_INTERNAL nk_u32_t nk_reduce_add_u32x4_v128relaxed_(v128_t vec_u32x4) {
+     v128_t high_u32x4 = wasm_i32x4_shuffle(vec_u32x4, vec_u32x4, 2, 3, 0, 0);
+     v128_t sum1_u32x4 = wasm_i32x4_add(vec_u32x4, high_u32x4);
+     v128_t high2_u32x4 = wasm_i32x4_shuffle(sum1_u32x4, sum1_u32x4, 1, 0, 0, 0);
+     v128_t sum2_u32x4 = wasm_i32x4_add(sum1_u32x4, high2_u32x4);
+     return (nk_u32_t)wasm_i32x4_extract_lane(sum2_u32x4, 0);
+ }
+
+ /** @brief Horizontal sum of 16 unsigned 8-bit integers using pairwise widening. */
+ NK_INTERNAL nk_u32_t nk_reduce_add_u8x16_v128relaxed_(v128_t vec_u8x16) {
+     v128_t sum_u16x8 = wasm_u16x8_extadd_pairwise_u8x16(vec_u8x16);
+     v128_t sum_u32x4 = wasm_u32x4_extadd_pairwise_u16x8(sum_u16x8);
+     return nk_reduce_add_u32x4_v128relaxed_(sum_u32x4);
+ }
+
+ NK_INTERNAL nk_i64_t nk_reduce_add_i64x2_v128relaxed_(v128_t vec_i64x2) {
+     v128_t high_i64x2 = wasm_i64x2_shuffle(vec_i64x2, vec_i64x2, 1, 0);
+     v128_t sum_i64x2 = wasm_i64x2_add(vec_i64x2, high_i64x2);
+     return (nk_i64_t)wasm_i64x2_extract_lane(sum_i64x2, 0);
+ }
+
+ NK_INTERNAL nk_u64_t nk_reduce_add_u64x2_v128relaxed_(v128_t vec_u64x2) {
+     v128_t high_u64x2 = wasm_i64x2_shuffle(vec_u64x2, vec_u64x2, 1, 0);
+     v128_t sum_u64x2 = wasm_i64x2_add(vec_u64x2, high_u64x2);
+     return (nk_u64_t)wasm_i64x2_extract_lane(sum_u64x2, 0);
+ }
+
+ NK_INTERNAL nk_i32_t nk_reduce_add_i16x8_v128relaxed_(v128_t vec_i16x8) {
+     v128_t pairwise_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(vec_i16x8);
+     return nk_reduce_add_i32x4_v128relaxed_(pairwise_i32x4);
+ }
+
+ NK_INTERNAL nk_i64_t nk_reduce_add_i32x4_to_i64_v128relaxed_(v128_t vec_i32x4) {
+     v128_t low_i64x2 = wasm_i64x2_extend_low_i32x4(vec_i32x4);
+     v128_t high_i64x2 = wasm_i64x2_extend_high_i32x4(vec_i32x4);
+     v128_t sum_i64x2 = wasm_i64x2_add(low_i64x2, high_i64x2);
+     return nk_reduce_add_i64x2_v128relaxed_(sum_i64x2);
+ }
+
+ NK_INTERNAL nk_u64_t nk_reduce_add_u32x4_to_u64_v128relaxed_(v128_t vec_u32x4) {
+     v128_t low_u64x2 = wasm_u64x2_extend_low_u32x4(vec_u32x4);
+     v128_t high_u64x2 = wasm_u64x2_extend_high_u32x4(vec_u32x4);
+     v128_t sum_u64x2 = wasm_i64x2_add(low_u64x2, high_u64x2);
+     return nk_reduce_add_u64x2_v128relaxed_(sum_u64x2);
+ }
+
+ NK_INTERNAL v128_t nk_u64_sadd_epi64_v128relaxed_(v128_t a_u64x2, v128_t b_u64x2) {
+     v128_t result_u64x2 = wasm_i64x2_add(a_u64x2, b_u64x2);
+     v128_t sign_bit_i64x2 = wasm_i64x2_splat((nk_i64_t)0x8000000000000000LL);
+     v128_t a_biased_i64x2 = wasm_v128_xor(a_u64x2, sign_bit_i64x2);
+     v128_t result_biased_i64x2 = wasm_v128_xor(result_u64x2, sign_bit_i64x2);
+     v128_t overflow_u64x2 = wasm_i64x2_gt(a_biased_i64x2, result_biased_i64x2);
+     return wasm_v128_or(result_u64x2, overflow_u64x2);
+ }
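
// Sketch of the bias trick behind the overflow test above (hypothetical helper,
// not shipped): WASM SIMD has no unsigned 64-bit compare, but XOR-ing both sides
// with the sign bit 0x8000000000000000 makes the signed `wasm_i64x2_gt` ordering
// match the unsigned one, so `a_biased > result_biased` detects wrap-around of
// a + b. OR-ing the all-ones overflow mask into the sum then saturates it.
#include <stdint.h>
static inline uint64_t nk_u64_sadd_sketch_(uint64_t a, uint64_t b) {
    uint64_t sum = a + b;
    return sum < a ? UINT64_MAX : sum; // the lane-wise mask OR produces the same result
}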
+
+ NK_INTERNAL v128_t nk_i64_smul_sq_epi64_v128relaxed_(v128_t val_i64x2) {
+     v128_t sign_i64x2 = wasm_i64x2_gt(wasm_i64x2_splat(0), val_i64x2);
+     v128_t abs_val_u64x2 = wasm_i64x2_sub(wasm_v128_xor(val_i64x2, sign_i64x2), sign_i64x2);
+     v128_t low_halves_i32x4 = wasm_i32x4_shuffle(abs_val_u64x2, abs_val_u64x2, 0, 2, 0, 0);
+     v128_t low_squared_u64x2 = wasm_u64x2_extmul_low_u32x4(low_halves_i32x4, low_halves_i32x4);
+     v128_t high_bits_u64x2 = wasm_u64x2_shr(abs_val_u64x2, 32);
+     v128_t is_small_u64x2 = wasm_i64x2_eq(high_bits_u64x2, wasm_i64x2_splat(0));
+     v128_t saturated_u64x2 = wasm_i64x2_splat(NK_I64_MAX);
+     return wasm_i32x4_relaxed_laneselect(low_squared_u64x2, saturated_u64x2, is_small_u64x2);
+ }
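
// Scalar sketch of the saturating square above (hypothetical, for illustration):
// the absolute value is formed with the XOR/SUB identity, only the low 32 bits
// feed the widening multiply, and any lane with high bits set would need more
// than 64 bits for its square, so it saturates to NK_I64_MAX instead.
#include <stdint.h>
static inline uint64_t nk_i64_smul_sq_sketch_(int64_t x) {
    uint64_t ax = x < 0 ? 0u - (uint64_t)x : (uint64_t)x; // wrap-safe |x|
    if (ax >> 32) return (uint64_t)INT64_MAX;             // |x| >= 2^32: x * x >= 2^64
    return ax * ax;                                       // (2^32 - 1)^2 still fits 64 bits
}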
+
+ NK_INTERNAL v128_t nk_u64_smul_sq_epi64_v128relaxed_(v128_t val_u64x2) {
+     v128_t low_halves_i32x4 = wasm_i32x4_shuffle(val_u64x2, val_u64x2, 0, 2, 0, 0);
+     v128_t low_squared_u64x2 = wasm_u64x2_extmul_low_u32x4(low_halves_i32x4, low_halves_i32x4);
+     v128_t high_bits_u64x2 = wasm_u64x2_shr(val_u64x2, 32);
+     v128_t is_small_u64x2 = wasm_i64x2_eq(high_bits_u64x2, wasm_i64x2_splat(0));
+     v128_t saturated_u64x2 = wasm_i64x2_splat((nk_i64_t)-1);
+     return wasm_i32x4_relaxed_laneselect(low_squared_u64x2, saturated_u64x2, is_small_u64x2);
+ }
+
+ NK_INTERNAL nk_u64_t nk_reduce_sadd_u64x2_v128relaxed_(v128_t v_u64x2) {
+     v128_t swapped_u64x2 = wasm_i64x2_shuffle(v_u64x2, v_u64x2, 1, 0);
+     v128_t sum_u64x2 = wasm_i64x2_add(v_u64x2, swapped_u64x2);
+     v128_t sign_bit_i64x2 = wasm_i64x2_splat((nk_i64_t)0x8000000000000000LL);
+     v128_t v_biased_i64x2 = wasm_v128_xor(v_u64x2, sign_bit_i64x2);
+     v128_t sum_biased_i64x2 = wasm_v128_xor(sum_u64x2, sign_bit_i64x2);
+     v128_t overflow_u64x2 = wasm_i64x2_gt(v_biased_i64x2, sum_biased_i64x2);
+     sum_u64x2 = wasm_v128_or(sum_u64x2, overflow_u64x2);
+     return (nk_u64_t)wasm_i64x2_extract_lane(sum_u64x2, 0);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_f32_v128relaxed_contiguous_( //
+     nk_f32_t const *data, nk_size_t count,                      //
+     nk_f64_t *sum_ptr, nk_f64_t *sumsq_ptr) {
+     v128_t sum_f64x2 = wasm_f64x2_splat(0), sumsq_f64x2 = wasm_f64x2_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         v128_t data_f32x4 = wasm_v128_load(data + idx);
+         v128_t low_f64x2 = wasm_f64x2_promote_low_f32x4(data_f32x4);
+         v128_t high_f64x2 = wasm_f64x2_promote_low_f32x4(wasm_i32x4_shuffle(data_f32x4, data_f32x4, 2, 3, 0, 1));
+         sum_f64x2 = wasm_f64x2_add(wasm_f64x2_add(sum_f64x2, low_f64x2), high_f64x2);
+         sumsq_f64x2 = wasm_f64x2_relaxed_madd(low_f64x2, low_f64x2, sumsq_f64x2);
+         sumsq_f64x2 = wasm_f64x2_relaxed_madd(high_f64x2, high_f64x2, sumsq_f64x2);
+     }
+     nk_f64_t sum = nk_reduce_add_f64x2_v128relaxed_(sum_f64x2);
+     nk_f64_t sumsq = nk_reduce_add_f64x2_v128relaxed_(sumsq_f64x2);
+     for (; idx < count; ++idx) {
+         nk_f64_t val = (nk_f64_t)data[idx];
+         sum += val, sumsq += val * val;
+     }
+     *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_f32_v128relaxed(                //
+     nk_f32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f64_t *sum, nk_f64_t *sumsq) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f32_t);
+     int aligned = (stride_bytes % sizeof(nk_f32_t) == 0);
+     if (count == 0) *sum = 0, *sumsq = 0;
+     else if (!aligned) nk_reduce_moments_f32_serial(data, count, stride_bytes, sum, sumsq);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 4) {
+         nk_size_t left_count = count / 2;
+         nk_f64_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_f32_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_f32_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_sum, &right_sumsq);
+         *sum = left_sum + right_sum, *sumsq = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_f32_v128relaxed_contiguous_(data, count, sum, sumsq);
+     else nk_reduce_moments_f32_serial(data, count, stride_bytes, sum, sumsq);
+ }
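
// Usage sketch for the dispatcher above (illustrative, assuming only the nk_*
// types and this function): strides are given in bytes, so a plain contiguous
// array passes sizeof(nk_f32_t), and mean and variance follow from the moments.
static inline void nk_mean_var_f32_sketch_(nk_f32_t const *data, nk_size_t count,
                                           nk_f64_t *mean, nk_f64_t *variance) {
    nk_f64_t sum, sumsq; // count > 0 assumed
    nk_reduce_moments_f32_v128relaxed(data, count, sizeof(nk_f32_t), &sum, &sumsq);
    *mean = sum / (nk_f64_t)count;
    *variance = sumsq / (nk_f64_t)count - *mean * *mean; // E[x^2] - E[x]^2
}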
+
+ NK_INTERNAL void nk_reduce_moments_f64_v128relaxed_contiguous_( //
+     nk_f64_t const *data, nk_size_t count,                      //
+     nk_f64_t *sum_ptr, nk_f64_t *sumsq_ptr) {
+     v128_t sum_f64x2 = wasm_f64x2_splat(0);
+     v128_t sum_comp_f64x2 = wasm_f64x2_splat(0);
+     v128_t sumsq_f64x2 = wasm_f64x2_splat(0);
+     v128_t sumsq_comp_f64x2 = wasm_f64x2_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 2 <= count; idx += 2) {
+         v128_t val_f64x2 = wasm_v128_load(data + idx);
+         v128_t tentative_f64x2 = wasm_f64x2_add(sum_f64x2, val_f64x2);
+         v128_t round_f64x2 = wasm_f64x2_sub(tentative_f64x2, sum_f64x2);
+         v128_t corr_f64x2 = wasm_f64x2_add(wasm_f64x2_sub(sum_f64x2, wasm_f64x2_sub(tentative_f64x2, round_f64x2)),
+                                            wasm_f64x2_sub(val_f64x2, round_f64x2));
+         sum_comp_f64x2 = wasm_f64x2_add(sum_comp_f64x2, corr_f64x2);
+         sum_f64x2 = tentative_f64x2;
+         v128_t sq_f64x2 = wasm_f64x2_mul(val_f64x2, val_f64x2);
+         v128_t tentative_sq_f64x2 = wasm_f64x2_add(sumsq_f64x2, sq_f64x2);
+         v128_t round_sq_f64x2 = wasm_f64x2_sub(tentative_sq_f64x2, sumsq_f64x2);
+         v128_t corr_sq_f64x2 = wasm_f64x2_add(
+             wasm_f64x2_sub(sumsq_f64x2, wasm_f64x2_sub(tentative_sq_f64x2, round_sq_f64x2)),
+             wasm_f64x2_sub(sq_f64x2, round_sq_f64x2));
+         sumsq_comp_f64x2 = wasm_f64x2_add(sumsq_comp_f64x2, corr_sq_f64x2);
+         sumsq_f64x2 = tentative_sq_f64x2;
+     }
+     nk_f64_t sum = nk_reduce_add_f64x2_v128relaxed_(wasm_f64x2_add(sum_f64x2, sum_comp_f64x2));
+     nk_f64_t sumsq = nk_reduce_add_f64x2_v128relaxed_(wasm_f64x2_add(sumsq_f64x2, sumsq_comp_f64x2));
+     for (; idx < count; ++idx) {
+         nk_f64_t val = data[idx];
+         sum += val;
+         sumsq += val * val;
+     }
+     *sum_ptr = sum;
+     *sumsq_ptr = sumsq;
+ }
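
// The loop above carries a per-lane compensation term using Knuth's two-sum
// identity: for t = s + v with r = t - s, the rounding error of the addition is
// exactly (s - (t - r)) + (v - r). A scalar sketch of one step (illustrative,
// not part of the shipped header):
static inline double nk_two_sum_step_sketch_(double s, double v, double *comp) {
    double t = s + v;
    double r = t - s;
    *comp += (s - (t - r)) + (v - r); // exact residual, folded back in after the loop
    return t;
}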
+
+ NK_PUBLIC void nk_reduce_moments_f64_v128relaxed(                //
+     nk_f64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f64_t *sum, nk_f64_t *sumsq) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f64_t);
+     int aligned = (stride_bytes % sizeof(nk_f64_t) == 0);
+     if (count == 0) *sum = 0, *sumsq = 0;
+     else if (!aligned) nk_reduce_moments_f64_serial(data, count, stride_bytes, sum, sumsq);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 2) {
+         nk_size_t left_count = count / 2;
+         nk_f64_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_f64_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_f64_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_sum, &right_sumsq);
+         *sum = left_sum + right_sum, *sumsq = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_f64_v128relaxed_contiguous_(data, count, sum, sumsq);
+     else nk_reduce_moments_f64_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_bf16_v128relaxed_contiguous_( //
+     nk_bf16_t const *data, nk_size_t count,                      //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     v128_t sum_f32x4 = wasm_f32x4_splat(0);
+     v128_t sumsq_f32x4 = wasm_f32x4_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         nk_b64_vec_t raw;
+         raw.u64 = *(nk_u64_t const *)(data + idx);
+         v128_t data_f32x4 = nk_bf16x4_to_f32x4_v128relaxed_(raw).v128;
+         sum_f32x4 = wasm_f32x4_add(sum_f32x4, data_f32x4);
+         sumsq_f32x4 = wasm_f32x4_relaxed_madd(data_f32x4, data_f32x4, sumsq_f32x4);
+     }
+     nk_f32_t sum = nk_reduce_add_f32x4_v128relaxed_(sum_f32x4);
+     nk_f32_t sumsq = nk_reduce_add_f32x4_v128relaxed_(sumsq_f32x4);
+     for (; idx < count; ++idx) {
+         nk_f32_t val;
+         nk_bf16_to_f32_serial(data + idx, &val);
+         sum += val, sumsq += val * val;
+     }
+     *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_bf16_v128relaxed(                //
+     nk_bf16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *sum, nk_f32_t *sumsq) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_bf16_t);
+     int aligned = (stride_bytes % sizeof(nk_bf16_t) == 0);
+     if (count == 0) *sum = 0, *sumsq = 0;
+     else if (!aligned) nk_reduce_moments_bf16_serial(data, count, stride_bytes, sum, sumsq);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 4) {
+         nk_size_t left_count = count / 2;
+         nk_f32_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_bf16_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_bf16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                            &right_sum, &right_sumsq);
+         *sum = left_sum + right_sum, *sumsq = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_bf16_v128relaxed_contiguous_(data, count, sum, sumsq);
+     else nk_reduce_moments_bf16_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_f16_v128relaxed_contiguous_( //
+     nk_f16_t const *data, nk_size_t count,                      //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     v128_t sum_f32x4 = wasm_f32x4_splat(0);
+     v128_t sumsq_f32x4 = wasm_f32x4_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         nk_b64_vec_t raw;
+         raw.u64 = *(nk_u64_t const *)(data + idx);
+         v128_t data_f32x4 = nk_f16x4_to_f32x4_v128relaxed_(raw).v128;
+         sum_f32x4 = wasm_f32x4_add(sum_f32x4, data_f32x4);
+         sumsq_f32x4 = wasm_f32x4_relaxed_madd(data_f32x4, data_f32x4, sumsq_f32x4);
+     }
+     nk_f32_t sum = nk_reduce_add_f32x4_v128relaxed_(sum_f32x4);
+     nk_f32_t sumsq = nk_reduce_add_f32x4_v128relaxed_(sumsq_f32x4);
+     for (; idx < count; ++idx) {
+         nk_f32_t val;
+         nk_f16_to_f32_serial(data + idx, &val);
+         sum += val, sumsq += val * val;
+     }
+     *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_f16_v128relaxed(                //
+     nk_f16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *sum, nk_f32_t *sumsq) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f16_t);
+     int aligned = (stride_bytes % sizeof(nk_f16_t) == 0);
+     if (count == 0) *sum = 0, *sumsq = 0;
+     else if (!aligned) nk_reduce_moments_f16_serial(data, count, stride_bytes, sum, sumsq);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 4) {
+         nk_size_t left_count = count / 2;
+         nk_f32_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_f16_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_f16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_sum, &right_sumsq);
+         *sum = left_sum + right_sum, *sumsq = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_f16_v128relaxed_contiguous_(data, count, sum, sumsq);
+     else nk_reduce_moments_f16_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_f32_v128relaxed_contiguous_(  //
+     nk_f32_t const *data, nk_size_t count,                      //
+     nk_f32_t *min_value_ptr, nk_size_t *min_index_ptr,          //
+     nk_f32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_f32x4 = wasm_f32x4_splat(NK_F32_MAX), max_f32x4 = wasm_f32x4_splat(NK_F32_MIN);
+     v128_t min_iter_u32x4 = wasm_i32x4_splat(0), max_iter_u32x4 = wasm_i32x4_splat(0);
+     v128_t iter_u32x4 = wasm_i32x4_splat(0), one_u32x4 = wasm_i32x4_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         v128_t data_f32x4 = wasm_v128_load(data + idx);
+         v128_t less_b32x4 = wasm_f32x4_lt(data_f32x4, min_f32x4);
+         v128_t greater_b32x4 = wasm_f32x4_gt(data_f32x4, max_f32x4);
+         min_f32x4 = wasm_i32x4_relaxed_laneselect(data_f32x4, min_f32x4, less_b32x4);
+         max_f32x4 = wasm_i32x4_relaxed_laneselect(data_f32x4, max_f32x4, greater_b32x4);
+         min_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, min_iter_u32x4, less_b32x4);
+         max_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, max_iter_u32x4, greater_b32x4);
+         iter_u32x4 = wasm_i32x4_add(iter_u32x4, one_u32x4);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_f32x4;
+     max_values_vec.v128 = max_f32x4;
+     min_iters_vec.v128 = min_iter_u32x4;
+     max_iters_vec.v128 = max_iter_u32x4;
+     nk_f32_t min_value = min_values_vec.f32s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u32s[0] * 4;
+     for (int i = 1; i < 4; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+         if (min_values_vec.f32s[i] < min_value || (min_values_vec.f32s[i] == min_value && abs_idx < min_idx))
+             min_value = min_values_vec.f32s[i], min_idx = abs_idx;
+     }
+     nk_f32_t max_value = max_values_vec.f32s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u32s[0] * 4;
+     for (int i = 1; i < 4; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+         if (max_values_vec.f32s[i] > max_value || (max_values_vec.f32s[i] == max_value && abs_idx < max_idx))
+             max_value = max_values_vec.f32s[i], max_idx = abs_idx;
+     }
+     for (; idx < count; ++idx) {
+         nk_f32_t val = data[idx];
+         if (val < min_value) min_value = val, min_idx = idx;
+         if (val > max_value) max_value = val, max_idx = idx;
+     }
+     if (min_value == NK_F32_MAX && max_value == NK_F32_MIN) {
+         *min_value_ptr = NK_F32_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_F32_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     *min_value_ptr = min_value, *min_index_ptr = min_idx;
+     *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_f32_v128relaxed(                 //
+     nk_f32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *min_value_ptr, nk_size_t *min_index_ptr,           //
+     nk_f32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f32_t);
+     int aligned = (stride_bytes % sizeof(nk_f32_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_F32_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_F32_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_f32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+     else if (count > (nk_size_t)NK_U32_MAX * 4) {
+         nk_size_t left_count = count / 2;
+         nk_f32_t left_min_value, right_min_value, left_max_value, right_max_value;
+         nk_size_t left_min_index, right_min_index, left_max_index, right_max_index;
+         nk_reduce_minmax_f32_v128relaxed(data, left_count, stride_bytes, &left_min_value, &left_min_index,
+                                          &left_max_value, &left_max_index);
+         nk_reduce_minmax_f32_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                          &right_min_value, &right_min_index, &right_max_value, &right_max_index);
+         if (right_min_value < left_min_value)
+             *min_value_ptr = right_min_value, *min_index_ptr = left_count + right_min_index;
+         else *min_value_ptr = left_min_value, *min_index_ptr = left_min_index;
+         if (right_max_value > left_max_value)
+             *max_value_ptr = right_max_value, *max_index_ptr = left_count + right_max_index;
+         else *max_value_ptr = left_max_value, *max_index_ptr = left_max_index;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_f32_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                      max_index_ptr);
+     else
+         nk_reduce_minmax_f32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+ }
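
// Why the dispatcher above splits recursively (sketch): each lane tracks the
// block index of its extremum in a u32, and every block covers 4 floats, so the
// per-lane indices stay exact only while count <= NK_U32_MAX * 4. Larger inputs
// are halved, and the halves are merged with an index offset, as in this scalar
// merge of the min side (hypothetical helper, mirroring the branch above):
#include <stddef.h>
static inline void nk_merge_min_sketch_(float left_value, size_t left_index,
                                        float right_value, size_t right_index,
                                        size_t left_count, float *value, size_t *index) {
    if (right_value < left_value) { *value = right_value; *index = left_count + right_index; }
    else { *value = left_value; *index = left_index; } // ties keep the leftmost match
}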
+
+ NK_INTERNAL void nk_reduce_minmax_f64_v128relaxed_contiguous_(  //
+     nk_f64_t const *data, nk_size_t count,                      //
+     nk_f64_t *min_value_ptr, nk_size_t *min_index_ptr,          //
+     nk_f64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_f64x2 = wasm_f64x2_splat(NK_F64_MAX), max_f64x2 = wasm_f64x2_splat(NK_F64_MIN);
+     v128_t min_iter_u64x2 = wasm_i64x2_splat(0), max_iter_u64x2 = wasm_i64x2_splat(0);
+     v128_t iter_u64x2 = wasm_i64x2_splat(0), one_u64x2 = wasm_i64x2_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 2 <= count; idx += 2) {
+         v128_t data_f64x2 = wasm_v128_load(data + idx);
+         v128_t less_b64x2 = wasm_f64x2_lt(data_f64x2, min_f64x2);
+         v128_t greater_b64x2 = wasm_f64x2_gt(data_f64x2, max_f64x2);
+         min_f64x2 = wasm_i64x2_relaxed_laneselect(data_f64x2, min_f64x2, less_b64x2);
+         max_f64x2 = wasm_i64x2_relaxed_laneselect(data_f64x2, max_f64x2, greater_b64x2);
+         min_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, min_iter_u64x2, less_b64x2);
+         max_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, max_iter_u64x2, greater_b64x2);
+         iter_u64x2 = wasm_i64x2_add(iter_u64x2, one_u64x2);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_f64x2;
+     max_values_vec.v128 = max_f64x2;
+     min_iters_vec.v128 = min_iter_u64x2;
+     max_iters_vec.v128 = max_iter_u64x2;
+     nk_f64_t min_value = min_values_vec.f64s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u64s[0] * 2;
+     if (min_values_vec.f64s[1] < min_value ||
+         (min_values_vec.f64s[1] == min_value && (nk_size_t)min_iters_vec.u64s[1] * 2 + 1 < min_idx))
+         min_value = min_values_vec.f64s[1], min_idx = (nk_size_t)min_iters_vec.u64s[1] * 2 + 1;
+     nk_f64_t max_value = max_values_vec.f64s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u64s[0] * 2;
+     if (max_values_vec.f64s[1] > max_value ||
+         (max_values_vec.f64s[1] == max_value && (nk_size_t)max_iters_vec.u64s[1] * 2 + 1 < max_idx))
+         max_value = max_values_vec.f64s[1], max_idx = (nk_size_t)max_iters_vec.u64s[1] * 2 + 1;
+     for (; idx < count; ++idx) {
+         nk_f64_t val = data[idx];
+         if (val < min_value) min_value = val, min_idx = idx;
+         if (val > max_value) max_value = val, max_idx = idx;
+     }
+     if (min_value == NK_F64_MAX && max_value == NK_F64_MIN) {
+         *min_value_ptr = NK_F64_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_F64_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     *min_value_ptr = min_value, *min_index_ptr = min_idx;
+     *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_f64_v128relaxed(                 //
+     nk_f64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f64_t *min_value_ptr, nk_size_t *min_index_ptr,           //
+     nk_f64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f64_t);
+     int aligned = (stride_bytes % sizeof(nk_f64_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_F64_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_F64_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_f64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+     else if (stride_elements == 1)
+         nk_reduce_minmax_f64_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                      max_index_ptr);
+     else
+         nk_reduce_minmax_f64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_bf16_v128relaxed_contiguous_( //
+     nk_bf16_t const *data, nk_size_t count,                     //
+     nk_bf16_t *min_value_ptr, nk_size_t *min_index_ptr,         //
+     nk_bf16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t abs_mask_u16x8 = wasm_i16x8_splat(0x7FFF);
+     v128_t nan_threshold_u16x8 = wasm_i16x8_splat((short)0x7F80);
+     v128_t min_cmp_i16x8 = wasm_i16x8_splat(0x7F80);        // +inf comparable
+     v128_t max_cmp_i16x8 = wasm_i16x8_splat((short)0x807F); // -inf comparable
+     v128_t min_iter_u16x8 = wasm_i16x8_splat(0), max_iter_u16x8 = wasm_i16x8_splat(0);
+     v128_t iter_u16x8 = wasm_i16x8_splat(0), one_u16x8 = wasm_i16x8_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 8 <= count; idx += 8) {
+         v128_t raw_u16x8 = wasm_v128_load(data + idx);
+         // Convert to comparable i16: sign = srai(raw, 15), flip = srli(sign, 1), cmp = raw ^ flip
+         v128_t sign_i16x8 = wasm_i16x8_shr(raw_u16x8, 15);
+         v128_t flip_u16x8 = wasm_u16x8_shr(sign_i16x8, 1);
+         v128_t cmp_i16x8 = wasm_v128_xor(raw_u16x8, flip_u16x8);
+         // Filter NaN: (raw & 0x7FFF) <= 0x7F80 (both sides non-negative, so signed LE works)
+         v128_t abs_u16x8 = wasm_v128_and(raw_u16x8, abs_mask_u16x8);
+         v128_t not_nan_i16x8 = wasm_i16x8_le(abs_u16x8, nan_threshold_u16x8);
+         // Compare as signed i16, masked by not-NaN
+         v128_t less_i16x8 = wasm_v128_and(wasm_i16x8_lt(cmp_i16x8, min_cmp_i16x8), not_nan_i16x8);
+         v128_t greater_i16x8 = wasm_v128_and(wasm_i16x8_gt(cmp_i16x8, max_cmp_i16x8), not_nan_i16x8);
+         min_cmp_i16x8 = wasm_i16x8_relaxed_laneselect(cmp_i16x8, min_cmp_i16x8, less_i16x8);
+         max_cmp_i16x8 = wasm_i16x8_relaxed_laneselect(cmp_i16x8, max_cmp_i16x8, greater_i16x8);
+         min_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, min_iter_u16x8, less_i16x8);
+         max_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, max_iter_u16x8, greater_i16x8);
+         iter_u16x8 = wasm_i16x8_add(iter_u16x8, one_u16x8);
+     }
+     // Horizontal reduction over 8 lanes
+     nk_b128_vec_t min_cmp_vec, max_cmp_vec, min_iters_vec, max_iters_vec;
+     min_cmp_vec.v128 = min_cmp_i16x8;
+     max_cmp_vec.v128 = max_cmp_i16x8;
+     min_iters_vec.v128 = min_iter_u16x8;
+     max_iters_vec.v128 = max_iter_u16x8;
+     nk_i16_t min_comparable = min_cmp_vec.i16s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u16s[0] * 8;
+     for (int i = 1; i < 8; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+         if (min_cmp_vec.i16s[i] < min_comparable || (min_cmp_vec.i16s[i] == min_comparable && abs_idx < min_idx))
+             min_comparable = min_cmp_vec.i16s[i], min_idx = abs_idx;
+     }
+     nk_i16_t max_comparable = max_cmp_vec.i16s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u16s[0] * 8;
+     for (int i = 1; i < 8; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+         if (max_cmp_vec.i16s[i] > max_comparable || (max_cmp_vec.i16s[i] == max_comparable && abs_idx < max_idx))
+             max_comparable = max_cmp_vec.i16s[i], max_idx = abs_idx;
+     }
+     // Scalar tail
+     for (; idx < count; ++idx) {
+         nk_u16_t raw = *(nk_u16_t const *)(data + idx);
+         if ((raw & 0x7FFF) > 0x7F80) continue; // skip NaN
+         nk_i16_t comparable = (raw & 0x8000) ? (nk_i16_t)(raw ^ 0x7FFF) : (nk_i16_t)raw;
+         if (comparable < min_comparable) min_comparable = comparable, min_idx = idx;
+         if (comparable > max_comparable) max_comparable = comparable, max_idx = idx;
+     }
+     if (min_comparable == 0x7F80 && max_comparable == (nk_i16_t)0x807F) {
+         *min_value_ptr = NK_BF16_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_BF16_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     // Convert comparable back to raw bf16
+     nk_i16_t min_sign = min_comparable >> 15;
+     nk_u16_t min_raw = (nk_u16_t)min_comparable ^ ((nk_u16_t)min_sign >> 1);
+     *(nk_u16_t *)min_value_ptr = min_raw, *min_index_ptr = min_idx;
+     nk_i16_t max_sign = max_comparable >> 15;
+     nk_u16_t max_raw = (nk_u16_t)max_comparable ^ ((nk_u16_t)max_sign >> 1);
+     *(nk_u16_t *)max_value_ptr = max_raw, *max_index_ptr = max_idx;
+ }
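
// The bf16 kernel above never widens to f32: it maps each 16-bit pattern to a
// "comparable" signed integer whose ordering matches IEEE ordering once NaNs are
// filtered out. A sketch of the mapping and its inverse, matching the tail loop
// and the epilogue above (hypothetical helpers, for illustration only):
#include <stdint.h>
static inline int16_t nk_bf16_comparable_sketch_(uint16_t raw) {
    return (int16_t)((raw & 0x8000u) ? (raw ^ 0x7FFFu) : raw); // flip payload of negatives
}
static inline uint16_t nk_bf16_raw_sketch_(int16_t comparable) {
    uint16_t flip = comparable < 0 ? 0x7FFFu : 0u; // same as ((u16)(cmp >> 15)) >> 1
    return (uint16_t)comparable ^ flip;
}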
+
+ NK_PUBLIC void nk_reduce_minmax_bf16_v128relaxed(                 //
+     nk_bf16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_bf16_t *min_value_ptr, nk_size_t *min_index_ptr,           //
+     nk_bf16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_bf16_t);
+     int aligned = (stride_bytes % sizeof(nk_bf16_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_BF16_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_BF16_MIN,
+             *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_bf16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 8) {
+         nk_size_t left_count = count / 2;
+         nk_bf16_t left_min_value, right_min_value, left_max_value, right_max_value;
+         nk_size_t left_min_index, right_min_index, left_max_index, right_max_index;
+         nk_reduce_minmax_bf16_v128relaxed(data, left_count, stride_bytes, &left_min_value, &left_min_index,
+                                           &left_max_value, &left_max_index);
+         nk_reduce_minmax_bf16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_min_value, &right_min_index, &right_max_value, &right_max_index);
+         if (nk_bf16_order_serial(right_min_value, left_min_value) < 0)
+             *min_value_ptr = right_min_value, *min_index_ptr = left_count + right_min_index;
+         else *min_value_ptr = left_min_value, *min_index_ptr = left_min_index;
+         if (nk_bf16_order_serial(right_max_value, left_max_value) > 0)
+             *max_value_ptr = right_max_value, *max_index_ptr = left_count + right_max_index;
+         else *max_value_ptr = left_max_value, *max_index_ptr = left_max_index;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_bf16_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                       max_index_ptr);
+     else
+         nk_reduce_minmax_bf16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_f16_v128relaxed_contiguous_(  //
+     nk_f16_t const *data, nk_size_t count,                      //
+     nk_f16_t *min_value_ptr, nk_size_t *min_index_ptr,          //
+     nk_f16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_f32x4 = wasm_f32x4_splat(NK_F32_MAX), max_f32x4 = wasm_f32x4_splat(NK_F32_MIN);
+     v128_t min_iter_u32x4 = wasm_i32x4_splat(0), max_iter_u32x4 = wasm_i32x4_splat(0);
+     v128_t iter_u32x4 = wasm_i32x4_splat(0), one_u32x4 = wasm_i32x4_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         nk_b64_vec_t raw;
+         raw.u64 = *(nk_u64_t const *)(data + idx);
+         v128_t data_f32x4 = nk_f16x4_to_f32x4_v128relaxed_(raw).v128;
+         v128_t less_b32x4 = wasm_f32x4_lt(data_f32x4, min_f32x4);
+         v128_t greater_b32x4 = wasm_f32x4_gt(data_f32x4, max_f32x4);
+         min_f32x4 = wasm_i32x4_relaxed_laneselect(data_f32x4, min_f32x4, less_b32x4);
+         max_f32x4 = wasm_i32x4_relaxed_laneselect(data_f32x4, max_f32x4, greater_b32x4);
+         min_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, min_iter_u32x4, less_b32x4);
+         max_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, max_iter_u32x4, greater_b32x4);
+         iter_u32x4 = wasm_i32x4_add(iter_u32x4, one_u32x4);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_f32x4;
+     max_values_vec.v128 = max_f32x4;
+     min_iters_vec.v128 = min_iter_u32x4;
+     max_iters_vec.v128 = max_iter_u32x4;
+     nk_f32_t min_value_f32 = min_values_vec.f32s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u32s[0] * 4;
+     for (int i = 1; i < 4; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+         if (min_values_vec.f32s[i] < min_value_f32 || (min_values_vec.f32s[i] == min_value_f32 && abs_idx < min_idx))
+             min_value_f32 = min_values_vec.f32s[i], min_idx = abs_idx;
+     }
+     nk_f32_t max_value_f32 = max_values_vec.f32s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u32s[0] * 4;
+     for (int i = 1; i < 4; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+         if (max_values_vec.f32s[i] > max_value_f32 || (max_values_vec.f32s[i] == max_value_f32 && abs_idx < max_idx))
+             max_value_f32 = max_values_vec.f32s[i], max_idx = abs_idx;
+     }
+     for (; idx < count; ++idx) {
+         nk_f32_t val;
+         nk_f16_to_f32_serial(data + idx, &val);
+         if (val < min_value_f32) min_value_f32 = val, min_idx = idx;
+         if (val > max_value_f32) max_value_f32 = val, max_idx = idx;
+     }
+     if (min_value_f32 == NK_F32_MAX && max_value_f32 == NK_F32_MIN) {
+         *min_value_ptr = nk_f16_from_u16_(NK_F16_MAX), *min_index_ptr = NK_SIZE_MAX,
+             *max_value_ptr = nk_f16_from_u16_(NK_F16_MIN), *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     *min_value_ptr = data[min_idx], *min_index_ptr = min_idx;
+     *max_value_ptr = data[max_idx], *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_f16_v128relaxed(                 //
+     nk_f16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f16_t *min_value_ptr, nk_size_t *min_index_ptr,           //
+     nk_f16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_f16_t);
+     int aligned = (stride_bytes % sizeof(nk_f16_t) == 0);
+     if (count == 0)
+         *min_value_ptr = nk_f16_from_u16_(NK_F16_MAX), *min_index_ptr = NK_SIZE_MAX,
+             *max_value_ptr = nk_f16_from_u16_(NK_F16_MIN), *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_f16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+     else if (count > (nk_size_t)NK_U32_MAX * 4) {
+         nk_size_t left_count = count / 2;
+         nk_f16_t left_min_value, right_min_value, left_max_value, right_max_value;
+         nk_size_t left_min_index, right_min_index, left_max_index, right_max_index;
+         nk_reduce_minmax_f16_v128relaxed(data, left_count, stride_bytes, &left_min_value, &left_min_index,
+                                          &left_max_value, &left_max_index);
+         nk_reduce_minmax_f16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+                                          &right_min_value, &right_min_index, &right_max_value, &right_max_index);
+         if (nk_f16_order_serial(right_min_value, left_min_value) < 0)
+             *min_value_ptr = right_min_value, *min_index_ptr = left_count + right_min_index;
+         else *min_value_ptr = left_min_value, *min_index_ptr = left_min_index;
+         if (nk_f16_order_serial(right_max_value, left_max_value) > 0)
+             *max_value_ptr = right_max_value, *max_index_ptr = left_count + right_max_index;
+         else *max_value_ptr = left_max_value, *max_index_ptr = left_max_index;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_f16_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                      max_index_ptr);
+     else
+         nk_reduce_minmax_f16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                     max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_i8_v128relaxed_contiguous_( //
+     nk_i8_t const *data, nk_size_t count,                      //
+     nk_i64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+     v128_t sum_i32x4 = wasm_i32x4_splat(0);
+     v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t data_i8x16 = wasm_v128_load(data + idx);
+         v128_t pairwise_i16x8 = wasm_i16x8_extadd_pairwise_i8x16(data_i8x16);
+         v128_t pairwise_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(pairwise_i16x8);
+         sum_i32x4 = wasm_i32x4_add(sum_i32x4, pairwise_i32x4);
+         v128_t sq_low_i16x8 = wasm_i16x8_extmul_low_i8x16(data_i8x16, data_i8x16);
+         v128_t sq_high_i16x8 = wasm_i16x8_extmul_high_i8x16(data_i8x16, data_i8x16);
+         v128_t sq_u32x4 = wasm_i32x4_add(wasm_u32x4_extadd_pairwise_u16x8(sq_low_i16x8),
+                                          wasm_u32x4_extadd_pairwise_u16x8(sq_high_i16x8));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_u32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_u32x4));
+     }
+     nk_i64_t sum = nk_reduce_add_i32x4_v128relaxed_(sum_i32x4);
+     nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
+     for (; idx < count; ++idx) {
+         nk_i64_t val = (nk_i64_t)data[idx];
+         sum += val, sumsq += (nk_u64_t)(val * val);
+     }
+     *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
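
// Headroom sketch for the i8 kernel above (illustrative): two pairwise widenings
// leave each i32 accumulator lane absorbing at most 4 * 128 = 512 in magnitude
// per 16-byte block, and the dispatcher below splits anything longer than
// (NK_U16_MAX + 1) * 16 elements, so the i32 sums can never wrap; the squares
// are widened to u64 every block. A quick check of the bound:
#include <assert.h>
#include <stdint.h>
static inline void nk_i8_moments_headroom_sketch_(void) {
    int64_t per_lane_per_block = 4 * 128;             // worst-case |sum| added per lane
    int64_t blocks = (int64_t)UINT16_MAX + 1;         // most blocks before a split
    assert(per_lane_per_block * blocks <= INT32_MAX); // 2^25, far below 2^31 - 1
}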
692
+
693
+ NK_PUBLIC void nk_reduce_moments_i8_v128relaxed( //
694
+ nk_i8_t const *data, nk_size_t count, nk_size_t stride_bytes, //
695
+ nk_i64_t *sum, nk_u64_t *sumsq) {
696
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i8_t);
697
+ int aligned = (stride_bytes % sizeof(nk_i8_t) == 0);
698
+ if (count == 0) *sum = 0, *sumsq = 0;
699
+ else if (!aligned) nk_reduce_moments_i8_serial(data, count, stride_bytes, sum, sumsq);
700
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 16) {
701
+ nk_size_t left_count = count / 2;
702
+ nk_i64_t left_sum, right_sum;
703
+ nk_u64_t left_sumsq, right_sumsq;
704
+ nk_reduce_moments_i8_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
705
+ nk_reduce_moments_i8_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
706
+ &right_sum, &right_sumsq);
707
+ *sum = nk_i64_saturating_add_serial(left_sum, right_sum);
708
+ *sumsq = nk_u64_saturating_add_serial(left_sumsq, right_sumsq);
709
+ }
710
+ else if (stride_elements == 1) nk_reduce_moments_i8_v128relaxed_contiguous_(data, count, sum, sumsq);
711
+ else nk_reduce_moments_i8_serial(data, count, stride_bytes, sum, sumsq);
712
+ }
713
+
714
+ NK_INTERNAL void nk_reduce_moments_u8_v128relaxed_contiguous_( //
+ nk_u8_t const *data, nk_size_t count, //
+ nk_u64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_u32x4 = wasm_i32x4_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 16 <= count; idx += 16) {
+ v128_t data_u8x16 = wasm_v128_load(data + idx);
+ v128_t pairwise_u16x8 = wasm_u16x8_extadd_pairwise_u8x16(data_u8x16);
+ v128_t pairwise_u32x4 = wasm_u32x4_extadd_pairwise_u16x8(pairwise_u16x8);
+ sum_u32x4 = wasm_i32x4_add(sum_u32x4, pairwise_u32x4);
+ v128_t sq_low_u16x8 = wasm_u16x8_extmul_low_u8x16(data_u8x16, data_u8x16);
+ v128_t sq_high_u16x8 = wasm_u16x8_extmul_high_u8x16(data_u8x16, data_u8x16);
+ v128_t sq_u32x4 = wasm_i32x4_add(wasm_u32x4_extadd_pairwise_u16x8(sq_low_u16x8),
+ wasm_u32x4_extadd_pairwise_u16x8(sq_high_u16x8));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_u32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_u32x4));
+ }
+ nk_u64_t sum = nk_reduce_add_u32x4_v128relaxed_(sum_u32x4);
+ nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
+ for (; idx < count; ++idx) {
+ nk_u64_t val = (nk_u64_t)data[idx];
+ sum += val, sumsq += val * val;
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_u8_v128relaxed( //
+ nk_u8_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u8_t);
+ int aligned = (stride_bytes % sizeof(nk_u8_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_u8_serial(data, count, stride_bytes, sum, sumsq);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 16) {
+ nk_size_t left_count = count / 2;
+ nk_u64_t left_sum, left_sumsq, right_sum, right_sumsq;
+ nk_reduce_moments_u8_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+ nk_reduce_moments_u8_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_sum, &right_sumsq);
+ *sum = nk_u64_saturating_add_serial(left_sum, right_sum);
+ *sumsq = nk_u64_saturating_add_serial(left_sumsq, right_sumsq);
+ }
+ else if (stride_elements == 1) nk_reduce_moments_u8_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_u8_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_i16_v128relaxed_contiguous_( //
+ nk_i16_t const *data, nk_size_t count, //
+ nk_i64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_i64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 8 <= count; idx += 8) {
+ v128_t data_i16x8 = wasm_v128_load(data + idx);
+ v128_t pairwise_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(data_i16x8);
+ sum_i64x2 = wasm_i64x2_add(sum_i64x2, wasm_i64x2_extend_low_i32x4(pairwise_i32x4));
+ sum_i64x2 = wasm_i64x2_add(sum_i64x2, wasm_i64x2_extend_high_i32x4(pairwise_i32x4));
+ v128_t sq_low_i32x4 = wasm_i32x4_extmul_low_i16x8(data_i16x8, data_i16x8);
+ v128_t sq_high_i32x4 = wasm_i32x4_extmul_high_i16x8(data_i16x8, data_i16x8);
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_low_i32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_low_i32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_high_i32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_high_i32x4));
+ }
+ nk_i64_t sum = nk_reduce_add_i64x2_v128relaxed_(sum_i64x2);
+ nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
+ for (; idx < count; ++idx) {
+ nk_i64_t val = (nk_i64_t)data[idx];
+ sum += val, sumsq += (nk_u64_t)(val * val);
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_i16_v128relaxed( //
+ nk_i16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i16_t);
+ int aligned = (stride_bytes % sizeof(nk_i16_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_i16_serial(data, count, stride_bytes, sum, sumsq);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 8) {
+ nk_size_t left_count = count / 2;
+ nk_i64_t left_sum, right_sum;
+ nk_u64_t left_sumsq, right_sumsq;
+ nk_reduce_moments_i16_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+ nk_reduce_moments_i16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_sum, &right_sumsq);
+ *sum = nk_i64_saturating_add_serial(left_sum, right_sum);
+ *sumsq = nk_u64_saturating_add_serial(left_sumsq, right_sumsq);
+ }
+ else if (stride_elements == 1) nk_reduce_moments_i16_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_i16_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_u16_v128relaxed_contiguous_( //
+ nk_u16_t const *data, nk_size_t count, //
+ nk_u64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_u64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 8 <= count; idx += 8) {
+ v128_t data_u16x8 = wasm_v128_load(data + idx);
+ v128_t pairwise_u32x4 = wasm_u32x4_extadd_pairwise_u16x8(data_u16x8);
+ sum_u64x2 = wasm_i64x2_add(sum_u64x2, wasm_u64x2_extend_low_u32x4(pairwise_u32x4));
+ sum_u64x2 = wasm_i64x2_add(sum_u64x2, wasm_u64x2_extend_high_u32x4(pairwise_u32x4));
+ v128_t sq_low_u32x4 = wasm_u32x4_extmul_low_u16x8(data_u16x8, data_u16x8);
+ v128_t sq_high_u32x4 = wasm_u32x4_extmul_high_u16x8(data_u16x8, data_u16x8);
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_low_u32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_low_u32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_high_u32x4));
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_high_u32x4));
+ }
+ nk_u64_t sum = nk_reduce_add_u64x2_v128relaxed_(sum_u64x2);
+ nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
+ for (; idx < count; ++idx) {
+ nk_u64_t val = (nk_u64_t)data[idx];
+ sum += val, sumsq += val * val;
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_u16_v128relaxed( //
+ nk_u16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u16_t);
+ int aligned = (stride_bytes % sizeof(nk_u16_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_u16_serial(data, count, stride_bytes, sum, sumsq);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 8) {
+ nk_size_t left_count = count / 2;
+ nk_u64_t left_sum, left_sumsq, right_sum, right_sumsq;
+ nk_reduce_moments_u16_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+ nk_reduce_moments_u16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_sum, &right_sumsq);
+ *sum = nk_u64_saturating_add_serial(left_sum, right_sum);
+ *sumsq = nk_u64_saturating_add_serial(left_sumsq, right_sumsq);
+ }
+ else if (stride_elements == 1) nk_reduce_moments_u16_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_u16_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_i32_v128relaxed_contiguous_( //
+ nk_i32_t const *data, nk_size_t count, //
+ nk_i64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_lower_u64x2 = wasm_i64x2_splat(0);
+ v128_t sum_upper_i64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_overflow_u64x2 = wasm_i64x2_splat(0);
+ v128_t sign_bit_i64x2 = wasm_i64x2_splat((nk_i64_t)0x8000000000000000LL);
+ nk_size_t idx = 0;
+ for (; idx + 4 <= count; idx += 4) {
+ v128_t data_i32x4 = wasm_v128_load(data + idx);
+ v128_t data_low_i64x2 = wasm_i64x2_extend_low_i32x4(data_i32x4);
+ v128_t before_u64x2 = sum_lower_u64x2;
+ sum_lower_u64x2 = wasm_i64x2_add(sum_lower_u64x2, data_low_i64x2);
+ v128_t result_biased_i64x2 = wasm_v128_xor(sum_lower_u64x2, sign_bit_i64x2);
+ v128_t before_biased_i64x2 = wasm_v128_xor(before_u64x2, sign_bit_i64x2);
+ v128_t carry_u64x2 = wasm_i64x2_gt(before_biased_i64x2, result_biased_i64x2);
+ sum_upper_i64x2 = wasm_i64x2_sub(sum_upper_i64x2, carry_u64x2);
+ sum_upper_i64x2 = wasm_i64x2_add(sum_upper_i64x2, wasm_i64x2_shr(data_low_i64x2, 63));
+ v128_t data_high_i64x2 = wasm_i64x2_extend_high_i32x4(data_i32x4);
+ before_u64x2 = sum_lower_u64x2;
+ sum_lower_u64x2 = wasm_i64x2_add(sum_lower_u64x2, data_high_i64x2);
+ result_biased_i64x2 = wasm_v128_xor(sum_lower_u64x2, sign_bit_i64x2);
+ before_biased_i64x2 = wasm_v128_xor(before_u64x2, sign_bit_i64x2);
+ carry_u64x2 = wasm_i64x2_gt(before_biased_i64x2, result_biased_i64x2);
+ sum_upper_i64x2 = wasm_i64x2_sub(sum_upper_i64x2, carry_u64x2);
+ sum_upper_i64x2 = wasm_i64x2_add(sum_upper_i64x2, wasm_i64x2_shr(data_high_i64x2, 63));
+ v128_t sq_low_i64x2 = wasm_i64x2_extmul_low_i32x4(data_i32x4, data_i32x4);
+ v128_t sq_high_i64x2 = wasm_i64x2_extmul_high_i32x4(data_i32x4, data_i32x4);
+ v128_t sq_before_u64x2 = sumsq_u64x2;
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, sq_low_i64x2);
+ sumsq_overflow_u64x2 = wasm_v128_or(
+ sumsq_overflow_u64x2,
+ wasm_i64x2_gt(wasm_v128_xor(sq_before_u64x2, sign_bit_i64x2), wasm_v128_xor(sumsq_u64x2, sign_bit_i64x2)));
+ sq_before_u64x2 = sumsq_u64x2;
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, sq_high_i64x2);
+ sumsq_overflow_u64x2 = wasm_v128_or(
+ sumsq_overflow_u64x2,
+ wasm_i64x2_gt(wasm_v128_xor(sq_before_u64x2, sign_bit_i64x2), wasm_v128_xor(sumsq_u64x2, sign_bit_i64x2)));
+ }
+ int sumsq_overflow = (int)(wasm_i64x2_extract_lane(sumsq_overflow_u64x2, 0) |
+ wasm_i64x2_extract_lane(sumsq_overflow_u64x2, 1));
+ nk_u64_t sumsq = sumsq_overflow ? NK_U64_MAX : nk_reduce_sadd_u64x2_v128relaxed_(sumsq_u64x2);
+ nk_b128_vec_t lower_vec, upper_vec;
+ lower_vec.v128 = sum_lower_u64x2;
+ upper_vec.v128 = sum_upper_i64x2;
+ nk_u64_t sum_lower = 0;
+ nk_i64_t sum_upper = 0;
+ nk_u64_t sum_before = sum_lower;
+ sum_lower += lower_vec.u64s[0], sum_upper += (sum_lower < sum_before) + upper_vec.i64s[0];
+ sum_before = sum_lower;
+ sum_lower += lower_vec.u64s[1], sum_upper += (sum_lower < sum_before) + upper_vec.i64s[1];
+ for (; idx < count; ++idx) {
+ nk_i64_t val = (nk_i64_t)data[idx];
+ sum_before = sum_lower;
+ sum_lower += (nk_u64_t)val;
+ if (sum_lower < sum_before) sum_upper++;
+ sum_upper += (val >> 63);
+ nk_u64_t product = (nk_u64_t)(val * val);
+ sumsq = nk_u64_saturating_add_serial(sumsq, product);
+ }
+ nk_i64_t sum_lower_signed = (nk_i64_t)sum_lower;
+ if (sum_upper == (sum_lower_signed >> 63)) *sum_ptr = sum_lower_signed;
+ else if (sum_upper >= 0) *sum_ptr = NK_I64_MAX;
+ else *sum_ptr = NK_I64_MIN;
+ *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_i32_v128relaxed( //
+ nk_i32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i32_t);
+ int aligned = (stride_bytes % sizeof(nk_i32_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_i32_serial(data, count, stride_bytes, sum, sumsq);
+ else if (stride_elements == 1) nk_reduce_moments_i32_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_i32_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
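The i32 kernel above keeps its running sum as a 128-bit integer split across `sum_lower_u64x2` and `sum_upper_i64x2`. WASM SIMD offers no unsigned 64-bit compare, so carry-out of the lower half is detected by flipping the sign bit of both operands and reusing the signed wasm_i64x2_gt; the addend's own sign is folded into the upper half with the arithmetic shift by 63. A scalar sketch of the same trick (names are illustrative, not package API):

/* Sketch of the carry detection used above: XOR-ing both values with the
 * sign bit lets a signed compare order them as if they were unsigned. */
static inline int unsigned_less_sketch(nk_u64_t a, nk_u64_t b) {
    nk_u64_t const bias = (nk_u64_t)1 << 63;
    return (nk_i64_t)(a ^ bias) < (nk_i64_t)(b ^ bias); /* == (a < b) unsigned */
}

/* One scalar step of the 128-bit accumulation: the carry out of the low
 * word is exactly "after < before" in unsigned order, and `val >> 63`
 * (arithmetic) adds the addend's sign extension into the high word. */
static inline void add_to_i128_sketch(nk_u64_t *lower, nk_i64_t *upper, nk_i64_t val) {
    nk_u64_t before = *lower;
    *lower += (nk_u64_t)val;
    *upper += (nk_i64_t)(*lower < before) + (val >> 63);
}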
+ NK_INTERNAL void nk_reduce_moments_u32_v128relaxed_contiguous_( //
+ nk_u32_t const *data, nk_size_t count, //
+ nk_u64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_u64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 4 <= count; idx += 4) {
+ v128_t data_u32x4 = wasm_v128_load(data + idx);
+ sum_u64x2 = wasm_i64x2_add(sum_u64x2, wasm_u64x2_extend_low_u32x4(data_u32x4));
+ sum_u64x2 = wasm_i64x2_add(sum_u64x2, wasm_u64x2_extend_high_u32x4(data_u32x4));
+ v128_t sq_low_u64x2 = wasm_u64x2_extmul_low_u32x4(data_u32x4, data_u32x4);
+ v128_t sq_high_u64x2 = wasm_u64x2_extmul_high_u32x4(data_u32x4, data_u32x4);
+ sumsq_u64x2 = nk_u64_sadd_epi64_v128relaxed_(sumsq_u64x2, sq_low_u64x2);
+ sumsq_u64x2 = nk_u64_sadd_epi64_v128relaxed_(sumsq_u64x2, sq_high_u64x2);
+ }
+ nk_u64_t sum = nk_reduce_add_u64x2_v128relaxed_(sum_u64x2);
+ nk_u64_t sumsq = nk_reduce_sadd_u64x2_v128relaxed_(sumsq_u64x2);
+ for (; idx < count; ++idx) {
+ nk_u64_t val = (nk_u64_t)data[idx];
+ sum += val;
+ nk_u64_t product = val * val;
+ sumsq = nk_u64_saturating_add_serial(sumsq, product);
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_u32_v128relaxed( //
+ nk_u32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u32_t);
+ int aligned = (stride_bytes % sizeof(nk_u32_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_u32_serial(data, count, stride_bytes, sum, sumsq);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 4) {
+ nk_size_t left_count = count / 2;
+ nk_u64_t left_sum, left_sumsq, right_sum, right_sumsq;
+ nk_reduce_moments_u32_v128relaxed(data, left_count, stride_bytes, &left_sum, &left_sumsq);
+ nk_reduce_moments_u32_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_sum, &right_sumsq);
+ *sum = nk_u64_saturating_add_serial(left_sum, right_sum);
+ *sumsq = nk_u64_saturating_add_serial(left_sumsq, right_sumsq);
+ }
+ else if (stride_elements == 1) nk_reduce_moments_u32_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_u32_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_i64_v128relaxed_contiguous_( //
+ nk_i64_t const *data, nk_size_t count, //
+ nk_i64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_lower_u64x2 = wasm_i64x2_splat(0);
+ v128_t sum_upper_i64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_overflow_u64x2 = wasm_i64x2_splat(0);
+ v128_t sign_bit_i64x2 = wasm_i64x2_splat((nk_i64_t)0x8000000000000000LL);
+ nk_size_t idx = 0;
+ for (; idx + 2 <= count; idx += 2) {
+ v128_t data_i64x2 = wasm_v128_load(data + idx);
+ v128_t sq_u64x2 = nk_i64_smul_sq_epi64_v128relaxed_(data_i64x2);
+ v128_t sq_before_u64x2 = sumsq_u64x2;
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, sq_u64x2);
+ sumsq_overflow_u64x2 = wasm_v128_or(
+ sumsq_overflow_u64x2,
+ wasm_i64x2_gt(wasm_v128_xor(sq_before_u64x2, sign_bit_i64x2), wasm_v128_xor(sumsq_u64x2, sign_bit_i64x2)));
+ v128_t before_u64x2 = sum_lower_u64x2;
+ sum_lower_u64x2 = wasm_i64x2_add(sum_lower_u64x2, data_i64x2);
+ v128_t carry_u64x2 = wasm_i64x2_gt(wasm_v128_xor(before_u64x2, sign_bit_i64x2),
+ wasm_v128_xor(sum_lower_u64x2, sign_bit_i64x2));
+ sum_upper_i64x2 = wasm_i64x2_sub(sum_upper_i64x2, carry_u64x2);
+ sum_upper_i64x2 = wasm_i64x2_add(sum_upper_i64x2, wasm_i64x2_shr(data_i64x2, 63));
+ }
+ int sumsq_overflow = (int)(wasm_i64x2_extract_lane(sumsq_overflow_u64x2, 0) |
+ wasm_i64x2_extract_lane(sumsq_overflow_u64x2, 1));
+ nk_u64_t sumsq = sumsq_overflow ? NK_U64_MAX : nk_reduce_sadd_u64x2_v128relaxed_(sumsq_u64x2);
+ nk_u64_t sum_lower = (nk_u64_t)wasm_i64x2_extract_lane(sum_lower_u64x2, 0);
+ nk_i64_t sum_upper = wasm_i64x2_extract_lane(sum_upper_i64x2, 0);
+ {
+ nk_u64_t sum_before = sum_lower;
+ sum_lower += (nk_u64_t)wasm_i64x2_extract_lane(sum_lower_u64x2, 1);
+ if (sum_lower < sum_before) sum_upper++;
+ sum_upper += wasm_i64x2_extract_lane(sum_upper_i64x2, 1);
+ }
+ for (; idx < count; ++idx) {
+ nk_i64_t val = data[idx];
+ nk_u64_t unsigned_product = (nk_u64_t)nk_i64_saturating_mul_serial(val, val);
+ sumsq = nk_u64_saturating_add_serial(sumsq, unsigned_product);
+ nk_u64_t sum_before = sum_lower;
+ sum_lower += (nk_u64_t)val;
+ if (sum_lower < sum_before) sum_upper++;
+ sum_upper += (val >> 63);
+ }
+ nk_i64_t sum_lower_signed = (nk_i64_t)sum_lower;
+ if (sum_upper == (sum_lower_signed >> 63)) *sum_ptr = sum_lower_signed;
+ else if (sum_upper >= 0) *sum_ptr = NK_I64_MAX;
+ else *sum_ptr = NK_I64_MIN;
+ *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_i64_v128relaxed( //
+ nk_i64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i64_t);
+ int aligned = (stride_bytes % sizeof(nk_i64_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_i64_serial(data, count, stride_bytes, sum, sumsq);
+ else if (stride_elements == 1) nk_reduce_moments_i64_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_i64_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_u64_v128relaxed_contiguous_( //
+ nk_u64_t const *data, nk_size_t count, //
+ nk_u64_t *sum_ptr, nk_u64_t *sumsq_ptr) {
+ v128_t sum_u64x2 = wasm_i64x2_splat(0);
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 2 <= count; idx += 2) {
+ v128_t data_u64x2 = wasm_v128_load(data + idx);
+ sum_u64x2 = nk_u64_sadd_epi64_v128relaxed_(sum_u64x2, data_u64x2);
+ v128_t sq_u64x2 = nk_u64_smul_sq_epi64_v128relaxed_(data_u64x2);
+ sumsq_u64x2 = nk_u64_sadd_epi64_v128relaxed_(sumsq_u64x2, sq_u64x2);
+ }
+ nk_u64_t sum = nk_reduce_sadd_u64x2_v128relaxed_(sum_u64x2);
+ nk_u64_t sumsq = nk_reduce_sadd_u64x2_v128relaxed_(sumsq_u64x2);
+ for (; idx < count; ++idx) {
+ nk_u64_t val = data[idx];
+ sum = nk_u64_saturating_add_serial(sum, val);
+ nk_u64_t product = nk_u64_saturating_mul_serial(val, val);
+ sumsq = nk_u64_saturating_add_serial(sumsq, product);
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_u64_v128relaxed( //
+ nk_u64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u64_t *sum, nk_u64_t *sumsq) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u64_t);
+ int aligned = (stride_bytes % sizeof(nk_u64_t) == 0);
+ if (count == 0) *sum = 0, *sumsq = 0;
+ else if (!aligned) nk_reduce_moments_u64_serial(data, count, stride_bytes, sum, sumsq);
+ else if (stride_elements == 1) nk_reduce_moments_u64_v128relaxed_contiguous_(data, count, sum, sumsq);
+ else nk_reduce_moments_u64_serial(data, count, stride_bytes, sum, sumsq);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_i8_v128relaxed_contiguous_( //
+ nk_i8_t const *data, nk_size_t count, //
+ nk_i8_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i8_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_i8x16 = wasm_i8x16_splat(NK_I8_MAX), max_i8x16 = wasm_i8x16_splat(NK_I8_MIN);
+ v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+ v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 16 <= count; idx += 16) {
+ v128_t data_i8x16 = wasm_v128_load(data + idx);
+ v128_t less_b8x16 = wasm_i8x16_lt(data_i8x16, min_i8x16);
+ v128_t greater_b8x16 = wasm_i8x16_gt(data_i8x16, max_i8x16);
+ min_i8x16 = wasm_i8x16_relaxed_laneselect(data_i8x16, min_i8x16, less_b8x16);
+ max_i8x16 = wasm_i8x16_relaxed_laneselect(data_i8x16, max_i8x16, greater_b8x16);
+ min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+ max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+ iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_i8x16;
+ max_values_vec.v128 = max_i8x16;
+ min_iters_vec.v128 = min_iter_u8x16;
+ max_iters_vec.v128 = max_iter_u8x16;
+ nk_i8_t min_value = min_values_vec.i8s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+ for (int i = 1; i < 16; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+ if (min_values_vec.i8s[i] < min_value || (min_values_vec.i8s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.i8s[i], min_idx = abs_idx;
+ }
+ nk_i8_t max_value = max_values_vec.i8s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+ for (int i = 1; i < 16; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+ if (max_values_vec.i8s[i] > max_value || (max_values_vec.i8s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.i8s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_i8_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_i8_v128relaxed( //
+ nk_i8_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i8_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i8_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i8_t);
+ int aligned = (stride_bytes % sizeof(nk_i8_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_I8_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_I8_MIN,
+ *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_i8_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)(NK_U8_MAX + 1) * 16) {
+ nk_size_t left_count = count / 2;
+ nk_i8_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_i8_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_i8_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_i8_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_i8_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
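The min/max kernels above avoid extracting a winning lane inside the hot loop: each lane latches the current block counter whenever its running extreme improves, and the absolute position is reconstructed afterwards, with ties broken toward the lower index. A sketch of that reconstruction and its limit, under the same assumptions:

/* Illustrative reconstruction of an absolute element index from a latched
 * per-lane block counter. With an 8-bit counter the block value wraps
 * after 256 iterations of 16 elements, which is exactly the
 * (NK_U8_MAX + 1) * 16 = 4096 recursion guard in the strided wrapper. */
static inline nk_size_t latched_index_sketch(nk_size_t block, nk_size_t lane) {
    return block * 16 + lane; /* 16 i8 lanes per 128-bit vector */
}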
+ NK_INTERNAL void nk_reduce_minmax_u8_v128relaxed_contiguous_( //
+ nk_u8_t const *data, nk_size_t count, //
+ nk_u8_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u8_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_u8x16 = wasm_i8x16_splat((nk_i8_t)NK_U8_MAX), max_u8x16 = wasm_i8x16_splat(0);
+ v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+ v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 16 <= count; idx += 16) {
+ v128_t data_u8x16 = wasm_v128_load(data + idx);
+ v128_t less_b8x16 = wasm_u8x16_lt(data_u8x16, min_u8x16);
+ v128_t greater_b8x16 = wasm_u8x16_gt(data_u8x16, max_u8x16);
+ min_u8x16 = wasm_i8x16_relaxed_laneselect(data_u8x16, min_u8x16, less_b8x16);
+ max_u8x16 = wasm_i8x16_relaxed_laneselect(data_u8x16, max_u8x16, greater_b8x16);
+ min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+ max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+ iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_u8x16;
+ max_values_vec.v128 = max_u8x16;
+ min_iters_vec.v128 = min_iter_u8x16;
+ max_iters_vec.v128 = max_iter_u8x16;
+ nk_u8_t min_value = min_values_vec.u8s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+ for (int i = 1; i < 16; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+ if (min_values_vec.u8s[i] < min_value || (min_values_vec.u8s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.u8s[i], min_idx = abs_idx;
+ }
+ nk_u8_t max_value = max_values_vec.u8s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+ for (int i = 1; i < 16; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+ if (max_values_vec.u8s[i] > max_value || (max_values_vec.u8s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.u8s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_u8_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_u8_v128relaxed( //
+ nk_u8_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u8_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u8_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u8_t);
+ int aligned = (stride_bytes % sizeof(nk_u8_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_U8_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = 0, *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_u8_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)(NK_U8_MAX + 1) * 16) {
+ nk_size_t left_count = count / 2;
+ nk_u8_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_u8_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_u8_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_u8_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_u8_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_i16_v128relaxed_contiguous_( //
+ nk_i16_t const *data, nk_size_t count, //
+ nk_i16_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_i16x8 = wasm_i16x8_splat(NK_I16_MAX), max_i16x8 = wasm_i16x8_splat(NK_I16_MIN);
+ v128_t min_iter_u16x8 = wasm_i16x8_splat(0), max_iter_u16x8 = wasm_i16x8_splat(0);
+ v128_t iter_u16x8 = wasm_i16x8_splat(0), one_u16x8 = wasm_i16x8_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 8 <= count; idx += 8) {
+ v128_t data_i16x8 = wasm_v128_load(data + idx);
+ v128_t less_b16x8 = wasm_i16x8_lt(data_i16x8, min_i16x8);
+ v128_t greater_b16x8 = wasm_i16x8_gt(data_i16x8, max_i16x8);
+ min_i16x8 = wasm_i16x8_relaxed_laneselect(data_i16x8, min_i16x8, less_b16x8);
+ max_i16x8 = wasm_i16x8_relaxed_laneselect(data_i16x8, max_i16x8, greater_b16x8);
+ min_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, min_iter_u16x8, less_b16x8);
+ max_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, max_iter_u16x8, greater_b16x8);
+ iter_u16x8 = wasm_i16x8_add(iter_u16x8, one_u16x8);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_i16x8;
+ max_values_vec.v128 = max_i16x8;
+ min_iters_vec.v128 = min_iter_u16x8;
+ max_iters_vec.v128 = max_iter_u16x8;
+ nk_i16_t min_value = min_values_vec.i16s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u16s[0] * 8;
+ for (int i = 1; i < 8; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+ if (min_values_vec.i16s[i] < min_value || (min_values_vec.i16s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.i16s[i], min_idx = abs_idx;
+ }
+ nk_i16_t max_value = max_values_vec.i16s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u16s[0] * 8;
+ for (int i = 1; i < 8; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+ if (max_values_vec.i16s[i] > max_value || (max_values_vec.i16s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.i16s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_i16_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_i16_v128relaxed( //
+ nk_i16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i16_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i16_t);
+ int aligned = (stride_bytes % sizeof(nk_i16_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_I16_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_I16_MIN,
+ *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_i16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 8) {
+ nk_size_t left_count = count / 2;
+ nk_i16_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_i16_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_i16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_i16_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_i16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_u16_v128relaxed_contiguous_( //
+ nk_u16_t const *data, nk_size_t count, //
+ nk_u16_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_u16x8 = wasm_i16x8_splat((nk_i16_t)NK_U16_MAX), max_u16x8 = wasm_i16x8_splat(0);
+ v128_t min_iter_u16x8 = wasm_i16x8_splat(0), max_iter_u16x8 = wasm_i16x8_splat(0);
+ v128_t iter_u16x8 = wasm_i16x8_splat(0), one_u16x8 = wasm_i16x8_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 8 <= count; idx += 8) {
+ v128_t data_u16x8 = wasm_v128_load(data + idx);
+ v128_t less_b16x8 = wasm_u16x8_lt(data_u16x8, min_u16x8);
+ v128_t greater_b16x8 = wasm_u16x8_gt(data_u16x8, max_u16x8);
+ min_u16x8 = wasm_i16x8_relaxed_laneselect(data_u16x8, min_u16x8, less_b16x8);
+ max_u16x8 = wasm_i16x8_relaxed_laneselect(data_u16x8, max_u16x8, greater_b16x8);
+ min_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, min_iter_u16x8, less_b16x8);
+ max_iter_u16x8 = wasm_i16x8_relaxed_laneselect(iter_u16x8, max_iter_u16x8, greater_b16x8);
+ iter_u16x8 = wasm_i16x8_add(iter_u16x8, one_u16x8);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_u16x8;
+ max_values_vec.v128 = max_u16x8;
+ min_iters_vec.v128 = min_iter_u16x8;
+ max_iters_vec.v128 = max_iter_u16x8;
+ nk_u16_t min_value = min_values_vec.u16s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u16s[0] * 8;
+ for (int i = 1; i < 8; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+ if (min_values_vec.u16s[i] < min_value || (min_values_vec.u16s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.u16s[i], min_idx = abs_idx;
+ }
+ nk_u16_t max_value = max_values_vec.u16s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u16s[0] * 8;
+ for (int i = 1; i < 8; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u16s[i] * 8 + (nk_size_t)i;
+ if (max_values_vec.u16s[i] > max_value || (max_values_vec.u16s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.u16s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_u16_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_u16_v128relaxed( //
+ nk_u16_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u16_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u16_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u16_t);
+ int aligned = (stride_bytes % sizeof(nk_u16_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_U16_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = 0, *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_u16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)(NK_U16_MAX + 1) * 8) {
+ nk_size_t left_count = count / 2;
+ nk_u16_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_u16_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_u16_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_u16_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_u16_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_i32_v128relaxed_contiguous_( //
+ nk_i32_t const *data, nk_size_t count, //
+ nk_i32_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_i32x4 = wasm_i32x4_splat(NK_I32_MAX), max_i32x4 = wasm_i32x4_splat(NK_I32_MIN);
+ v128_t min_iter_u32x4 = wasm_i32x4_splat(0), max_iter_u32x4 = wasm_i32x4_splat(0);
+ v128_t iter_u32x4 = wasm_i32x4_splat(0), one_u32x4 = wasm_i32x4_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 4 <= count; idx += 4) {
+ v128_t data_i32x4 = wasm_v128_load(data + idx);
+ v128_t less_b32x4 = wasm_i32x4_lt(data_i32x4, min_i32x4);
+ v128_t greater_b32x4 = wasm_i32x4_gt(data_i32x4, max_i32x4);
+ min_i32x4 = wasm_i32x4_relaxed_laneselect(data_i32x4, min_i32x4, less_b32x4);
+ max_i32x4 = wasm_i32x4_relaxed_laneselect(data_i32x4, max_i32x4, greater_b32x4);
+ min_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, min_iter_u32x4, less_b32x4);
+ max_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, max_iter_u32x4, greater_b32x4);
+ iter_u32x4 = wasm_i32x4_add(iter_u32x4, one_u32x4);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_i32x4;
+ max_values_vec.v128 = max_i32x4;
+ min_iters_vec.v128 = min_iter_u32x4;
+ max_iters_vec.v128 = max_iter_u32x4;
+ nk_i32_t min_value = min_values_vec.i32s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u32s[0] * 4;
+ for (int i = 1; i < 4; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+ if (min_values_vec.i32s[i] < min_value || (min_values_vec.i32s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.i32s[i], min_idx = abs_idx;
+ }
+ nk_i32_t max_value = max_values_vec.i32s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u32s[0] * 4;
+ for (int i = 1; i < 4; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+ if (max_values_vec.i32s[i] > max_value || (max_values_vec.i32s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.i32s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_i32_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_i32_v128relaxed( //
+ nk_i32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i32_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i32_t);
+ int aligned = (stride_bytes % sizeof(nk_i32_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_I32_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_I32_MIN,
+ *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_i32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)NK_U32_MAX * 4) {
+ nk_size_t left_count = count / 2;
+ nk_i32_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_i32_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_i32_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_i32_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_i32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_u32_v128relaxed_contiguous_( //
+ nk_u32_t const *data, nk_size_t count, //
+ nk_u32_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_u32x4 = wasm_i32x4_splat((nk_i32_t)NK_U32_MAX), max_u32x4 = wasm_i32x4_splat(0);
+ v128_t min_iter_u32x4 = wasm_i32x4_splat(0), max_iter_u32x4 = wasm_i32x4_splat(0);
+ v128_t iter_u32x4 = wasm_i32x4_splat(0), one_u32x4 = wasm_i32x4_splat(1);
+ v128_t sign_bit_i32x4 = wasm_i32x4_splat((nk_i32_t)0x80000000);
+ nk_size_t idx = 0;
+ for (; idx + 4 <= count; idx += 4) {
+ v128_t data_u32x4 = wasm_v128_load(data + idx);
+ v128_t data_biased_i32x4 = wasm_v128_xor(data_u32x4, sign_bit_i32x4);
+ v128_t min_biased_i32x4 = wasm_v128_xor(min_u32x4, sign_bit_i32x4);
+ v128_t max_biased_i32x4 = wasm_v128_xor(max_u32x4, sign_bit_i32x4);
+ v128_t less_b32x4 = wasm_i32x4_lt(data_biased_i32x4, min_biased_i32x4);
+ v128_t greater_b32x4 = wasm_i32x4_gt(data_biased_i32x4, max_biased_i32x4);
+ min_u32x4 = wasm_i32x4_relaxed_laneselect(data_u32x4, min_u32x4, less_b32x4);
+ max_u32x4 = wasm_i32x4_relaxed_laneselect(data_u32x4, max_u32x4, greater_b32x4);
+ min_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, min_iter_u32x4, less_b32x4);
+ max_iter_u32x4 = wasm_i32x4_relaxed_laneselect(iter_u32x4, max_iter_u32x4, greater_b32x4);
+ iter_u32x4 = wasm_i32x4_add(iter_u32x4, one_u32x4);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_u32x4;
+ max_values_vec.v128 = max_u32x4;
+ min_iters_vec.v128 = min_iter_u32x4;
+ max_iters_vec.v128 = max_iter_u32x4;
+ nk_u32_t min_value = min_values_vec.u32s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u32s[0] * 4;
+ for (int i = 1; i < 4; ++i) {
+ nk_size_t abs_idx = (nk_size_t)min_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+ if (min_values_vec.u32s[i] < min_value || (min_values_vec.u32s[i] == min_value && abs_idx < min_idx))
+ min_value = min_values_vec.u32s[i], min_idx = abs_idx;
+ }
+ nk_u32_t max_value = max_values_vec.u32s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u32s[0] * 4;
+ for (int i = 1; i < 4; ++i) {
+ nk_size_t abs_idx = (nk_size_t)max_iters_vec.u32s[i] * 4 + (nk_size_t)i;
+ if (max_values_vec.u32s[i] > max_value || (max_values_vec.u32s[i] == max_value && abs_idx < max_idx))
+ max_value = max_values_vec.u32s[i], max_idx = abs_idx;
+ }
+ for (; idx < count; ++idx) {
+ nk_u32_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_u32_v128relaxed( //
+ nk_u32_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u32_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u32_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u32_t);
+ int aligned = (stride_bytes % sizeof(nk_u32_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_U32_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = 0, *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_u32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (count > (nk_size_t)NK_U32_MAX * 4) {
+ nk_size_t left_count = count / 2;
+ nk_u32_t left_min, right_min, left_max, right_max;
+ nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+ nk_reduce_minmax_u32_v128relaxed(data, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+ &left_max_idx);
+ nk_reduce_minmax_u32_v128relaxed(data + left_count * stride_elements, count - left_count, stride_bytes,
+ &right_min, &right_min_idx, &right_max, &right_max_idx);
+ if (right_min < left_min) *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+ else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+ if (right_max > left_max) *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+ else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+ }
+ else if (stride_elements == 1)
+ nk_reduce_minmax_u32_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_u32_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_i64_v128relaxed_contiguous_( //
+ nk_i64_t const *data, nk_size_t count, //
+ nk_i64_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_i64x2 = wasm_i64x2_splat(NK_I64_MAX), max_i64x2 = wasm_i64x2_splat(NK_I64_MIN);
+ v128_t min_iter_u64x2 = wasm_i64x2_splat(0), max_iter_u64x2 = wasm_i64x2_splat(0);
+ v128_t iter_u64x2 = wasm_i64x2_splat(0), one_u64x2 = wasm_i64x2_splat(1);
+ nk_size_t idx = 0;
+ for (; idx + 2 <= count; idx += 2) {
+ v128_t data_i64x2 = wasm_v128_load(data + idx);
+ v128_t less_b64x2 = wasm_i64x2_gt(min_i64x2, data_i64x2);
+ v128_t greater_b64x2 = wasm_i64x2_gt(data_i64x2, max_i64x2);
+ min_i64x2 = wasm_i64x2_relaxed_laneselect(data_i64x2, min_i64x2, less_b64x2);
+ max_i64x2 = wasm_i64x2_relaxed_laneselect(data_i64x2, max_i64x2, greater_b64x2);
+ min_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, min_iter_u64x2, less_b64x2);
+ max_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, max_iter_u64x2, greater_b64x2);
+ iter_u64x2 = wasm_i64x2_add(iter_u64x2, one_u64x2);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_i64x2;
+ max_values_vec.v128 = max_i64x2;
+ min_iters_vec.v128 = min_iter_u64x2;
+ max_iters_vec.v128 = max_iter_u64x2;
+ nk_i64_t min_value = min_values_vec.i64s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u64s[0] * 2;
+ if (min_values_vec.i64s[1] < min_value ||
+ (min_values_vec.i64s[1] == min_value && (nk_size_t)min_iters_vec.u64s[1] * 2 + 1 < min_idx))
+ min_value = min_values_vec.i64s[1], min_idx = (nk_size_t)min_iters_vec.u64s[1] * 2 + 1;
+ nk_i64_t max_value = max_values_vec.i64s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u64s[0] * 2;
+ if (max_values_vec.i64s[1] > max_value ||
+ (max_values_vec.i64s[1] == max_value && (nk_size_t)max_iters_vec.u64s[1] * 2 + 1 < max_idx))
+ max_value = max_values_vec.i64s[1], max_idx = (nk_size_t)max_iters_vec.u64s[1] * 2 + 1;
+ for (; idx < count; ++idx) {
+ nk_i64_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_i64_v128relaxed( //
+ nk_i64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_i64_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_i64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_i64_t);
+ int aligned = (stride_bytes % sizeof(nk_i64_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_I64_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_I64_MIN,
+ *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_i64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (stride_elements == 1)
+ nk_reduce_minmax_i64_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_i64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_u64_v128relaxed_contiguous_( //
+ nk_u64_t const *data, nk_size_t count, //
+ nk_u64_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ v128_t min_u64x2 = wasm_i64x2_splat((nk_i64_t)NK_U64_MAX), max_u64x2 = wasm_i64x2_splat(0);
+ v128_t min_iter_u64x2 = wasm_i64x2_splat(0), max_iter_u64x2 = wasm_i64x2_splat(0);
+ v128_t iter_u64x2 = wasm_i64x2_splat(0), one_u64x2 = wasm_i64x2_splat(1);
+ v128_t sign_bit_i64x2 = wasm_i64x2_splat((nk_i64_t)0x8000000000000000LL);
+ nk_size_t idx = 0;
+ for (; idx + 2 <= count; idx += 2) {
+ v128_t data_u64x2 = wasm_v128_load(data + idx);
+ v128_t data_biased_i64x2 = wasm_v128_xor(data_u64x2, sign_bit_i64x2);
+ v128_t min_biased_i64x2 = wasm_v128_xor(min_u64x2, sign_bit_i64x2);
+ v128_t max_biased_i64x2 = wasm_v128_xor(max_u64x2, sign_bit_i64x2);
+ v128_t less_b64x2 = wasm_i64x2_gt(min_biased_i64x2, data_biased_i64x2);
+ v128_t greater_b64x2 = wasm_i64x2_gt(data_biased_i64x2, max_biased_i64x2);
+ min_u64x2 = wasm_i64x2_relaxed_laneselect(data_u64x2, min_u64x2, less_b64x2);
+ max_u64x2 = wasm_i64x2_relaxed_laneselect(data_u64x2, max_u64x2, greater_b64x2);
+ min_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, min_iter_u64x2, less_b64x2);
+ max_iter_u64x2 = wasm_i64x2_relaxed_laneselect(iter_u64x2, max_iter_u64x2, greater_b64x2);
+ iter_u64x2 = wasm_i64x2_add(iter_u64x2, one_u64x2);
+ }
+ nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+ min_values_vec.v128 = min_u64x2;
+ max_values_vec.v128 = max_u64x2;
+ min_iters_vec.v128 = min_iter_u64x2;
+ max_iters_vec.v128 = max_iter_u64x2;
+ nk_u64_t min_value = min_values_vec.u64s[0];
+ nk_size_t min_idx = (nk_size_t)min_iters_vec.u64s[0] * 2;
+ if (min_values_vec.u64s[1] < min_value ||
+ (min_values_vec.u64s[1] == min_value && (nk_size_t)min_iters_vec.u64s[1] * 2 + 1 < min_idx))
+ min_value = min_values_vec.u64s[1], min_idx = (nk_size_t)min_iters_vec.u64s[1] * 2 + 1;
+ nk_u64_t max_value = max_values_vec.u64s[0];
+ nk_size_t max_idx = (nk_size_t)max_iters_vec.u64s[0] * 2;
+ if (max_values_vec.u64s[1] > max_value ||
+ (max_values_vec.u64s[1] == max_value && (nk_size_t)max_iters_vec.u64s[1] * 2 + 1 < max_idx))
+ max_value = max_values_vec.u64s[1], max_idx = (nk_size_t)max_iters_vec.u64s[1] * 2 + 1;
+ for (; idx < count; ++idx) {
+ nk_u64_t val = data[idx];
+ if (val < min_value) min_value = val, min_idx = idx;
+ if (val > max_value) max_value = val, max_idx = idx;
+ }
+ *min_value_ptr = min_value, *min_index_ptr = min_idx;
+ *max_value_ptr = max_value, *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_u64_v128relaxed( //
+ nk_u64_t const *data, nk_size_t count, nk_size_t stride_bytes, //
+ nk_u64_t *min_value_ptr, nk_size_t *min_index_ptr, //
+ nk_u64_t *max_value_ptr, nk_size_t *max_index_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_u64_t);
+ int aligned = (stride_bytes % sizeof(nk_u64_t) == 0);
+ if (count == 0)
+ *min_value_ptr = NK_U64_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = 0, *max_index_ptr = NK_SIZE_MAX;
+ else if (!aligned)
+ nk_reduce_minmax_u64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else if (stride_elements == 1)
+ nk_reduce_minmax_u64_v128relaxed_contiguous_(data, count, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ else
+ nk_reduce_minmax_u64_serial(data, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+ max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_e4m3_v128relaxed_contiguous_( //
+ nk_e4m3_t const *data_ptr, nk_size_t count, //
+ nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+ v128_t sum_f32x4 = wasm_f32x4_splat(0), sumsq_f32x4 = wasm_f32x4_splat(0);
+ nk_size_t idx = 0;
+ for (; idx + 4 <= count; idx += 4) {
+ nk_b32_vec_t raw;
+ nk_load_b32_serial_(data_ptr + idx, &raw);
+ v128_t data_f32x4 = nk_e4m3x4_to_f32x4_v128relaxed_(raw).v128;
+ sum_f32x4 = wasm_f32x4_add(sum_f32x4, data_f32x4);
+ sumsq_f32x4 = wasm_f32x4_relaxed_madd(data_f32x4, data_f32x4, sumsq_f32x4);
+ }
+ nk_f32_t sum = nk_reduce_add_f32x4_v128relaxed_(sum_f32x4);
+ nk_f32_t sumsq = nk_reduce_add_f32x4_v128relaxed_(sumsq_f32x4);
+ for (; idx < count; ++idx) {
+ nk_f32_t val;
+ nk_e4m3_to_f32_serial(&data_ptr[idx], &val);
+ sum += val, sumsq += val * val;
+ }
+ *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_e4m3_v128relaxed( //
+ nk_e4m3_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+ nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+ nk_size_t stride_elements = stride_bytes / sizeof(nk_e4m3_t);
+ int aligned = (stride_bytes % sizeof(nk_e4m3_t) == 0);
+ if (count == 0) *sum_ptr = 0, *sumsq_ptr = 0;
+ else if (!aligned) nk_reduce_moments_e4m3_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+ else if (stride_elements == 1) nk_reduce_moments_e4m3_v128relaxed_contiguous_(data_ptr, count, sum_ptr, sumsq_ptr);
+ else nk_reduce_moments_e4m3_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+ }
+
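The e4m3 pair above is the first to accumulate in floating point: four FP8 bytes are widened to f32x4 by the nk_e4m3x4_to_f32x4_v128relaxed_ helper, and the squares are folded in with wasm_f32x4_relaxed_madd. A relaxed madd is permitted to evaluate fused (one rounding) or unfused (two roundings), so the low bits of sumsq can legitimately differ across engines. A scalar reference for one step, showing both legal evaluations:

#include <math.h>

/* One accumulation step of the kernel above; a relaxed madd may produce
 * either of these results, which typically differ only in the last ulp. */
static inline float sumsq_step_sketch(float acc, float v) {
    float unfused = acc + v * v;   /* two roundings */
    float fused = fmaf(v, v, acc); /* one rounding */
    (void)fused;                   /* either is a conforming answer */
    return unfused;
}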
+ NK_INTERNAL void nk_reduce_moments_e2m3_v128relaxed_contiguous_( //
1713
+ nk_e2m3_t const *data_ptr, nk_size_t count, //
1714
+ nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
1715
+ v128_t const lut_low_u8x16 = wasm_i8x16_const(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
1716
+ v128_t const lut_high_u8x16 = wasm_i8x16_const(32, 36, 40, 44, 48, 52, 56, 60, 64, 72, 80, 88, 96, 104, 112, 120);
1717
+ v128_t const magnitude_mask_u8x16 = wasm_i8x16_splat(0x1F);
1718
+ v128_t const sign_mask_u8x16 = wasm_i8x16_splat(0x20);
1719
+ v128_t const sixteen_u8x16 = wasm_i8x16_splat(16);
1720
+ v128_t sum_i32x4 = wasm_i32x4_splat(0);
1721
+ v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
1722
+ nk_size_t idx = 0;
1723
+ for (; idx + 16 <= count; idx += 16) {
1724
+ v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
1725
+ v128_t magnitude_u8x16 = wasm_v128_and(raw_u8x16, magnitude_mask_u8x16);
1726
+ v128_t from_low_u8x16 = wasm_i8x16_relaxed_swizzle(lut_low_u8x16, magnitude_u8x16);
1727
+ v128_t high_indices_u8x16 = wasm_i8x16_sub(magnitude_u8x16, sixteen_u8x16);
1728
+ v128_t from_high_u8x16 = wasm_i8x16_relaxed_swizzle(lut_high_u8x16, high_indices_u8x16);
1729
+ v128_t in_high_b8x16 = wasm_u8x16_ge(magnitude_u8x16, sixteen_u8x16);
1730
+ v128_t unsigned_u8x16 = wasm_i8x16_relaxed_laneselect(from_high_u8x16, from_low_u8x16, in_high_b8x16);
1731
+ v128_t is_negative_b8x16 = wasm_i8x16_eq(wasm_v128_and(raw_u8x16, sign_mask_u8x16), sign_mask_u8x16);
1732
+ v128_t negated_i8x16 = wasm_i8x16_sub(wasm_i8x16_splat(0), unsigned_u8x16);
1733
+ v128_t scaled_i8x16 = wasm_i8x16_relaxed_laneselect(negated_i8x16, unsigned_u8x16, is_negative_b8x16);
1734
+ v128_t pairwise_i16x8 = wasm_i16x8_extadd_pairwise_i8x16(scaled_i8x16);
1735
+ v128_t pairwise_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(pairwise_i16x8);
1736
+ sum_i32x4 = wasm_i32x4_add(sum_i32x4, pairwise_i32x4);
1737
+ v128_t sq_low_i16x8 = wasm_i16x8_extmul_low_i8x16(scaled_i8x16, scaled_i8x16);
1738
+ v128_t sq_high_i16x8 = wasm_i16x8_extmul_high_i8x16(scaled_i8x16, scaled_i8x16);
1739
+ v128_t sq_low_u32x4 = wasm_u32x4_extadd_pairwise_u16x8(sq_low_i16x8);
1740
+ v128_t sq_high_u32x4 = wasm_u32x4_extadd_pairwise_u16x8(sq_high_i16x8);
1741
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_low_u32x4));
1742
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_low_u32x4));
1743
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_high_u32x4));
1744
+ sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_high_u32x4));
1745
+ }
1746
+ nk_i64_t sum = nk_reduce_add_i32x4_v128relaxed_(sum_i32x4);
1747
+ nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
1748
+ for (; idx < count; ++idx) {
1749
+ nk_f32_t val;
1750
+ nk_e2m3_to_f32_serial(&data_ptr[idx], &val);
1751
+ sum += (nk_i64_t)(val * 16.0f), sumsq += (nk_u64_t)(nk_i64_t)(val * val * 256.0f);
1752
+ }
1753
+ *sum_ptr = (nk_f32_t)sum / 16.0f, *sumsq_ptr = (nk_f32_t)sumsq / 256.0f;
1754
+ }
1755
+
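The integer accumulation above is exact because every E2M3 value is a multiple of 1/16, so the two LUTs store value × 16. A scalar reference decode, assuming the standard E2M3 layout (sign in bit 5, 2 exponent and 3 mantissa bits, bias 1); the function name is illustrative:

    static int e2m3_times_16(nk_u8_t raw) {
        static const int lut[32] = {0,  2,  4,  6,  8,  10, 12, 14, 16, 18, 20, 22, 24, 26,  28,  30,
                                    32, 36, 40, 44, 48, 52, 56, 60, 64, 72, 80, 88, 96, 104, 112, 120};
        int magnitude = raw & 0x1F; /* exponent and mantissa bits together */
        return (raw & 0x20) ? -lut[magnitude] : lut[magnitude];
    }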
+ NK_PUBLIC void nk_reduce_moments_e2m3_v128relaxed( //
+     nk_e2m3_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e2m3_t);
+     int aligned = (stride_bytes % sizeof(nk_e2m3_t) == 0);
+     if (count == 0) *sum_ptr = 0, *sumsq_ptr = 0;
+     else if (!aligned) nk_reduce_moments_e2m3_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 16) {
+         nk_size_t left_count = count / 2;
+         nk_f32_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_e2m3_v128relaxed(data_ptr, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_e2m3_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                            &right_sum, &right_sumsq);
+         *sum_ptr = left_sum + right_sum, *sumsq_ptr = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_e2m3_v128relaxed_contiguous_(data_ptr, count, sum_ptr, sumsq_ptr);
+     else nk_reduce_moments_e2m3_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+ }
+
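The `(NK_U16_MAX + 1) * 16` split looks chosen to keep the integer accumulators far from overflow; a back-of-the-envelope bound under the scaled-by-16 encoding (a sketch of the apparent reasoning, not a claim from the source):

    /* One leaf handles at most (NK_U16_MAX + 1) * 16 = 1,048,576 elements, i.e. 65,536
     * SIMD iterations. Each i32 lane of sum_i32x4 grows by at most 4 * 120 = 480 per
     * iteration for E2M3 (1,792 for E3M2 below), so |lane| stays under ~1.2e8 << 2^31,
     * and the u64 square accumulators have even more headroom. */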
+ NK_INTERNAL void nk_reduce_moments_e3m2_v128relaxed_contiguous_( //
+     nk_e3m2_t const *data_ptr, nk_size_t count, //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     // Both LUTs store the E3M2 magnitude scaled by 16. Magnitudes 28..31 decode to
+     // 256, 320, 384, and 448, so only their low bytes fit here; the ninth bit is
+     // reconstructed below for every magnitude above 27.
+     v128_t const lut_low_u8x16 = wasm_i8x16_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28);
+     v128_t const lut_high_u8x16 = wasm_i8x16_const(32, 40, 48, 56, 64, 80, 96, 112, (nk_i8_t)-128, (nk_i8_t)-96,
+                                                    (nk_i8_t)-64, (nk_i8_t)-32, 0, 64, (nk_i8_t)-128, (nk_i8_t)-64);
+     v128_t const magnitude_mask_u8x16 = wasm_i8x16_splat(0x1F);
+     v128_t const sign_mask_u8x16 = wasm_i8x16_splat(0x20);
+     v128_t const sixteen_u8x16 = wasm_i8x16_splat(16);
+     v128_t const threshold_u8x16 = wasm_i8x16_splat(27);
+     v128_t sum_i32x4 = wasm_i32x4_splat(0);
+     v128_t sumsq_u64x2 = wasm_i64x2_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
+         v128_t magnitude_u8x16 = wasm_v128_and(raw_u8x16, magnitude_mask_u8x16);
+         v128_t from_low_u8x16 = wasm_i8x16_relaxed_swizzle(lut_low_u8x16, magnitude_u8x16);
+         v128_t high_indices_u8x16 = wasm_i8x16_sub(magnitude_u8x16, sixteen_u8x16);
+         v128_t from_high_u8x16 = wasm_i8x16_relaxed_swizzle(lut_high_u8x16, high_indices_u8x16);
+         v128_t in_high_b8x16 = wasm_u8x16_ge(magnitude_u8x16, sixteen_u8x16);
+         v128_t low_byte_u8x16 = wasm_i8x16_relaxed_laneselect(from_high_u8x16, from_low_u8x16, in_high_b8x16);
+         v128_t high_byte_u8x16 = wasm_v128_and(wasm_u8x16_gt(magnitude_u8x16, threshold_u8x16), wasm_i8x16_splat(1));
+         v128_t is_negative_b8x16 = wasm_i8x16_eq(wasm_v128_and(raw_u8x16, sign_mask_u8x16), sign_mask_u8x16);
+         v128_t unsigned_low_u16x8 = wasm_i8x16_shuffle(low_byte_u8x16, high_byte_u8x16, 0, 16, 1, 17, 2, 18, 3, 19, 4,
+                                                        20, 5, 21, 6, 22, 7, 23);
+         v128_t unsigned_high_u16x8 = wasm_i8x16_shuffle(low_byte_u8x16, high_byte_u8x16, 8, 24, 9, 25, 10, 26, 11, 27,
+                                                         12, 28, 13, 29, 14, 30, 15, 31);
+         v128_t is_neg_low_i16x8 = wasm_i16x8_extend_low_i8x16(is_negative_b8x16);
+         v128_t is_neg_high_i16x8 = wasm_i16x8_extend_high_i8x16(is_negative_b8x16);
+         v128_t neg_low_i16x8 = wasm_i16x8_neg(unsigned_low_u16x8);
+         v128_t scaled_low_i16x8 = wasm_i16x8_relaxed_laneselect(neg_low_i16x8, unsigned_low_u16x8, is_neg_low_i16x8);
+         v128_t neg_high_i16x8 = wasm_i16x8_neg(unsigned_high_u16x8);
+         v128_t scaled_high_i16x8 = wasm_i16x8_relaxed_laneselect(neg_high_i16x8, unsigned_high_u16x8,
+                                                                  is_neg_high_i16x8);
+         v128_t sum_low_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(scaled_low_i16x8);
+         v128_t sum_high_i32x4 = wasm_i32x4_extadd_pairwise_i16x8(scaled_high_i16x8);
+         sum_i32x4 = wasm_i32x4_add(sum_i32x4, sum_low_i32x4);
+         sum_i32x4 = wasm_i32x4_add(sum_i32x4, sum_high_i32x4);
+         v128_t sq_low_a_i32x4 = wasm_i32x4_extmul_low_i16x8(scaled_low_i16x8, scaled_low_i16x8);
+         v128_t sq_low_b_i32x4 = wasm_i32x4_extmul_high_i16x8(scaled_low_i16x8, scaled_low_i16x8);
+         v128_t sq_high_a_i32x4 = wasm_i32x4_extmul_low_i16x8(scaled_high_i16x8, scaled_high_i16x8);
+         v128_t sq_high_b_i32x4 = wasm_i32x4_extmul_high_i16x8(scaled_high_i16x8, scaled_high_i16x8);
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_low_a_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_low_a_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_low_b_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_low_b_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_high_a_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_high_a_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_low_u32x4(sq_high_b_i32x4));
+         sumsq_u64x2 = wasm_i64x2_add(sumsq_u64x2, wasm_u64x2_extend_high_u32x4(sq_high_b_i32x4));
+     }
+     nk_i64_t sum = nk_reduce_add_i32x4_v128relaxed_(sum_i32x4);
+     nk_u64_t sumsq = nk_reduce_add_u64x2_v128relaxed_(sumsq_u64x2);
+     for (; idx < count; ++idx) {
+         nk_f32_t val;
+         nk_e3m2_to_f32_serial(&data_ptr[idx], &val);
+         sum += (nk_i64_t)(val * 16.0f), sumsq += (nk_u64_t)(nk_i64_t)(val * val * 256.0f);
+     }
+     *sum_ptr = (nk_f32_t)sum / 16.0f, *sumsq_ptr = (nk_f32_t)sumsq / 256.0f;
+ }
+
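Scaled E3M2 magnitudes reach 448, which no longer fits in a byte, so the kernel zips a low byte (from the two swizzles) with a 0/1 high byte into u16 lanes before applying the sign. A scalar sketch of one lane (`mag`, `sign`, and `value_x16[]` are illustrative names; `value_x16[]` would be the two LUT halves widened to int):

    unsigned low = value_x16[mag] & 0xFF;  /* what the swizzle pair produced */
    unsigned high = (mag > 27) ? 1u : 0u;  /* magnitudes 28..31 scale to 256..448 */
    int scaled = (int)(low | (high << 8)); /* one u16 lane of unsigned_*_u16x8 */
    if (sign) scaled = -scaled;            /* the i16x8 laneselect negation */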
+ NK_PUBLIC void nk_reduce_moments_e3m2_v128relaxed( //
+     nk_e3m2_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e3m2_t);
+     int aligned = (stride_bytes % sizeof(nk_e3m2_t) == 0);
+     if (count == 0) *sum_ptr = 0, *sumsq_ptr = 0;
+     else if (!aligned) nk_reduce_moments_e3m2_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+     else if (count > (nk_size_t)(NK_U16_MAX + 1) * 16) {
+         nk_size_t left_count = count / 2;
+         nk_f32_t left_sum, left_sumsq, right_sum, right_sumsq;
+         nk_reduce_moments_e3m2_v128relaxed(data_ptr, left_count, stride_bytes, &left_sum, &left_sumsq);
+         nk_reduce_moments_e3m2_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                            &right_sum, &right_sumsq);
+         *sum_ptr = left_sum + right_sum, *sumsq_ptr = left_sumsq + right_sumsq;
+     }
+     else if (stride_elements == 1) nk_reduce_moments_e3m2_v128relaxed_contiguous_(data_ptr, count, sum_ptr, sumsq_ptr);
+     else nk_reduce_moments_e3m2_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_moments_e5m2_v128relaxed_contiguous_( //
+     nk_e5m2_t const *data_ptr, nk_size_t count, //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     v128_t sum_f32x4 = wasm_f32x4_splat(0), sumsq_f32x4 = wasm_f32x4_splat(0);
+     nk_size_t idx = 0;
+     for (; idx + 4 <= count; idx += 4) {
+         nk_b32_vec_t raw;
+         nk_load_b32_serial_(data_ptr + idx, &raw);
+         v128_t data_f32x4 = nk_e5m2x4_to_f32x4_v128relaxed_(raw).v128;
+         sum_f32x4 = wasm_f32x4_add(sum_f32x4, data_f32x4);
+         sumsq_f32x4 = wasm_f32x4_relaxed_madd(data_f32x4, data_f32x4, sumsq_f32x4);
+     }
+     nk_f32_t sum = nk_reduce_add_f32x4_v128relaxed_(sum_f32x4);
+     nk_f32_t sumsq = nk_reduce_add_f32x4_v128relaxed_(sumsq_f32x4);
+     for (; idx < count; ++idx) {
+         nk_f32_t val;
+         nk_e5m2_to_f32_serial(&data_ptr[idx], &val);
+         sum += val, sumsq += val * val;
+     }
+     *sum_ptr = sum, *sumsq_ptr = sumsq;
+ }
+
+ NK_PUBLIC void nk_reduce_moments_e5m2_v128relaxed( //
+     nk_e5m2_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_f32_t *sum_ptr, nk_f32_t *sumsq_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e5m2_t);
+     int aligned = (stride_bytes % sizeof(nk_e5m2_t) == 0);
+     if (count == 0) *sum_ptr = 0, *sumsq_ptr = 0;
+     else if (!aligned) nk_reduce_moments_e5m2_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+     else if (stride_elements == 1) nk_reduce_moments_e5m2_v128relaxed_contiguous_(data_ptr, count, sum_ptr, sumsq_ptr);
+     else nk_reduce_moments_e5m2_serial(data_ptr, count, stride_bytes, sum_ptr, sumsq_ptr);
+ }
+
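Unlike the FP6 kernels above, E5M2 has a 5-bit exponent, so its values are not all multiples of one small fixed step and the ×16 fixed-point trick does not apply; the kernel instead widens four codes at a time to f32 and folds the square into the accumulator with a relaxed FMA. A scalar equivalent of the hot loop (`data` and `count` are hypothetical; the conversion helper is the one referenced above):

    nk_f32_t sum = 0.0f, sumsq = 0.0f;
    for (nk_size_t i = 0; i < count; ++i) {
        nk_f32_t v;
        nk_e5m2_to_f32_serial(&data[i], &v);
        sum += v;
        sumsq = v * v + sumsq; /* the multiply-add wasm_f32x4_relaxed_madd fuses, 4 lanes at a time */
    }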
+ NK_INTERNAL v128_t nk_fp8x16_to_comparable_v128relaxed_(v128_t raw_u8x16) {
+     v128_t sign_mask_u8x16 = wasm_i8x16_splat((signed char)0x80);
+     v128_t is_negative_b8x16 = wasm_i8x16_eq(wasm_v128_and(raw_u8x16, sign_mask_u8x16), sign_mask_u8x16);
+     v128_t flip_positive_u8x16 = wasm_v128_xor(raw_u8x16, sign_mask_u8x16);
+     v128_t flip_negative_u8x16 = wasm_v128_not(raw_u8x16);
+     return wasm_i8x16_relaxed_laneselect(flip_negative_u8x16, flip_positive_u8x16, is_negative_b8x16);
+ }
+
+ NK_INTERNAL nk_u8_t nk_comparable_to_fp8_v128relaxed_(nk_u8_t comparable) {
+     if (comparable >= 0x80) return comparable ^ 0x80;
+     else return ~comparable;
+ }
+
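This is the classic total-order trick for sign-magnitude encodings: flip only the sign bit of non-negative codes and flip all bits of negative ones, and the resulting bytes order the same way the values do under plain unsigned comparison. A scalar mirror of the vector helper (the function name is hypothetical):

    static nk_u8_t fp8_to_comparable(nk_u8_t raw) {
        return (raw & 0x80) ? (nk_u8_t)~raw : (nk_u8_t)(raw ^ 0x80);
    }
    /* Round trip: nk_comparable_to_fp8_v128relaxed_(fp8_to_comparable(x)) == x for
     * every byte x, and larger FP8 values always map to larger comparable bytes. */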
+ NK_INTERNAL v128_t nk_fp6x16_to_comparable_v128relaxed_(v128_t raw_u8x16) {
+     v128_t magnitude_u8x16 = wasm_v128_and(raw_u8x16, wasm_i8x16_splat(0x1F));
+     v128_t sign_mask_u8x16 = wasm_i8x16_splat(0x20);
+     v128_t is_negative_b8x16 = wasm_i8x16_eq(wasm_v128_and(raw_u8x16, sign_mask_u8x16), sign_mask_u8x16);
+     v128_t positive_u8x16 = wasm_v128_or(magnitude_u8x16, sign_mask_u8x16);
+     v128_t negative_u8x16 = wasm_i8x16_sub(wasm_i8x16_splat(0x1F), magnitude_u8x16);
+     return wasm_i8x16_relaxed_laneselect(negative_u8x16, positive_u8x16, is_negative_b8x16);
+ }
+
+ NK_INTERNAL nk_u8_t nk_comparable_to_fp6_v128relaxed_(nk_u8_t comparable) {
+     if (comparable >= 0x20) return comparable ^ 0x20;
+     else return (0x1F - comparable) | 0x20;
+ }
+
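The 6-bit mapping is small enough to verify by brute force over all 64 codes; a minimal check, assuming <assert.h> (the `cmp` expression is the same one the scalar tails below use):

    #include <assert.h>
    static void check_fp6_roundtrip(void) {
        for (int x = 0; x < 64; ++x) {
            nk_u8_t mag = (nk_u8_t)(x & 0x1F);
            nk_u8_t cmp = (x & 0x20) ? (nk_u8_t)(0x1F - mag) : (nk_u8_t)(mag | 0x20);
            assert(nk_comparable_to_fp6_v128relaxed_(cmp) == (nk_u8_t)x);
        }
    }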
+ NK_INTERNAL void nk_reduce_minmax_e4m3_v128relaxed_contiguous_( //
+     nk_e4m3_t const *data_ptr, nk_size_t count, //
+     nk_e4m3_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e4m3_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_u8x16 = wasm_i8x16_splat((signed char)0xFF), max_u8x16 = wasm_i8x16_splat(0);
+     v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+     v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
+         v128_t comparable_u8x16 = nk_fp8x16_to_comparable_v128relaxed_(raw_u8x16);
+         // E4M3 NaN: comparable == 0x00 (negative NaN) or comparable == 0xFF (positive NaN)
+         v128_t is_nan_low_u8x16 = wasm_i8x16_eq(comparable_u8x16, wasm_i8x16_splat(0));
+         v128_t is_nan_high_u8x16 = wasm_i8x16_eq(comparable_u8x16, wasm_i8x16_splat((signed char)0xFF));
+         v128_t is_nan_u8x16 = wasm_v128_or(is_nan_low_u8x16, is_nan_high_u8x16);
+         v128_t data_min_u8x16 = wasm_i8x16_relaxed_laneselect(wasm_i8x16_splat((signed char)0xFF), comparable_u8x16,
+                                                               is_nan_u8x16);
+         v128_t data_max_u8x16 = wasm_i8x16_relaxed_laneselect(wasm_i8x16_splat(0), comparable_u8x16, is_nan_u8x16);
+         v128_t less_b8x16 = wasm_u8x16_lt(data_min_u8x16, min_u8x16);
+         v128_t greater_b8x16 = wasm_u8x16_gt(data_max_u8x16, max_u8x16);
+         min_u8x16 = wasm_i8x16_relaxed_laneselect(data_min_u8x16, min_u8x16, less_b8x16);
+         max_u8x16 = wasm_i8x16_relaxed_laneselect(data_max_u8x16, max_u8x16, greater_b8x16);
+         min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+         max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+         iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_u8x16;
+     max_values_vec.v128 = max_u8x16;
+     min_iters_vec.v128 = min_iter_u8x16;
+     max_iters_vec.v128 = max_iter_u8x16;
+     nk_u8_t min_comparable = min_values_vec.u8s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (min_values_vec.u8s[i] < min_comparable || (min_values_vec.u8s[i] == min_comparable && abs_idx < min_idx))
+             min_comparable = min_values_vec.u8s[i], min_idx = abs_idx;
+     }
+     nk_u8_t max_comparable = max_values_vec.u8s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (max_values_vec.u8s[i] > max_comparable || (max_values_vec.u8s[i] == max_comparable && abs_idx < max_idx))
+             max_comparable = max_values_vec.u8s[i], max_idx = abs_idx;
+     }
+     // Check if SIMD found only NaN (sentinels unchanged)
+     if (min_comparable == 0xFF) min_idx = NK_SIZE_MAX;
+     if (max_comparable == 0x00) max_idx = NK_SIZE_MAX;
+     for (; idx < count; ++idx) {
+         nk_u8_t raw = data_ptr[idx];
+         nk_u8_t cmp = (raw & 0x80) ? (nk_u8_t)~raw : (raw ^ 0x80);
+         if (cmp == 0x00 || cmp == 0xFF) continue;
+         if (min_idx == NK_SIZE_MAX || cmp < min_comparable) min_comparable = cmp, min_idx = idx;
+         if (max_idx == NK_SIZE_MAX || cmp > max_comparable) max_comparable = cmp, max_idx = idx;
+     }
+     if (min_idx == NK_SIZE_MAX) {
+         *min_value_ptr = (nk_e4m3_t)NK_E4M3_MAX, *min_index_ptr = NK_SIZE_MAX;
+         *max_value_ptr = (nk_e4m3_t)NK_E4M3_MIN, *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     *min_value_ptr = nk_comparable_to_fp8_v128relaxed_(min_comparable), *min_index_ptr = min_idx;
+     *max_value_ptr = nk_comparable_to_fp8_v128relaxed_(max_comparable), *max_index_ptr = max_idx;
+ }
+
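Index recovery is two-level: each u8 lane of `min_iter_u8x16` remembers the 16-element block in which that lane last improved, and the lane number supplies the position within the block. With illustrative numbers:

    nk_size_t block = 3, lane = 5;         /* hypothetical winning lane state */
    nk_size_t abs_idx = block * 16 + lane; /* == 53, the abs_idx the combine loops compute */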
+ NK_PUBLIC void nk_reduce_minmax_e4m3_v128relaxed( //
+     nk_e4m3_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_e4m3_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e4m3_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e4m3_t);
+     int aligned = (stride_bytes % sizeof(nk_e4m3_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_E4M3_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_E4M3_MIN,
+         *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_e4m3_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+     else if (count > (nk_size_t)256 * 16) {
+         nk_size_t left_count = count / 2;
+         nk_e4m3_t left_min, right_min, left_max, right_max;
+         nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+         nk_reduce_minmax_e4m3_v128relaxed(data_ptr, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+                                           &left_max_idx);
+         nk_reduce_minmax_e4m3_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_min, &right_min_idx, &right_max, &right_max_idx);
+         right_min_idx = (right_min_idx == NK_SIZE_MAX) ? NK_SIZE_MAX : left_count + right_min_idx;
+         right_max_idx = (right_max_idx == NK_SIZE_MAX) ? NK_SIZE_MAX : left_count + right_max_idx;
+         if (left_min_idx == NK_SIZE_MAX) *min_value_ptr = right_min, *min_index_ptr = right_min_idx;
+         else if (right_min_idx == NK_SIZE_MAX || nk_e4m3_order_serial(left_min, right_min) <= 0)
+             *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+         else *min_value_ptr = right_min, *min_index_ptr = right_min_idx;
+         if (left_max_idx == NK_SIZE_MAX) *max_value_ptr = right_max, *max_index_ptr = right_max_idx;
+         else if (right_max_idx == NK_SIZE_MAX || nk_e4m3_order_serial(left_max, right_max) >= 0)
+             *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+         else *max_value_ptr = right_max, *max_index_ptr = right_max_idx;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_e4m3_v128relaxed_contiguous_(data_ptr, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                       max_index_ptr);
+     else
+         nk_reduce_minmax_e4m3_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+ }
+
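A caller-side sketch (`data` and `count` are hypothetical): NK_SIZE_MAX indices signal an empty or all-NaN input, so they should be checked before trusting the values:

    nk_e4m3_t lo, hi;
    nk_size_t lo_idx, hi_idx;
    nk_reduce_minmax_e4m3_v128relaxed(data, count, sizeof(nk_e4m3_t), &lo, &lo_idx, &hi, &hi_idx);
    if (lo_idx == NK_SIZE_MAX) {
        /* all elements were NaN (or count == 0); lo/hi hold the MAX/MIN sentinels */
    }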
+ NK_INTERNAL void nk_reduce_minmax_e5m2_v128relaxed_contiguous_( //
+     nk_e5m2_t const *data_ptr, nk_size_t count, //
+     nk_e5m2_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e5m2_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_u8x16 = wasm_i8x16_splat((signed char)0xFF), max_u8x16 = wasm_i8x16_splat(0);
+     v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+     v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
+         v128_t comparable_u8x16 = nk_fp8x16_to_comparable_v128relaxed_(raw_u8x16);
+         // E5M2 NaN: comparable <= 0x02 or comparable >= 0xFD
+         v128_t low_bound_u8x16 = wasm_i8x16_splat(0x02);
+         v128_t high_bound_u8x16 = wasm_i8x16_splat((signed char)0xFD);
+         v128_t is_nan_low_u8x16 = wasm_u8x16_le(comparable_u8x16, low_bound_u8x16);
+         v128_t is_nan_high_u8x16 = wasm_u8x16_ge(comparable_u8x16, high_bound_u8x16);
+         v128_t is_nan_u8x16 = wasm_v128_or(is_nan_low_u8x16, is_nan_high_u8x16);
+         v128_t data_min_u8x16 = wasm_i8x16_relaxed_laneselect(wasm_i8x16_splat((signed char)0xFF), comparable_u8x16,
+                                                               is_nan_u8x16);
+         v128_t data_max_u8x16 = wasm_i8x16_relaxed_laneselect(wasm_i8x16_splat(0), comparable_u8x16, is_nan_u8x16);
+         v128_t less_b8x16 = wasm_u8x16_lt(data_min_u8x16, min_u8x16);
+         v128_t greater_b8x16 = wasm_u8x16_gt(data_max_u8x16, max_u8x16);
+         min_u8x16 = wasm_i8x16_relaxed_laneselect(data_min_u8x16, min_u8x16, less_b8x16);
+         max_u8x16 = wasm_i8x16_relaxed_laneselect(data_max_u8x16, max_u8x16, greater_b8x16);
+         min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+         max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+         iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_u8x16;
+     max_values_vec.v128 = max_u8x16;
+     min_iters_vec.v128 = min_iter_u8x16;
+     max_iters_vec.v128 = max_iter_u8x16;
+     nk_u8_t min_comparable = min_values_vec.u8s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (min_values_vec.u8s[i] < min_comparable || (min_values_vec.u8s[i] == min_comparable && abs_idx < min_idx))
+             min_comparable = min_values_vec.u8s[i], min_idx = abs_idx;
+     }
+     nk_u8_t max_comparable = max_values_vec.u8s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (max_values_vec.u8s[i] > max_comparable || (max_values_vec.u8s[i] == max_comparable && abs_idx < max_idx))
+             max_comparable = max_values_vec.u8s[i], max_idx = abs_idx;
+     }
+     // Check if SIMD found only NaN (sentinels unchanged)
+     if (min_comparable == 0xFF) min_idx = NK_SIZE_MAX;
+     if (max_comparable == 0x00) max_idx = NK_SIZE_MAX;
+     for (; idx < count; ++idx) {
+         nk_u8_t raw = data_ptr[idx];
+         nk_u8_t cmp = (raw & 0x80) ? (nk_u8_t)~raw : (raw ^ 0x80);
+         if (cmp <= 0x02 || cmp >= 0xFD) continue;
+         if (min_idx == NK_SIZE_MAX || cmp < min_comparable) min_comparable = cmp, min_idx = idx;
+         if (max_idx == NK_SIZE_MAX || cmp > max_comparable) max_comparable = cmp, max_idx = idx;
+     }
+     if (min_idx == NK_SIZE_MAX) {
+         *min_value_ptr = (nk_e5m2_t)NK_E5M2_MAX, *min_index_ptr = NK_SIZE_MAX;
+         *max_value_ptr = (nk_e5m2_t)NK_E5M2_MIN, *max_index_ptr = NK_SIZE_MAX;
+         return;
+     }
+     *min_value_ptr = nk_comparable_to_fp8_v128relaxed_(min_comparable), *min_index_ptr = min_idx;
+     *max_value_ptr = nk_comparable_to_fp8_v128relaxed_(max_comparable), *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_e5m2_v128relaxed( //
+     nk_e5m2_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_e5m2_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e5m2_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e5m2_t);
+     int aligned = (stride_bytes % sizeof(nk_e5m2_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_E5M2_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_E5M2_MIN,
+         *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_e5m2_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+     else if (count > (nk_size_t)256 * 16) {
+         nk_size_t left_count = count / 2;
+         nk_e5m2_t left_min, right_min, left_max, right_max;
+         nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+         nk_reduce_minmax_e5m2_v128relaxed(data_ptr, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+                                           &left_max_idx);
+         nk_reduce_minmax_e5m2_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_min, &right_min_idx, &right_max, &right_max_idx);
+         right_min_idx = (right_min_idx == NK_SIZE_MAX) ? NK_SIZE_MAX : left_count + right_min_idx;
+         right_max_idx = (right_max_idx == NK_SIZE_MAX) ? NK_SIZE_MAX : left_count + right_max_idx;
+         if (left_min_idx == NK_SIZE_MAX) *min_value_ptr = right_min, *min_index_ptr = right_min_idx;
+         else if (right_min_idx == NK_SIZE_MAX || nk_e5m2_order_serial(left_min, right_min) <= 0)
+             *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+         else *min_value_ptr = right_min, *min_index_ptr = right_min_idx;
+         if (left_max_idx == NK_SIZE_MAX) *max_value_ptr = right_max, *max_index_ptr = right_max_idx;
+         else if (right_max_idx == NK_SIZE_MAX || nk_e5m2_order_serial(left_max, right_max) >= 0)
+             *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+         else *max_value_ptr = right_max, *max_index_ptr = right_max_idx;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_e5m2_v128relaxed_contiguous_(data_ptr, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                       max_index_ptr);
+     else
+         nk_reduce_minmax_e5m2_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+ }
+
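The `256 * 16` recursion threshold appears to fall straight out of the u8 iteration counters: a lane counter can distinguish at most 256 blocks of 16 elements before wrapping, so larger inputs are halved first (a sketch of the apparent reasoning, not a claim from the source):

    /* iter_u8x16 increments once per 16-element block and must stay below 256,
     * so one contiguous leaf can index at most 256 * 16 = 4096 elements; the
     * divide-and-conquer branch above keeps every leaf under that cap. */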
+ NK_INTERNAL void nk_reduce_minmax_e2m3_v128relaxed_contiguous_( //
+     nk_e2m3_t const *data_ptr, nk_size_t count, //
+     nk_e2m3_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e2m3_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_u8x16 = wasm_i8x16_splat(0x3F), max_u8x16 = wasm_i8x16_splat(0);
+     v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+     v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
+         v128_t comparable_u8x16 = nk_fp6x16_to_comparable_v128relaxed_(raw_u8x16);
+         v128_t less_b8x16 = wasm_u8x16_lt(comparable_u8x16, min_u8x16);
+         v128_t greater_b8x16 = wasm_u8x16_gt(comparable_u8x16, max_u8x16);
+         min_u8x16 = wasm_i8x16_relaxed_laneselect(comparable_u8x16, min_u8x16, less_b8x16);
+         max_u8x16 = wasm_i8x16_relaxed_laneselect(comparable_u8x16, max_u8x16, greater_b8x16);
+         min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+         max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+         iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_u8x16;
+     max_values_vec.v128 = max_u8x16;
+     min_iters_vec.v128 = min_iter_u8x16;
+     max_iters_vec.v128 = max_iter_u8x16;
+     nk_u8_t min_comparable = min_values_vec.u8s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (min_values_vec.u8s[i] < min_comparable || (min_values_vec.u8s[i] == min_comparable && abs_idx < min_idx))
+             min_comparable = min_values_vec.u8s[i], min_idx = abs_idx;
+     }
+     nk_u8_t max_comparable = max_values_vec.u8s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (max_values_vec.u8s[i] > max_comparable || (max_values_vec.u8s[i] == max_comparable && abs_idx < max_idx))
+             max_comparable = max_values_vec.u8s[i], max_idx = abs_idx;
+     }
+     for (; idx < count; ++idx) {
+         nk_u8_t raw = data_ptr[idx] & 0x3F;
+         nk_u8_t sign = raw >> 5;
+         nk_u8_t mag = raw & 0x1F;
+         nk_u8_t cmp = sign ? (0x1F - mag) : (mag | 0x20);
+         if (cmp < min_comparable) min_comparable = cmp, min_idx = idx;
+         if (cmp > max_comparable) max_comparable = cmp, max_idx = idx;
+     }
+     *min_value_ptr = nk_comparable_to_fp6_v128relaxed_(min_comparable), *min_index_ptr = min_idx;
+     *max_value_ptr = nk_comparable_to_fp6_v128relaxed_(max_comparable), *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_e2m3_v128relaxed( //
+     nk_e2m3_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_e2m3_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e2m3_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e2m3_t);
+     int aligned = (stride_bytes % sizeof(nk_e2m3_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_E2M3_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_E2M3_MIN,
+         *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_e2m3_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+     else if (count > (nk_size_t)256 * 16) {
+         nk_size_t left_count = count / 2;
+         nk_e2m3_t left_min, right_min, left_max, right_max;
+         nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+         nk_reduce_minmax_e2m3_v128relaxed(data_ptr, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+                                           &left_max_idx);
+         nk_reduce_minmax_e2m3_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_min, &right_min_idx, &right_max, &right_max_idx);
+         if (nk_e2m3_order_serial(right_min, left_min) < 0)
+             *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+         else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+         if (nk_e2m3_order_serial(right_max, left_max) > 0)
+             *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+         else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_e2m3_v128relaxed_contiguous_(data_ptr, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                       max_index_ptr);
+     else
+         nk_reduce_minmax_e2m3_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+ }
+
+ NK_INTERNAL void nk_reduce_minmax_e3m2_v128relaxed_contiguous_( //
+     nk_e3m2_t const *data_ptr, nk_size_t count, //
+     nk_e3m2_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e3m2_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     v128_t min_u8x16 = wasm_i8x16_splat(0x3F), max_u8x16 = wasm_i8x16_splat(0);
+     v128_t min_iter_u8x16 = wasm_i8x16_splat(0), max_iter_u8x16 = wasm_i8x16_splat(0);
+     v128_t iter_u8x16 = wasm_i8x16_splat(0), one_u8x16 = wasm_i8x16_splat(1);
+     nk_size_t idx = 0;
+     for (; idx + 16 <= count; idx += 16) {
+         v128_t raw_u8x16 = wasm_v128_load(data_ptr + idx);
+         v128_t comparable_u8x16 = nk_fp6x16_to_comparable_v128relaxed_(raw_u8x16);
+         v128_t less_b8x16 = wasm_u8x16_lt(comparable_u8x16, min_u8x16);
+         v128_t greater_b8x16 = wasm_u8x16_gt(comparable_u8x16, max_u8x16);
+         min_u8x16 = wasm_i8x16_relaxed_laneselect(comparable_u8x16, min_u8x16, less_b8x16);
+         max_u8x16 = wasm_i8x16_relaxed_laneselect(comparable_u8x16, max_u8x16, greater_b8x16);
+         min_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, min_iter_u8x16, less_b8x16);
+         max_iter_u8x16 = wasm_i8x16_relaxed_laneselect(iter_u8x16, max_iter_u8x16, greater_b8x16);
+         iter_u8x16 = wasm_i8x16_add(iter_u8x16, one_u8x16);
+     }
+     nk_b128_vec_t min_values_vec, max_values_vec, min_iters_vec, max_iters_vec;
+     min_values_vec.v128 = min_u8x16;
+     max_values_vec.v128 = max_u8x16;
+     min_iters_vec.v128 = min_iter_u8x16;
+     max_iters_vec.v128 = max_iter_u8x16;
+     nk_u8_t min_comparable = min_values_vec.u8s[0];
+     nk_size_t min_idx = (nk_size_t)min_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)min_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (min_values_vec.u8s[i] < min_comparable || (min_values_vec.u8s[i] == min_comparable && abs_idx < min_idx))
+             min_comparable = min_values_vec.u8s[i], min_idx = abs_idx;
+     }
+     nk_u8_t max_comparable = max_values_vec.u8s[0];
+     nk_size_t max_idx = (nk_size_t)max_iters_vec.u8s[0] * 16;
+     for (int i = 1; i < 16; ++i) {
+         nk_size_t abs_idx = (nk_size_t)max_iters_vec.u8s[i] * 16 + (nk_size_t)i;
+         if (max_values_vec.u8s[i] > max_comparable || (max_values_vec.u8s[i] == max_comparable && abs_idx < max_idx))
+             max_comparable = max_values_vec.u8s[i], max_idx = abs_idx;
+     }
+     for (; idx < count; ++idx) {
+         nk_u8_t raw = data_ptr[idx] & 0x3F;
+         nk_u8_t sign = raw >> 5;
+         nk_u8_t mag = raw & 0x1F;
+         nk_u8_t cmp = sign ? (0x1F - mag) : (mag | 0x20);
+         if (cmp < min_comparable) min_comparable = cmp, min_idx = idx;
+         if (cmp > max_comparable) max_comparable = cmp, max_idx = idx;
+     }
+     *min_value_ptr = nk_comparable_to_fp6_v128relaxed_(min_comparable), *min_index_ptr = min_idx;
+     *max_value_ptr = nk_comparable_to_fp6_v128relaxed_(max_comparable), *max_index_ptr = max_idx;
+ }
+
+ NK_PUBLIC void nk_reduce_minmax_e3m2_v128relaxed( //
+     nk_e3m2_t const *data_ptr, nk_size_t count, nk_size_t stride_bytes, //
+     nk_e3m2_t *min_value_ptr, nk_size_t *min_index_ptr, //
+     nk_e3m2_t *max_value_ptr, nk_size_t *max_index_ptr) {
+     nk_size_t stride_elements = stride_bytes / sizeof(nk_e3m2_t);
+     int aligned = (stride_bytes % sizeof(nk_e3m2_t) == 0);
+     if (count == 0)
+         *min_value_ptr = NK_E3M2_MAX, *min_index_ptr = NK_SIZE_MAX, *max_value_ptr = NK_E3M2_MIN,
+         *max_index_ptr = NK_SIZE_MAX;
+     else if (!aligned)
+         nk_reduce_minmax_e3m2_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+     else if (count > (nk_size_t)256 * 16) {
+         nk_size_t left_count = count / 2;
+         nk_e3m2_t left_min, right_min, left_max, right_max;
+         nk_size_t left_min_idx, right_min_idx, left_max_idx, right_max_idx;
+         nk_reduce_minmax_e3m2_v128relaxed(data_ptr, left_count, stride_bytes, &left_min, &left_min_idx, &left_max,
+                                           &left_max_idx);
+         nk_reduce_minmax_e3m2_v128relaxed(data_ptr + left_count * stride_elements, count - left_count, stride_bytes,
+                                           &right_min, &right_min_idx, &right_max, &right_max_idx);
+         if (nk_e3m2_order_serial(right_min, left_min) < 0)
+             *min_value_ptr = right_min, *min_index_ptr = left_count + right_min_idx;
+         else *min_value_ptr = left_min, *min_index_ptr = left_min_idx;
+         if (nk_e3m2_order_serial(right_max, left_max) > 0)
+             *max_value_ptr = right_max, *max_index_ptr = left_count + right_max_idx;
+         else *max_value_ptr = left_max, *max_index_ptr = left_max_idx;
+     }
+     else if (stride_elements == 1)
+         nk_reduce_minmax_e3m2_v128relaxed_contiguous_(data_ptr, count, min_value_ptr, min_index_ptr, max_value_ptr,
+                                                       max_index_ptr);
+     else
+         nk_reduce_minmax_e3m2_serial(data_ptr, count, stride_bytes, min_value_ptr, min_index_ptr, max_value_ptr,
+                                      max_index_ptr);
+ }
+
+ #if defined(__clang__)
+ #pragma clang attribute pop
+ #endif
+
+ #if defined(__cplusplus)
+ } // extern "C"
+ #endif
+
+ #endif // NK_TARGET_V128RELAXED
+ #endif // NK_REDUCE_V128RELAXED_H