numkong 7.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (294) hide show
  1. package/LICENSE +201 -0
  2. package/README.md +495 -0
  3. package/binding.gyp +540 -0
  4. package/c/dispatch.h +512 -0
  5. package/c/dispatch_bf16.c +389 -0
  6. package/c/dispatch_bf16c.c +52 -0
  7. package/c/dispatch_e2m3.c +263 -0
  8. package/c/dispatch_e3m2.c +243 -0
  9. package/c/dispatch_e4m3.c +276 -0
  10. package/c/dispatch_e5m2.c +272 -0
  11. package/c/dispatch_f16.c +376 -0
  12. package/c/dispatch_f16c.c +58 -0
  13. package/c/dispatch_f32.c +378 -0
  14. package/c/dispatch_f32c.c +99 -0
  15. package/c/dispatch_f64.c +296 -0
  16. package/c/dispatch_f64c.c +98 -0
  17. package/c/dispatch_i16.c +96 -0
  18. package/c/dispatch_i32.c +89 -0
  19. package/c/dispatch_i4.c +150 -0
  20. package/c/dispatch_i64.c +86 -0
  21. package/c/dispatch_i8.c +289 -0
  22. package/c/dispatch_other.c +330 -0
  23. package/c/dispatch_u1.c +148 -0
  24. package/c/dispatch_u16.c +124 -0
  25. package/c/dispatch_u32.c +118 -0
  26. package/c/dispatch_u4.c +150 -0
  27. package/c/dispatch_u64.c +102 -0
  28. package/c/dispatch_u8.c +303 -0
  29. package/c/numkong.c +950 -0
  30. package/include/README.md +573 -0
  31. package/include/module.modulemap +129 -0
  32. package/include/numkong/attention/sapphireamx.h +1361 -0
  33. package/include/numkong/attention/sme.h +2066 -0
  34. package/include/numkong/attention.h +49 -0
  35. package/include/numkong/capabilities.h +748 -0
  36. package/include/numkong/cast/README.md +262 -0
  37. package/include/numkong/cast/haswell.h +975 -0
  38. package/include/numkong/cast/icelake.h +470 -0
  39. package/include/numkong/cast/neon.h +1192 -0
  40. package/include/numkong/cast/rvv.h +1021 -0
  41. package/include/numkong/cast/sapphire.h +262 -0
  42. package/include/numkong/cast/serial.h +2262 -0
  43. package/include/numkong/cast/skylake.h +856 -0
  44. package/include/numkong/cast/v128relaxed.h +180 -0
  45. package/include/numkong/cast.h +230 -0
  46. package/include/numkong/curved/README.md +223 -0
  47. package/include/numkong/curved/genoa.h +182 -0
  48. package/include/numkong/curved/haswell.h +276 -0
  49. package/include/numkong/curved/neon.h +205 -0
  50. package/include/numkong/curved/neonbfdot.h +212 -0
  51. package/include/numkong/curved/neonhalf.h +212 -0
  52. package/include/numkong/curved/rvv.h +305 -0
  53. package/include/numkong/curved/serial.h +207 -0
  54. package/include/numkong/curved/skylake.h +457 -0
  55. package/include/numkong/curved/smef64.h +506 -0
  56. package/include/numkong/curved.h +517 -0
  57. package/include/numkong/curved.hpp +144 -0
  58. package/include/numkong/dot/README.md +425 -0
  59. package/include/numkong/dot/alder.h +563 -0
  60. package/include/numkong/dot/genoa.h +315 -0
  61. package/include/numkong/dot/haswell.h +1688 -0
  62. package/include/numkong/dot/icelake.h +883 -0
  63. package/include/numkong/dot/neon.h +818 -0
  64. package/include/numkong/dot/neonbfdot.h +244 -0
  65. package/include/numkong/dot/neonfhm.h +360 -0
  66. package/include/numkong/dot/neonhalf.h +198 -0
  67. package/include/numkong/dot/neonsdot.h +508 -0
  68. package/include/numkong/dot/rvv.h +714 -0
  69. package/include/numkong/dot/rvvbb.h +72 -0
  70. package/include/numkong/dot/rvvbf16.h +123 -0
  71. package/include/numkong/dot/rvvhalf.h +129 -0
  72. package/include/numkong/dot/sapphire.h +141 -0
  73. package/include/numkong/dot/serial.h +838 -0
  74. package/include/numkong/dot/sierra.h +405 -0
  75. package/include/numkong/dot/skylake.h +1084 -0
  76. package/include/numkong/dot/sve.h +379 -0
  77. package/include/numkong/dot/svebfdot.h +74 -0
  78. package/include/numkong/dot/svehalf.h +123 -0
  79. package/include/numkong/dot/v128relaxed.h +1258 -0
  80. package/include/numkong/dot.h +1070 -0
  81. package/include/numkong/dot.hpp +94 -0
  82. package/include/numkong/dots/README.md +496 -0
  83. package/include/numkong/dots/alder.h +114 -0
  84. package/include/numkong/dots/genoa.h +94 -0
  85. package/include/numkong/dots/haswell.h +295 -0
  86. package/include/numkong/dots/icelake.h +171 -0
  87. package/include/numkong/dots/neon.h +120 -0
  88. package/include/numkong/dots/neonbfdot.h +58 -0
  89. package/include/numkong/dots/neonfhm.h +94 -0
  90. package/include/numkong/dots/neonhalf.h +57 -0
  91. package/include/numkong/dots/neonsdot.h +108 -0
  92. package/include/numkong/dots/rvv.h +2486 -0
  93. package/include/numkong/dots/sapphireamx.h +3973 -0
  94. package/include/numkong/dots/serial.h +2844 -0
  95. package/include/numkong/dots/sierra.h +97 -0
  96. package/include/numkong/dots/skylake.h +196 -0
  97. package/include/numkong/dots/sme.h +5372 -0
  98. package/include/numkong/dots/smebi32.h +461 -0
  99. package/include/numkong/dots/smef64.h +1318 -0
  100. package/include/numkong/dots/smehalf.h +47 -0
  101. package/include/numkong/dots/v128relaxed.h +294 -0
  102. package/include/numkong/dots.h +2804 -0
  103. package/include/numkong/dots.hpp +639 -0
  104. package/include/numkong/each/README.md +469 -0
  105. package/include/numkong/each/haswell.h +1658 -0
  106. package/include/numkong/each/icelake.h +272 -0
  107. package/include/numkong/each/neon.h +1104 -0
  108. package/include/numkong/each/neonbfdot.h +212 -0
  109. package/include/numkong/each/neonhalf.h +410 -0
  110. package/include/numkong/each/rvv.h +1121 -0
  111. package/include/numkong/each/sapphire.h +477 -0
  112. package/include/numkong/each/serial.h +260 -0
  113. package/include/numkong/each/skylake.h +1562 -0
  114. package/include/numkong/each.h +2146 -0
  115. package/include/numkong/each.hpp +434 -0
  116. package/include/numkong/geospatial/README.md +147 -0
  117. package/include/numkong/geospatial/haswell.h +593 -0
  118. package/include/numkong/geospatial/neon.h +571 -0
  119. package/include/numkong/geospatial/rvv.h +701 -0
  120. package/include/numkong/geospatial/serial.h +309 -0
  121. package/include/numkong/geospatial/skylake.h +577 -0
  122. package/include/numkong/geospatial/v128relaxed.h +613 -0
  123. package/include/numkong/geospatial.h +453 -0
  124. package/include/numkong/geospatial.hpp +235 -0
  125. package/include/numkong/matrix.hpp +336 -0
  126. package/include/numkong/maxsim/README.md +187 -0
  127. package/include/numkong/maxsim/alder.h +511 -0
  128. package/include/numkong/maxsim/genoa.h +115 -0
  129. package/include/numkong/maxsim/haswell.h +553 -0
  130. package/include/numkong/maxsim/icelake.h +480 -0
  131. package/include/numkong/maxsim/neonsdot.h +394 -0
  132. package/include/numkong/maxsim/sapphireamx.h +877 -0
  133. package/include/numkong/maxsim/serial.h +490 -0
  134. package/include/numkong/maxsim/sme.h +929 -0
  135. package/include/numkong/maxsim/v128relaxed.h +280 -0
  136. package/include/numkong/maxsim.h +571 -0
  137. package/include/numkong/maxsim.hpp +133 -0
  138. package/include/numkong/mesh/README.md +227 -0
  139. package/include/numkong/mesh/haswell.h +2235 -0
  140. package/include/numkong/mesh/neon.h +1329 -0
  141. package/include/numkong/mesh/neonbfdot.h +842 -0
  142. package/include/numkong/mesh/neonhalf.h +616 -0
  143. package/include/numkong/mesh/rvv.h +916 -0
  144. package/include/numkong/mesh/serial.h +742 -0
  145. package/include/numkong/mesh/skylake.h +1135 -0
  146. package/include/numkong/mesh/v128relaxed.h +1052 -0
  147. package/include/numkong/mesh.h +652 -0
  148. package/include/numkong/mesh.hpp +762 -0
  149. package/include/numkong/numkong.h +78 -0
  150. package/include/numkong/numkong.hpp +57 -0
  151. package/include/numkong/probability/README.md +173 -0
  152. package/include/numkong/probability/haswell.h +267 -0
  153. package/include/numkong/probability/neon.h +225 -0
  154. package/include/numkong/probability/rvv.h +409 -0
  155. package/include/numkong/probability/serial.h +169 -0
  156. package/include/numkong/probability/skylake.h +324 -0
  157. package/include/numkong/probability.h +383 -0
  158. package/include/numkong/probability.hpp +120 -0
  159. package/include/numkong/random.h +50 -0
  160. package/include/numkong/random.hpp +285 -0
  161. package/include/numkong/reduce/README.md +547 -0
  162. package/include/numkong/reduce/alder.h +632 -0
  163. package/include/numkong/reduce/genoa.h +201 -0
  164. package/include/numkong/reduce/haswell.h +3783 -0
  165. package/include/numkong/reduce/icelake.h +549 -0
  166. package/include/numkong/reduce/neon.h +3841 -0
  167. package/include/numkong/reduce/neonbfdot.h +353 -0
  168. package/include/numkong/reduce/neonfhm.h +665 -0
  169. package/include/numkong/reduce/neonhalf.h +157 -0
  170. package/include/numkong/reduce/neonsdot.h +357 -0
  171. package/include/numkong/reduce/rvv.h +3407 -0
  172. package/include/numkong/reduce/serial.h +757 -0
  173. package/include/numkong/reduce/sierra.h +338 -0
  174. package/include/numkong/reduce/skylake.h +3792 -0
  175. package/include/numkong/reduce/v128relaxed.h +2302 -0
  176. package/include/numkong/reduce.h +1597 -0
  177. package/include/numkong/reduce.hpp +633 -0
  178. package/include/numkong/scalar/README.md +89 -0
  179. package/include/numkong/scalar/haswell.h +113 -0
  180. package/include/numkong/scalar/neon.h +122 -0
  181. package/include/numkong/scalar/neonhalf.h +70 -0
  182. package/include/numkong/scalar/rvv.h +211 -0
  183. package/include/numkong/scalar/sapphire.h +63 -0
  184. package/include/numkong/scalar/serial.h +332 -0
  185. package/include/numkong/scalar/v128relaxed.h +56 -0
  186. package/include/numkong/scalar.h +683 -0
  187. package/include/numkong/set/README.md +179 -0
  188. package/include/numkong/set/haswell.h +334 -0
  189. package/include/numkong/set/icelake.h +485 -0
  190. package/include/numkong/set/neon.h +364 -0
  191. package/include/numkong/set/rvv.h +226 -0
  192. package/include/numkong/set/rvvbb.h +117 -0
  193. package/include/numkong/set/serial.h +174 -0
  194. package/include/numkong/set/sve.h +185 -0
  195. package/include/numkong/set/v128relaxed.h +240 -0
  196. package/include/numkong/set.h +457 -0
  197. package/include/numkong/set.hpp +114 -0
  198. package/include/numkong/sets/README.md +149 -0
  199. package/include/numkong/sets/haswell.h +63 -0
  200. package/include/numkong/sets/icelake.h +66 -0
  201. package/include/numkong/sets/neon.h +61 -0
  202. package/include/numkong/sets/serial.h +43 -0
  203. package/include/numkong/sets/smebi32.h +1099 -0
  204. package/include/numkong/sets/v128relaxed.h +58 -0
  205. package/include/numkong/sets.h +339 -0
  206. package/include/numkong/sparse/README.md +156 -0
  207. package/include/numkong/sparse/icelake.h +463 -0
  208. package/include/numkong/sparse/neon.h +288 -0
  209. package/include/numkong/sparse/serial.h +117 -0
  210. package/include/numkong/sparse/sve2.h +507 -0
  211. package/include/numkong/sparse/turin.h +322 -0
  212. package/include/numkong/sparse.h +363 -0
  213. package/include/numkong/sparse.hpp +113 -0
  214. package/include/numkong/spatial/README.md +435 -0
  215. package/include/numkong/spatial/alder.h +607 -0
  216. package/include/numkong/spatial/genoa.h +290 -0
  217. package/include/numkong/spatial/haswell.h +960 -0
  218. package/include/numkong/spatial/icelake.h +586 -0
  219. package/include/numkong/spatial/neon.h +773 -0
  220. package/include/numkong/spatial/neonbfdot.h +165 -0
  221. package/include/numkong/spatial/neonhalf.h +118 -0
  222. package/include/numkong/spatial/neonsdot.h +261 -0
  223. package/include/numkong/spatial/rvv.h +984 -0
  224. package/include/numkong/spatial/rvvbf16.h +123 -0
  225. package/include/numkong/spatial/rvvhalf.h +117 -0
  226. package/include/numkong/spatial/sapphire.h +343 -0
  227. package/include/numkong/spatial/serial.h +346 -0
  228. package/include/numkong/spatial/sierra.h +323 -0
  229. package/include/numkong/spatial/skylake.h +606 -0
  230. package/include/numkong/spatial/sve.h +224 -0
  231. package/include/numkong/spatial/svebfdot.h +122 -0
  232. package/include/numkong/spatial/svehalf.h +109 -0
  233. package/include/numkong/spatial/v128relaxed.h +717 -0
  234. package/include/numkong/spatial.h +1425 -0
  235. package/include/numkong/spatial.hpp +183 -0
  236. package/include/numkong/spatials/README.md +580 -0
  237. package/include/numkong/spatials/alder.h +94 -0
  238. package/include/numkong/spatials/genoa.h +94 -0
  239. package/include/numkong/spatials/haswell.h +219 -0
  240. package/include/numkong/spatials/icelake.h +113 -0
  241. package/include/numkong/spatials/neon.h +109 -0
  242. package/include/numkong/spatials/neonbfdot.h +60 -0
  243. package/include/numkong/spatials/neonfhm.h +92 -0
  244. package/include/numkong/spatials/neonhalf.h +58 -0
  245. package/include/numkong/spatials/neonsdot.h +109 -0
  246. package/include/numkong/spatials/rvv.h +1960 -0
  247. package/include/numkong/spatials/sapphireamx.h +1149 -0
  248. package/include/numkong/spatials/serial.h +226 -0
  249. package/include/numkong/spatials/sierra.h +96 -0
  250. package/include/numkong/spatials/skylake.h +184 -0
  251. package/include/numkong/spatials/sme.h +1901 -0
  252. package/include/numkong/spatials/smef64.h +465 -0
  253. package/include/numkong/spatials/v128relaxed.h +240 -0
  254. package/include/numkong/spatials.h +3021 -0
  255. package/include/numkong/spatials.hpp +508 -0
  256. package/include/numkong/tensor.hpp +1592 -0
  257. package/include/numkong/trigonometry/README.md +184 -0
  258. package/include/numkong/trigonometry/haswell.h +652 -0
  259. package/include/numkong/trigonometry/neon.h +639 -0
  260. package/include/numkong/trigonometry/rvv.h +699 -0
  261. package/include/numkong/trigonometry/serial.h +703 -0
  262. package/include/numkong/trigonometry/skylake.h +721 -0
  263. package/include/numkong/trigonometry/v128relaxed.h +666 -0
  264. package/include/numkong/trigonometry.h +467 -0
  265. package/include/numkong/trigonometry.hpp +166 -0
  266. package/include/numkong/types.h +1384 -0
  267. package/include/numkong/types.hpp +5603 -0
  268. package/include/numkong/vector.hpp +698 -0
  269. package/javascript/README.md +246 -0
  270. package/javascript/dist/cjs/numkong-wasm.d.ts +166 -0
  271. package/javascript/dist/cjs/numkong-wasm.js +617 -0
  272. package/javascript/dist/cjs/numkong.d.ts +343 -0
  273. package/javascript/dist/cjs/numkong.js +523 -0
  274. package/javascript/dist/cjs/package.json +3 -0
  275. package/javascript/dist/cjs/types.d.ts +284 -0
  276. package/javascript/dist/cjs/types.js +653 -0
  277. package/javascript/dist/esm/numkong-wasm.d.ts +166 -0
  278. package/javascript/dist/esm/numkong-wasm.js +595 -0
  279. package/javascript/dist/esm/numkong.d.ts +343 -0
  280. package/javascript/dist/esm/numkong.js +452 -0
  281. package/javascript/dist/esm/package.json +3 -0
  282. package/javascript/dist/esm/types.d.ts +284 -0
  283. package/javascript/dist/esm/types.js +630 -0
  284. package/javascript/dist-package-cjs.json +3 -0
  285. package/javascript/dist-package-esm.json +3 -0
  286. package/javascript/node-gyp-build.d.ts +1 -0
  287. package/javascript/numkong-wasm.ts +756 -0
  288. package/javascript/numkong.c +689 -0
  289. package/javascript/numkong.ts +575 -0
  290. package/javascript/tsconfig-base.json +39 -0
  291. package/javascript/tsconfig-cjs.json +8 -0
  292. package/javascript/tsconfig-esm.json +8 -0
  293. package/javascript/types.ts +674 -0
  294. package/package.json +87 -0
@@ -0,0 +1,916 @@
1
+ /**
2
+ * @brief SIMD-accelerated Mesh Operations for RISC-V.
3
+ * @file include/numkong/mesh/rvv.h
4
+ * @author Ash Vardanian
5
+ * @date February 6, 2026
6
+ *
7
+ * @sa include/numkong/mesh.h
8
+ *
9
+ * RVV mesh operations leverage:
10
+ *
11
+ * - `vlseg3e32`/`vlseg3e64`: deinterleave xyz triplets in hardware
12
+ * - `vfwcvt`/`vfwmacc`: widening FMA for f32→f64 accumulation
13
+ * - `vfredusum`: single-instruction horizontal reduction
14
+ * - Serial SVD/determinant from mesh/serial.h for fixed 3×3 matrix operations
15
+ *
16
+ * Fused helpers minimize data passes:
17
+ *
18
+ * - `nk_bicentroid_*_rvv_`: both centroids in a single pass (used by RMSD)
19
+ * - `nk_centroid_and_cross_covariance_*_rvv_`: centroids + H in one pass (Kabsch)
20
+ * - `nk_centroid_and_cross_covariance_and_variance_*_rvv_`: + variance (Umeyama)
21
+ *
22
+ * Math for fused centroid+covariance:
23
+ * H[i][j] = Σ (a[i] - ca[i]) * (b[j] - cb[j])
24
+ * = Σ a[i] * b[j] - n * ca[i] * cb[j]
25
+ * So we accumulate raw Σ a[i] * b[j] in the loop, then fix up after.
26
+ *
27
+ * Key RVV-specific optimizations (vs. scalar or x86 backends):
28
+ *
29
+ * - Deferred horizontal reduction in bicentroid: per-lane `vfwadd_wv` (f32)
30
+ * or `vfadd_vv` (f64) accumulation across loop iterations, with a single
31
+ * `vfredusum` after the loop — eliminates 6 `vfredusum` per iteration.
32
+ * - `vfwmacc_vv` in f32 SSD: accumulates widened squared distances per-lane
33
+ * (dx²+dy²+dz²) before a single reduction — saves 2 `vfredusum` per iteration.
34
+ * - Vectorized R = V×Uᵀ via `vfmul_vf`/`vfmacc_vf`: each output row computed
35
+ * as a 3-element vector dot product — 15 vector ops vs 45 scalar ops.
36
+ * - `vfncvt_f_f_w` for f64→f32 narrowing of H matrix before SVD.
37
+ */
38
+ #ifndef NK_MESH_RVV_H
39
+ #define NK_MESH_RVV_H
40
+
41
+ #if NK_TARGET_RISCV_
42
+ #if NK_TARGET_RVV
43
+
44
+ #include "numkong/types.h"
45
+ #include "numkong/dot/rvv.h"
46
+ #include "numkong/spatial/rvv.h" // `nk_f32_sqrt_rvv`, `nk_f64_sqrt_rvv`
47
+ #include "numkong/mesh/serial.h" // `nk_svd3x3_f32_`, `nk_svd3x3_f64_`, `nk_det3x3_f32_`, `nk_det3x3_f64_`
48
+
49
+ #if defined(__clang__)
50
+ #pragma clang attribute push(__attribute__((target("arch=+v"))), apply_to = function)
51
+ #elif defined(__GNUC__)
52
+ #pragma GCC push_options
53
+ #pragma GCC target("arch=+v")
54
+ #endif
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif
59
+
60
/**
 *  @brief Tail-undisturbed compensated (Neumaier two-sum) vector accumulation: *sum += addend.
 *
 *  Per lane, performs the classic error-free two-sum transformation:
 *      tentative = sum + addend
 *      virtual   = tentative - sum                       // portion of `addend` absorbed
 *      error     = (sum - (tentative - virtual)) + (addend - virtual)
 *  The per-lane rounding error is folded into @p compensation_f64m1 so a later
 *  stable reduction can add the lost low-order bits back in.
 *
 *  All state updates use tail-undisturbed (`_tu`) forms: lanes at or beyond
 *  @p vector_length keep their previously accumulated values, which matters when
 *  the final strip-mined loop iteration runs with a shorter vector length.
 */
NK_INTERNAL void nk_accumulate_sum_f64m1_rvv_(vfloat64m1_t *sum_f64m1, vfloat64m1_t *compensation_f64m1,
                                              vfloat64m1_t addend_f64m1, nk_size_t vector_length) {
    vfloat64m1_t tentative_sum_f64m1 = __riscv_vfadd_vv_f64m1(*sum_f64m1, addend_f64m1, vector_length);
    vfloat64m1_t virtual_addend_f64m1 = __riscv_vfsub_vv_f64m1(tentative_sum_f64m1, *sum_f64m1, vector_length);
    vfloat64m1_t sum_error_f64m1 = __riscv_vfadd_vv_f64m1(
        __riscv_vfsub_vv_f64m1(*sum_f64m1,
                               __riscv_vfsub_vv_f64m1(tentative_sum_f64m1, virtual_addend_f64m1, vector_length),
                               vector_length),
        __riscv_vfsub_vv_f64m1(addend_f64m1, virtual_addend_f64m1, vector_length), vector_length);
    // `vslideup` by 0 with the `_tu` policy acts as a length-bounded move:
    // lanes [0, vector_length) take the new sum, tail lanes keep the old one.
    *sum_f64m1 = __riscv_vslideup_vx_f64m1_tu(*sum_f64m1, tentative_sum_f64m1, 0, vector_length);
    *compensation_f64m1 = __riscv_vfadd_vv_f64m1_tu(*compensation_f64m1, *compensation_f64m1, sum_error_f64m1,
                                                    vector_length);
}
73
+
74
/**
 *  @brief Tail-undisturbed compensated multiply-accumulate: *sum += left * right.
 *
 *  Combines two error-free transformations per lane:
 *  - Product error via FMA: `vfmsac` computes vd = vs1 * vs2 - vd, i.e. the exact
 *    rounding error of the preceding `vfmul`.
 *  - Two-sum for folding the rounded product into the running sum.
 *  Both error terms are accumulated into @p compensation_f64m1 so a later stable
 *  reduction can restore the low-order bits.
 *
 *  Tail-undisturbed (`_tu`) updates keep lanes at or beyond @p vector_length
 *  intact, so a shorter final strip-mine iteration does not clobber earlier state.
 */
NK_INTERNAL void nk_accumulate_product_f64m1_rvv_(vfloat64m1_t *sum_f64m1, vfloat64m1_t *compensation_f64m1,
                                                  vfloat64m1_t left_f64m1, vfloat64m1_t right_f64m1,
                                                  nk_size_t vector_length) {
    vfloat64m1_t product_f64m1 = __riscv_vfmul_vv_f64m1(left_f64m1, right_f64m1, vector_length);
    // Exact multiplication error: left * right - product (fused, no intermediate rounding).
    vfloat64m1_t product_error_f64m1 = __riscv_vfmsac_vv_f64m1(product_f64m1, left_f64m1, right_f64m1, vector_length);
    vfloat64m1_t tentative_sum_f64m1 = __riscv_vfadd_vv_f64m1(*sum_f64m1, product_f64m1, vector_length);
    vfloat64m1_t virtual_addend_f64m1 = __riscv_vfsub_vv_f64m1(tentative_sum_f64m1, *sum_f64m1, vector_length);
    vfloat64m1_t sum_error_f64m1 = __riscv_vfadd_vv_f64m1(
        __riscv_vfsub_vv_f64m1(*sum_f64m1,
                               __riscv_vfsub_vv_f64m1(tentative_sum_f64m1, virtual_addend_f64m1, vector_length),
                               vector_length),
        __riscv_vfsub_vv_f64m1(product_f64m1, virtual_addend_f64m1, vector_length), vector_length);
    // `vslideup` by 0 with `_tu` = length-bounded move of the new sum into *sum_f64m1.
    *sum_f64m1 = __riscv_vslideup_vx_f64m1_tu(*sum_f64m1, tentative_sum_f64m1, 0, vector_length);
    vfloat64m1_t total_error_f64m1 = __riscv_vfadd_vv_f64m1(sum_error_f64m1, product_error_f64m1, vector_length);
    *compensation_f64m1 = __riscv_vfadd_vv_f64m1_tu(*compensation_f64m1, *compensation_f64m1, total_error_f64m1,
                                                    vector_length);
}
91
+
92
+ /**
93
+ * @brief Compute centroids of two f32 point clouds in a single pass.
94
+ *
95
+ * Reads both clouds simultaneously, accumulating 6 sums (3 per cloud) in f64.
96
+ * Reduces RMSD from 3 passes to 2 (bicentroid + SSD).
97
+ * Uses per-lane `vfwadd_wv` accumulation with deferred `vfredusum` after the loop.
98
+ */
99
+ NK_INTERNAL void nk_bicentroid_f32_rvv_( //
100
+ nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, //
101
+ nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z, //
102
+ nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z) {
103
+ nk_size_t vlmax = __riscv_vsetvlmax_e64m2();
104
+ vfloat64m2_t sum_a_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
105
+ vfloat64m2_t sum_a_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
106
+ vfloat64m2_t sum_a_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
107
+ vfloat64m2_t sum_b_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
108
+ vfloat64m2_t sum_b_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
109
+ vfloat64m2_t sum_b_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
110
+ nk_f32_t const *a_ptr = a, *b_ptr = b;
111
+ nk_size_t remaining = n;
112
+ for (nk_size_t vector_length; remaining > 0;
113
+ remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
114
+ vector_length = __riscv_vsetvl_e32m1(remaining);
115
+ vfloat32m1x3_t a_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(a_ptr, vector_length);
116
+ sum_a_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_x_f64m2, sum_a_x_f64m2,
117
+ __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 0), vector_length);
118
+ sum_a_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_y_f64m2, sum_a_y_f64m2,
119
+ __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 1), vector_length);
120
+ sum_a_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_z_f64m2, sum_a_z_f64m2,
121
+ __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 2), vector_length);
122
+ vfloat32m1x3_t b_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(b_ptr, vector_length);
123
+ sum_b_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_x_f64m2, sum_b_x_f64m2,
124
+ __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 0), vector_length);
125
+ sum_b_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_y_f64m2, sum_b_y_f64m2,
126
+ __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 1), vector_length);
127
+ sum_b_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_z_f64m2, sum_b_z_f64m2,
128
+ __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 2), vector_length);
129
+ }
130
+ vfloat64m1_t zero_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, 1);
131
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
132
+ *ca_x = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_x_f64m2, zero_f64m1, vlmax)) * inv_n;
133
+ *ca_y = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_y_f64m2, zero_f64m1, vlmax)) * inv_n;
134
+ *ca_z = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_z_f64m2, zero_f64m1, vlmax)) * inv_n;
135
+ *cb_x = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_x_f64m2, zero_f64m1, vlmax)) * inv_n;
136
+ *cb_y = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_y_f64m2, zero_f64m1, vlmax)) * inv_n;
137
+ *cb_z = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_z_f64m2, zero_f64m1, vlmax)) * inv_n;
138
+ }
139
+
140
/**
 *  @brief Compute centroids of two f64 point clouds in a single pass.
 *
 *  Per-lane compensated accumulation (`nk_accumulate_sum_f64m1_rvv_`, Neumaier
 *  two-sum) with one deferred stable reduction per coordinate after the loop.
 *  `vlseg3e64` deinterleaves the packed xyz triplets in hardware.
 *
 *  @param a,b  Interleaved xyz triplets, @p n points each.
 *  @param ca_x,ca_y,ca_z  Output centroid of @p a.
 *  @param cb_x,cb_y,cb_z  Output centroid of @p b.
 *  NOTE(review): no guard for n == 0 — inv_n becomes inf; presumably callers
 *  ensure n > 0, as in the other backends — confirm against mesh.h dispatch.
 */
NK_INTERNAL void nk_bicentroid_f64_rvv_( //
    nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, //
    nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z, //
    nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z) {
    nk_size_t vlmax = __riscv_vsetvlmax_e64m1();
    // Six running sums (3 coordinates × 2 clouds) plus matching Neumaier
    // compensation terms, all zero-initialized across the full register.
    vfloat64m1_t sum_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    nk_f64_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined loop: pointers advance by 3 scalars per processed point.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e64m1(remaining);
        vfloat64m1x3_t a_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(a_ptr, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_x_f64m1, &compensation_a_x_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 0), vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_y_f64m1, &compensation_a_y_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 1), vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_z_f64m1, &compensation_a_z_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 2), vector_length);
        vfloat64m1x3_t b_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(b_ptr, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_x_f64m1, &compensation_b_x_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 0), vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_y_f64m1, &compensation_b_y_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 1), vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_z_f64m1, &compensation_b_z_f64m1,
                                     __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 2), vector_length);
    }
    // Stable reductions fold each compensation vector back into its sum.
    nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
    *ca_x = nk_dot_stable_sum_f64m1_rvv_(sum_a_x_f64m1, compensation_a_x_f64m1) * inv_n;
    *ca_y = nk_dot_stable_sum_f64m1_rvv_(sum_a_y_f64m1, compensation_a_y_f64m1) * inv_n;
    *ca_z = nk_dot_stable_sum_f64m1_rvv_(sum_a_z_f64m1, compensation_a_z_f64m1) * inv_n;
    *cb_x = nk_dot_stable_sum_f64m1_rvv_(sum_b_x_f64m1, compensation_b_x_f64m1) * inv_n;
    *cb_y = nk_dot_stable_sum_f64m1_rvv_(sum_b_y_f64m1, compensation_b_y_f64m1) * inv_n;
    *cb_z = nk_dot_stable_sum_f64m1_rvv_(sum_b_z_f64m1, compensation_b_z_f64m1) * inv_n;
}
189
+
190
/**
 *  @brief Compute centroids and cross-covariance matrix in a single pass (f32).
 *
 *  Accumulates raw Σ a[i]*b[j] and Σ a[i], Σ b[j] simultaneously, then:
 *      ca = Σa / n, cb = Σb / n
 *      H[i][j] = raw[i][j] - n * ca[i] * cb[j]
 *
 *  Reduces Kabsch from 4 passes to 2 (fused centroid+covariance + SSD).
 *  Cross-products use per-lane `vfwmacc_vv` accumulation (vfloat64m2_t) with
 *  deferred `vfredusum` after the loop — eliminates 9 reductions per iteration.
 *
 *  @param a,b  Interleaved xyz triplets, @p n points each.
 *  @param ca_x..cb_z  Output centroids of @p a and @p b in f64.
 *  @param h  Output 3x3 cross-covariance matrix, row-major.
 */
NK_INTERNAL void nk_centroid_and_cross_covariance_f32_rvv_( //
    nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, //
    nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z, //
    nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z, //
    nk_f64_t h[9]) {
    nk_size_t vlmax = __riscv_vsetvlmax_e64m2();
    // 6 coordinate sums + 9 cross-product accumulators, all widened to f64.
    vfloat64m2_t sum_a_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax), sum_a_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_a_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_b_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax), sum_b_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_b_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_00_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_01_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_02_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_10_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_11_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_12_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_20_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_21_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_22_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    nk_f32_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined loop; `_tu` accumulation keeps tail lanes intact on the
    // final, shorter iteration.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e32m1(remaining);
        vfloat32m1x3_t a_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(a_ptr, vector_length);
        vfloat32m1_t a_x_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 0);
        vfloat32m1_t a_y_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 1);
        vfloat32m1_t a_z_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 2);
        vfloat32m1x3_t b_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(b_ptr, vector_length);
        vfloat32m1_t b_x_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 0);
        vfloat32m1_t b_y_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 1);
        vfloat32m1_t b_z_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 2);
        sum_a_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_x_f64m2, sum_a_x_f64m2, a_x_f32m1, vector_length);
        sum_a_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_y_f64m2, sum_a_y_f64m2, a_y_f32m1, vector_length);
        sum_a_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_z_f64m2, sum_a_z_f64m2, a_z_f32m1, vector_length);
        sum_b_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_x_f64m2, sum_b_x_f64m2, b_x_f32m1, vector_length);
        sum_b_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_y_f64m2, sum_b_y_f64m2, b_y_f32m1, vector_length);
        sum_b_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_z_f64m2, sum_b_z_f64m2, b_z_f32m1, vector_length);
        // Widening FMA: raw Σ a[i]*b[j] accumulated per lane in f64.
        cross_00_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_00_f64m2, a_x_f32m1, b_x_f32m1, vector_length);
        cross_01_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_01_f64m2, a_x_f32m1, b_y_f32m1, vector_length);
        cross_02_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_02_f64m2, a_x_f32m1, b_z_f32m1, vector_length);
        cross_10_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_10_f64m2, a_y_f32m1, b_x_f32m1, vector_length);
        cross_11_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_11_f64m2, a_y_f32m1, b_y_f32m1, vector_length);
        cross_12_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_12_f64m2, a_y_f32m1, b_z_f32m1, vector_length);
        cross_20_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_20_f64m2, a_z_f32m1, b_x_f32m1, vector_length);
        cross_21_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_21_f64m2, a_z_f32m1, b_y_f32m1, vector_length);
        cross_22_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_22_f64m2, a_z_f32m1, b_z_f32m1, vector_length);
    }
    vfloat64m1_t zero_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, 1);
    // Compute centroids
    nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
    nk_f64_t ca_x_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_x_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t ca_y_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_y_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t ca_z_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_z_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_x_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_x_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_y_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_y_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_z_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_z_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    *ca_x = ca_x_;
    *ca_y = ca_y_;
    *ca_z = ca_z_;
    *cb_x = cb_x_;
    *cb_y = cb_y_;
    *cb_z = cb_z_;
    // Fix up: H[i][j] = raw[i][j] - n * ca[i] * cb[j]
    nk_f64_t n_f64 = (nk_f64_t)n;
    h[0] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_00_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_x_;
    h[1] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_01_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_y_;
    h[2] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_02_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_z_;
    h[3] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_10_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_x_;
    h[4] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_11_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_y_;
    h[5] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_12_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_z_;
    h[6] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_20_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_x_;
    h[7] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_21_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_y_;
    h[8] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_22_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_z_;
}
291
+
292
/**
 * @brief Compute centroids and cross-covariance matrix in a single pass (f64).
 *
 * Per-lane `vfadd_vv`/`vfmacc_vv` accumulation with deferred `vfredusum` after the loop
 * — eliminates 15 horizontal reductions per iteration.
 *
 * Inputs `a` and `b` are interleaved xyz triplets (loaded with `vlseg3e64`), so
 * a[3*i .. 3*i+2] is point i. On return:
 *   - *ca_x/*ca_y/*ca_z and *cb_x/*cb_y/*cb_z hold the centroids of `a` and `b`;
 *   - h[0..8] holds the row-major 3x3 cross-covariance, computed from the raw
 *     sums-of-products with the `- n * ca[i] * cb[j]` centering correction
 *     applied once after the loop (single-pass formulation).
 *
 * Each running sum carries a paired `compensation_*` vector updated by
 * nk_accumulate_sum_f64m1_rvv_ / nk_accumulate_product_f64m1_rvv_ (compensated
 * summation) and is collapsed to a scalar by nk_dot_stable_sum_f64m1_rvv_.
 *
 * NOTE(review): n == 0 would divide by zero in `inv_n`; presumably callers
 * guarantee n > 0 — confirm at the dispatch layer.
 */
NK_INTERNAL void nk_centroid_and_cross_covariance_f64_rvv_( //
    nk_f64_t const *a, nk_f64_t const *b, nk_size_t n,      //
    nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z,         //
    nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z,         //
    nk_f64_t h[9]) {
    nk_size_t vlmax = __riscv_vsetvlmax_e64m1();
    // Per-lane running sums for the six centroid components, all zero-initialized
    // across the full register so the post-loop reduction over `vlmax` is exact.
    vfloat64m1_t sum_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax), sum_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax), sum_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Compensation vectors for the centroid sums (compensated summation state).
    vfloat64m1_t compensation_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Nine raw cross-product accumulators, cross_ij = Σ a_i * b_j (uncentered).
    vfloat64m1_t cross_00_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_01_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_02_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_10_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_11_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_12_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_20_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_21_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_22_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Matching compensation vectors for the nine cross accumulators.
    vfloat64m1_t compensation_00_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_01_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_02_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_10_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_11_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_12_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_20_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_21_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_22_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    nk_f64_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined loop: each iteration consumes `vector_length` xyz triplets
    // (hence the `* 3` pointer advance).
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e64m1(remaining);
        // Segment loads deinterleave x/y/z into separate registers.
        vfloat64m1x3_t a_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(a_ptr, vector_length);
        vfloat64m1_t a_x_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 0);
        vfloat64m1_t a_y_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 1);
        vfloat64m1_t a_z_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 2);
        vfloat64m1x3_t b_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(b_ptr, vector_length);
        vfloat64m1_t b_x_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 0);
        vfloat64m1_t b_y_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 1);
        vfloat64m1_t b_z_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 2);
        // Compensated per-lane sums for the centroids.
        nk_accumulate_sum_f64m1_rvv_(&sum_a_x_f64m1, &compensation_a_x_f64m1, a_x_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_y_f64m1, &compensation_a_y_f64m1, a_y_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_z_f64m1, &compensation_a_z_f64m1, a_z_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_x_f64m1, &compensation_b_x_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_y_f64m1, &compensation_b_y_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_z_f64m1, &compensation_b_z_f64m1, b_z_f64m1, vector_length);
        // Compensated per-lane products for the nine H entries (raw, uncentered).
        nk_accumulate_product_f64m1_rvv_(&cross_00_f64m1, &compensation_00_f64m1, a_x_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_01_f64m1, &compensation_01_f64m1, a_x_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_02_f64m1, &compensation_02_f64m1, a_x_f64m1, b_z_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_10_f64m1, &compensation_10_f64m1, a_y_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_11_f64m1, &compensation_11_f64m1, a_y_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_12_f64m1, &compensation_12_f64m1, a_y_f64m1, b_z_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_20_f64m1, &compensation_20_f64m1, a_z_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_21_f64m1, &compensation_21_f64m1, a_z_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_22_f64m1, &compensation_22_f64m1, a_z_f64m1, b_z_f64m1, vector_length);
    }
    // Compute centroids.
    nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
    nk_f64_t ca_x_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_x_f64m1, compensation_a_x_f64m1) * inv_n;
    nk_f64_t ca_y_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_y_f64m1, compensation_a_y_f64m1) * inv_n;
    nk_f64_t ca_z_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_z_f64m1, compensation_a_z_f64m1) * inv_n;
    nk_f64_t cb_x_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_x_f64m1, compensation_b_x_f64m1) * inv_n;
    nk_f64_t cb_y_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_y_f64m1, compensation_b_y_f64m1) * inv_n;
    nk_f64_t cb_z_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_z_f64m1, compensation_b_z_f64m1) * inv_n;
    *ca_x = ca_x_;
    *ca_y = ca_y_;
    *ca_z = ca_z_;
    *cb_x = cb_x_;
    *cb_y = cb_y_;
    *cb_z = cb_z_;
    // Center the raw cross sums: H[i][j] = Σ a_i b_j - n * ca[i] * cb[j].
    nk_f64_t n_f64 = (nk_f64_t)n;
    h[0] = nk_dot_stable_sum_f64m1_rvv_(cross_00_f64m1, compensation_00_f64m1) - n_f64 * ca_x_ * cb_x_;
    h[1] = nk_dot_stable_sum_f64m1_rvv_(cross_01_f64m1, compensation_01_f64m1) - n_f64 * ca_x_ * cb_y_;
    h[2] = nk_dot_stable_sum_f64m1_rvv_(cross_02_f64m1, compensation_02_f64m1) - n_f64 * ca_x_ * cb_z_;
    h[3] = nk_dot_stable_sum_f64m1_rvv_(cross_10_f64m1, compensation_10_f64m1) - n_f64 * ca_y_ * cb_x_;
    h[4] = nk_dot_stable_sum_f64m1_rvv_(cross_11_f64m1, compensation_11_f64m1) - n_f64 * ca_y_ * cb_y_;
    h[5] = nk_dot_stable_sum_f64m1_rvv_(cross_12_f64m1, compensation_12_f64m1) - n_f64 * ca_y_ * cb_z_;
    h[6] = nk_dot_stable_sum_f64m1_rvv_(cross_20_f64m1, compensation_20_f64m1) - n_f64 * ca_z_ * cb_x_;
    h[7] = nk_dot_stable_sum_f64m1_rvv_(cross_21_f64m1, compensation_21_f64m1) - n_f64 * ca_z_ * cb_y_;
    h[8] = nk_dot_stable_sum_f64m1_rvv_(cross_22_f64m1, compensation_22_f64m1) - n_f64 * ca_z_ * cb_z_;
}
386
+
387
/**
 * @brief Compute centroids, cross-covariance, and variance_a in a single pass (f32).
 *
 * Same as centroid_and_cross_covariance but also computes:
 *   variance_a = (1/n) * Σ ||a[i] - ca||²
 *              = (1/n) * (Σ ||a[i]||² - n * ||ca||²)
 *
 * Cross-products use per-lane `vfwmacc_vv` accumulation (vfloat64m2_t) with
 * deferred `vfredusum` after the loop — eliminates 9 reductions per iteration.
 *
 * Inputs are f32 interleaved xyz triplets; all accumulation is widened to f64
 * (vfwadd/vfwmacc), which is why no explicit compensation vectors are needed
 * here, unlike the f64 variant. The loop vl is set in e32m1 units; each f32m1
 * source widens into an f64m2 accumulator with the same element count, and the
 * `_tu` (tail-undisturbed) forms keep lanes past `vector_length` intact so the
 * final reduction over `vlmax` (e64m2 lanes) is exact.
 *
 * NOTE(review): n == 0 would divide by zero in `inv_n`; presumably callers
 * guarantee n > 0 — confirm at the dispatch layer.
 */
NK_INTERNAL void nk_centroid_and_cross_covariance_and_variance_f32_rvv_( //
    nk_f32_t const *a, nk_f32_t const *b, nk_size_t n,                   //
    nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z,                      //
    nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z,                      //
    nk_f64_t h[9], nk_f64_t *variance_a) {
    nk_size_t vlmax = __riscv_vsetvlmax_e64m2();
    // Widened (f64m2) per-lane sums for the six centroid components.
    vfloat64m2_t sum_a_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax), sum_a_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_a_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_b_x_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax), sum_b_y_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t sum_b_z_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    // Nine raw cross-product accumulators, cross_ij = Σ a_i * b_j (uncentered).
    vfloat64m2_t cross_00_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_01_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_02_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_10_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_11_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_12_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_20_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax),
                 cross_21_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    vfloat64m2_t cross_22_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    // Accumulator for Σ ||a[i]||² (raw, not centered) used by variance_a.
    vfloat64m2_t sum_norm_squared_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    nk_f32_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined over xyz triplets; pointer advance is 3 floats per element.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e32m1(remaining);
        // Segment loads deinterleave x/y/z into separate f32 registers.
        vfloat32m1x3_t a_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(a_ptr, vector_length);
        vfloat32m1_t a_x_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 0);
        vfloat32m1_t a_y_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 1);
        vfloat32m1_t a_z_f32m1 = __riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 2);
        vfloat32m1x3_t b_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(b_ptr, vector_length);
        vfloat32m1_t b_x_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 0);
        vfloat32m1_t b_y_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 1);
        vfloat32m1_t b_z_f32m1 = __riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 2);
        // Widening adds: f64 accumulator += f32 source, tail lanes undisturbed.
        sum_a_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_x_f64m2, sum_a_x_f64m2, a_x_f32m1, vector_length);
        sum_a_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_y_f64m2, sum_a_y_f64m2, a_y_f32m1, vector_length);
        sum_a_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_a_z_f64m2, sum_a_z_f64m2, a_z_f32m1, vector_length);
        sum_b_x_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_x_f64m2, sum_b_x_f64m2, b_x_f32m1, vector_length);
        sum_b_y_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_y_f64m2, sum_b_y_f64m2, b_y_f32m1, vector_length);
        sum_b_z_f64m2 = __riscv_vfwadd_wv_f64m2_tu(sum_b_z_f64m2, sum_b_z_f64m2, b_z_f32m1, vector_length);
        // Widening multiply-accumulate: f64 accumulator += (f64)a_i * (f64)b_j.
        cross_00_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_00_f64m2, a_x_f32m1, b_x_f32m1, vector_length);
        cross_01_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_01_f64m2, a_x_f32m1, b_y_f32m1, vector_length);
        cross_02_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_02_f64m2, a_x_f32m1, b_z_f32m1, vector_length);
        cross_10_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_10_f64m2, a_y_f32m1, b_x_f32m1, vector_length);
        cross_11_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_11_f64m2, a_y_f32m1, b_y_f32m1, vector_length);
        cross_12_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_12_f64m2, a_y_f32m1, b_z_f32m1, vector_length);
        cross_20_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_20_f64m2, a_z_f32m1, b_x_f32m1, vector_length);
        cross_21_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_21_f64m2, a_z_f32m1, b_y_f32m1, vector_length);
        cross_22_f64m2 = __riscv_vfwmacc_vv_f64m2_tu(cross_22_f64m2, a_z_f32m1, b_z_f32m1, vector_length);
        // Variance: Σ (a_x² + a_y² + a_z²) — raw, not centered.
        vfloat64m2_t norm_squared_f64m2 = __riscv_vfwmul_vv_f64m2(a_x_f32m1, a_x_f32m1, vector_length);
        norm_squared_f64m2 = __riscv_vfwmacc_vv_f64m2(norm_squared_f64m2, a_y_f32m1, a_y_f32m1, vector_length);
        norm_squared_f64m2 = __riscv_vfwmacc_vv_f64m2(norm_squared_f64m2, a_z_f32m1, a_z_f32m1, vector_length);
        // Tail-undisturbed add: garbage tail lanes of norm_squared are ignored.
        sum_norm_squared_f64m2 = __riscv_vfadd_vv_f64m2_tu(sum_norm_squared_f64m2, sum_norm_squared_f64m2,
                                                           norm_squared_f64m2, vector_length);
    }
    // Scalar seed for the horizontal reductions (only element 0 is read).
    vfloat64m1_t zero_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, 1);
    nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
    nk_f64_t ca_x_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_x_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t ca_y_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_y_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t ca_z_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_a_z_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_x_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_x_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_y_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_y_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    nk_f64_t cb_z_ = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_b_z_f64m2, zero_f64m1, vlmax)) *
                     inv_n;
    *ca_x = ca_x_;
    *ca_y = ca_y_;
    *ca_z = ca_z_;
    *cb_x = cb_x_;
    *cb_y = cb_y_;
    *cb_z = cb_z_;
    // Center the raw cross sums: H[i][j] = Σ a_i b_j - n * ca[i] * cb[j].
    nk_f64_t n_f64 = (nk_f64_t)n;
    h[0] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_00_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_x_;
    h[1] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_01_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_y_;
    h[2] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_02_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_x_ * cb_z_;
    h[3] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_10_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_x_;
    h[4] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_11_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_y_;
    h[5] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_12_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_y_ * cb_z_;
    h[6] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_20_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_x_;
    h[7] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_21_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_y_;
    h[8] = __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(cross_22_f64m2, zero_f64m1, vlmax)) -
           n_f64 * ca_z_ * cb_z_;
    // variance_a = (1/n) * (Σ ||a[i]||² - n * ||ca||²)  =  Σ||a||²/n - ||ca||²
    *variance_a = __riscv_vfmv_f_s_f64m1_f64(
                      __riscv_vfredusum_vs_f64m2_f64m1(sum_norm_squared_f64m2, zero_f64m1, vlmax)) *
                      inv_n -
                  (ca_x_ * ca_x_ + ca_y_ * ca_y_ + ca_z_ * ca_z_);
}
497
+
498
/**
 * @brief Compute centroids, cross-covariance, and variance_a in a single pass (f64).
 *
 * Per-lane `vfadd_vv`/`vfmacc_vv` accumulation with deferred `vfredusum` after the loop
 * — eliminates 16 horizontal reductions per iteration.
 *
 * f64 counterpart of the f32+variance kernel: since there is no wider type to
 * widen into, every running sum carries a paired `compensation_*` vector
 * (compensated summation via nk_accumulate_sum/product_f64m1_rvv_) that is
 * folded in by nk_dot_stable_sum_f64m1_rvv_ at the end. Inputs are interleaved
 * xyz triplets; variance_a uses the single-pass identity
 *   variance_a = (1/n) * Σ ||a[i]||² - ||ca||².
 *
 * NOTE(review): n == 0 would divide by zero in `inv_n`; presumably callers
 * guarantee n > 0 — confirm at the dispatch layer.
 */
NK_INTERNAL void nk_centroid_and_cross_covariance_and_variance_f64_rvv_( //
    nk_f64_t const *a, nk_f64_t const *b, nk_size_t n,                   //
    nk_f64_t *ca_x, nk_f64_t *ca_y, nk_f64_t *ca_z,                      //
    nk_f64_t *cb_x, nk_f64_t *cb_y, nk_f64_t *cb_z,                      //
    nk_f64_t h[9], nk_f64_t *variance_a) {
    nk_size_t vlmax = __riscv_vsetvlmax_e64m1();
    // Per-lane running sums for the six centroid components.
    vfloat64m1_t sum_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax), sum_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax), sum_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t sum_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Compensation vectors for the centroid sums.
    vfloat64m1_t compensation_a_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_a_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_x_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_y_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_b_z_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Nine raw cross-product accumulators, cross_ij = Σ a_i * b_j (uncentered).
    vfloat64m1_t cross_00_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_01_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_02_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_10_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_11_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_12_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_20_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax),
                 cross_21_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t cross_22_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Matching compensation vectors for the nine cross accumulators.
    vfloat64m1_t compensation_00_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_01_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_02_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_10_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_11_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_12_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_20_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_21_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_22_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    // Accumulator (+ compensation) for Σ ||a[i]||², used by variance_a.
    vfloat64m1_t sum_norm_squared_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_norm_squared_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    nk_f64_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined over xyz triplets; pointer advance is 3 doubles per element.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e64m1(remaining);
        // Segment loads deinterleave x/y/z into separate registers.
        vfloat64m1x3_t a_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(a_ptr, vector_length);
        vfloat64m1_t a_x_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 0);
        vfloat64m1_t a_y_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 1);
        vfloat64m1_t a_z_f64m1 = __riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 2);
        vfloat64m1x3_t b_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(b_ptr, vector_length);
        vfloat64m1_t b_x_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 0);
        vfloat64m1_t b_y_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 1);
        vfloat64m1_t b_z_f64m1 = __riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 2);
        // Compensated per-lane sums for the centroids.
        nk_accumulate_sum_f64m1_rvv_(&sum_a_x_f64m1, &compensation_a_x_f64m1, a_x_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_y_f64m1, &compensation_a_y_f64m1, a_y_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_a_z_f64m1, &compensation_a_z_f64m1, a_z_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_x_f64m1, &compensation_b_x_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_y_f64m1, &compensation_b_y_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_b_z_f64m1, &compensation_b_z_f64m1, b_z_f64m1, vector_length);
        // Compensated per-lane products for the nine H entries (raw, uncentered).
        nk_accumulate_product_f64m1_rvv_(&cross_00_f64m1, &compensation_00_f64m1, a_x_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_01_f64m1, &compensation_01_f64m1, a_x_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_02_f64m1, &compensation_02_f64m1, a_x_f64m1, b_z_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_10_f64m1, &compensation_10_f64m1, a_y_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_11_f64m1, &compensation_11_f64m1, a_y_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_12_f64m1, &compensation_12_f64m1, a_y_f64m1, b_z_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_20_f64m1, &compensation_20_f64m1, a_z_f64m1, b_x_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_21_f64m1, &compensation_21_f64m1, a_z_f64m1, b_y_f64m1, vector_length);
        nk_accumulate_product_f64m1_rvv_(&cross_22_f64m1, &compensation_22_f64m1, a_z_f64m1, b_z_f64m1, vector_length);
        // Per-lane ||a[i]||² = a_x² + a_y² + a_z², then compensated accumulation.
        vfloat64m1_t norm_squared_f64m1 = __riscv_vfmul_vv_f64m1(a_x_f64m1, a_x_f64m1, vector_length);
        norm_squared_f64m1 = __riscv_vfmacc_vv_f64m1(norm_squared_f64m1, a_y_f64m1, a_y_f64m1, vector_length);
        norm_squared_f64m1 = __riscv_vfmacc_vv_f64m1(norm_squared_f64m1, a_z_f64m1, a_z_f64m1, vector_length);
        nk_accumulate_sum_f64m1_rvv_(&sum_norm_squared_f64m1, &compensation_norm_squared_f64m1, norm_squared_f64m1,
                                     vector_length);
    }
    // Compute centroids.
    nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
    nk_f64_t ca_x_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_x_f64m1, compensation_a_x_f64m1) * inv_n;
    nk_f64_t ca_y_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_y_f64m1, compensation_a_y_f64m1) * inv_n;
    nk_f64_t ca_z_ = nk_dot_stable_sum_f64m1_rvv_(sum_a_z_f64m1, compensation_a_z_f64m1) * inv_n;
    nk_f64_t cb_x_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_x_f64m1, compensation_b_x_f64m1) * inv_n;
    nk_f64_t cb_y_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_y_f64m1, compensation_b_y_f64m1) * inv_n;
    nk_f64_t cb_z_ = nk_dot_stable_sum_f64m1_rvv_(sum_b_z_f64m1, compensation_b_z_f64m1) * inv_n;
    *ca_x = ca_x_;
    *ca_y = ca_y_;
    *ca_z = ca_z_;
    *cb_x = cb_x_;
    *cb_y = cb_y_;
    *cb_z = cb_z_;
    // Center the raw cross sums: H[i][j] = Σ a_i b_j - n * ca[i] * cb[j].
    nk_f64_t n_f64 = (nk_f64_t)n;
    h[0] = nk_dot_stable_sum_f64m1_rvv_(cross_00_f64m1, compensation_00_f64m1) - n_f64 * ca_x_ * cb_x_;
    h[1] = nk_dot_stable_sum_f64m1_rvv_(cross_01_f64m1, compensation_01_f64m1) - n_f64 * ca_x_ * cb_y_;
    h[2] = nk_dot_stable_sum_f64m1_rvv_(cross_02_f64m1, compensation_02_f64m1) - n_f64 * ca_x_ * cb_z_;
    h[3] = nk_dot_stable_sum_f64m1_rvv_(cross_10_f64m1, compensation_10_f64m1) - n_f64 * ca_y_ * cb_x_;
    h[4] = nk_dot_stable_sum_f64m1_rvv_(cross_11_f64m1, compensation_11_f64m1) - n_f64 * ca_y_ * cb_y_;
    h[5] = nk_dot_stable_sum_f64m1_rvv_(cross_12_f64m1, compensation_12_f64m1) - n_f64 * ca_y_ * cb_z_;
    h[6] = nk_dot_stable_sum_f64m1_rvv_(cross_20_f64m1, compensation_20_f64m1) - n_f64 * ca_z_ * cb_x_;
    h[7] = nk_dot_stable_sum_f64m1_rvv_(cross_21_f64m1, compensation_21_f64m1) - n_f64 * ca_z_ * cb_y_;
    h[8] = nk_dot_stable_sum_f64m1_rvv_(cross_22_f64m1, compensation_22_f64m1) - n_f64 * ca_z_ * cb_z_;
    // variance_a = (1/n) * Σ ||a[i]||² - ||ca||²  (single-pass identity).
    *variance_a = nk_dot_stable_sum_f64m1_rvv_(sum_norm_squared_f64m1, compensation_norm_squared_f64m1) * inv_n -
                  (ca_x_ * ca_x_ + ca_y_ * ca_y_ + ca_z_ * ca_z_);
}
600
+
601
/**
 * @brief Sum of squared distances after a similarity transform (f32 inputs, f64 math).
 *
 * Computes Σᵢ || scale * R * (a[i] - ca) - (b[i] - cb) ||² where `r` is a
 * row-major 3x3 rotation matrix, `ca`/`cb` are the precomputed centroids, and
 * `a`/`b` are interleaved xyz triplets. The f32 inputs are widened to f64
 * (`vfwcvt`) before centering, so all arithmetic runs in double precision;
 * `scale` is folded into the rotation rows up front to save one multiply per
 * component. Accumulation uses a tail-undisturbed (`_tu`) f64m2 register,
 * reduced once with `vfredusum` over `vlmax` at the end.
 */
NK_INTERNAL nk_f64_t nk_transformed_ssd_f32_rvv_( //
    nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, //
    nk_f64_t const *r, nk_f64_t scale,                 //
    nk_f64_t ca_x, nk_f64_t ca_y, nk_f64_t ca_z,       //
    nk_f64_t cb_x, nk_f64_t cb_y, nk_f64_t cb_z) {
    // Pre-scale the rotation rows: (scale * R) applied as three dot products.
    nk_f64_t scaled_rotation_x_x = scale * r[0], scaled_rotation_x_y = scale * r[1], scaled_rotation_x_z = scale * r[2];
    nk_f64_t scaled_rotation_y_x = scale * r[3], scaled_rotation_y_y = scale * r[4], scaled_rotation_y_z = scale * r[5];
    nk_f64_t scaled_rotation_z_x = scale * r[6], scaled_rotation_z_y = scale * r[7], scaled_rotation_z_z = scale * r[8];
    nk_size_t vlmax = __riscv_vsetvlmax_e64m2();
    vfloat64m2_t sum_distance_squared_f64m2 = __riscv_vfmv_v_f_f64m2(0.0, vlmax);
    // Scalar seed for the final horizontal reduction (only element 0 is read).
    vfloat64m1_t zero_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, 1);
    nk_f32_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined over xyz triplets; vl is set in e32m1 units and matches the
    // widened f64m2 element count lane-for-lane.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e32m1(remaining);
        // Load a-triplets, widen to f64, and subtract the `a` centroid.
        vfloat32m1x3_t a_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(a_ptr, vector_length);
        vfloat64m2_t centered_a_x_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 0), vector_length), ca_x, vector_length);
        vfloat64m2_t centered_a_y_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 1), vector_length), ca_y, vector_length);
        vfloat64m2_t centered_a_z_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(a_f32m1x3, 2), vector_length), ca_z, vector_length);
        // rotated = (scale * R) * centered_a, one row-dot-product per component.
        vfloat64m2_t rotated_a_x_f64m2 = __riscv_vfmul_vf_f64m2(centered_a_x_f64m2, scaled_rotation_x_x, vector_length);
        rotated_a_x_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_x_f64m2, scaled_rotation_x_y, centered_a_y_f64m2,
                                                    vector_length);
        rotated_a_x_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_x_f64m2, scaled_rotation_x_z, centered_a_z_f64m2,
                                                    vector_length);
        vfloat64m2_t rotated_a_y_f64m2 = __riscv_vfmul_vf_f64m2(centered_a_x_f64m2, scaled_rotation_y_x, vector_length);
        rotated_a_y_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_y_f64m2, scaled_rotation_y_y, centered_a_y_f64m2,
                                                    vector_length);
        rotated_a_y_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_y_f64m2, scaled_rotation_y_z, centered_a_z_f64m2,
                                                    vector_length);
        vfloat64m2_t rotated_a_z_f64m2 = __riscv_vfmul_vf_f64m2(centered_a_x_f64m2, scaled_rotation_z_x, vector_length);
        rotated_a_z_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_z_f64m2, scaled_rotation_z_y, centered_a_y_f64m2,
                                                    vector_length);
        rotated_a_z_f64m2 = __riscv_vfmacc_vf_f64m2(rotated_a_z_f64m2, scaled_rotation_z_z, centered_a_z_f64m2,
                                                    vector_length);
        // Load b-triplets, widen, and subtract the `b` centroid.
        vfloat32m1x3_t b_f32m1x3 = __riscv_vlseg3e32_v_f32m1x3(b_ptr, vector_length);
        vfloat64m2_t centered_b_x_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 0), vector_length), cb_x, vector_length);
        vfloat64m2_t centered_b_y_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 1), vector_length), cb_y, vector_length);
        vfloat64m2_t centered_b_z_f64m2 = __riscv_vfsub_vf_f64m2(
            __riscv_vfwcvt_f_f_v_f64m2(__riscv_vget_v_f32m1x3_f32m1(b_f32m1x3, 2), vector_length), cb_z, vector_length);
        // Residual and its squared norm, accumulated with tail lanes undisturbed.
        vfloat64m2_t delta_x_f64m2 = __riscv_vfsub_vv_f64m2(rotated_a_x_f64m2, centered_b_x_f64m2, vector_length);
        vfloat64m2_t delta_y_f64m2 = __riscv_vfsub_vv_f64m2(rotated_a_y_f64m2, centered_b_y_f64m2, vector_length);
        vfloat64m2_t delta_z_f64m2 = __riscv_vfsub_vv_f64m2(rotated_a_z_f64m2, centered_b_z_f64m2, vector_length);
        sum_distance_squared_f64m2 = __riscv_vfmacc_vv_f64m2_tu(sum_distance_squared_f64m2, delta_x_f64m2,
                                                                delta_x_f64m2, vector_length);
        sum_distance_squared_f64m2 = __riscv_vfmacc_vv_f64m2_tu(sum_distance_squared_f64m2, delta_y_f64m2,
                                                                delta_y_f64m2, vector_length);
        sum_distance_squared_f64m2 = __riscv_vfmacc_vv_f64m2_tu(sum_distance_squared_f64m2, delta_z_f64m2,
                                                                delta_z_f64m2, vector_length);
    }
    // Single deferred horizontal reduction over all accumulator lanes.
    return __riscv_vfmv_f_s_f64m1_f64(__riscv_vfredusum_vs_f64m2_f64m1(sum_distance_squared_f64m2, zero_f64m1, vlmax));
}
658
+
659
/**
 * @brief Sum of squared distances after a similarity transform (f64).
 *
 * Computes Σᵢ || scale * R * (a[i] - ca) - (b[i] - cb) ||² where `r` is a
 * row-major 3x3 rotation matrix, `ca`/`cb` are the precomputed centroids, and
 * `a`/`b` are interleaved xyz triplets. `scale` is folded into the rotation
 * rows up front. Unlike the f32 variant there is no wider accumulator type, so
 * the running sum uses a compensation vector (nk_accumulate_sum_f64m1_rvv_)
 * and the compensated pair is collapsed by nk_dot_stable_sum_f64m1_rvv_.
 */
NK_INTERNAL nk_f64_t nk_transformed_ssd_f64_rvv_( //
    nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, //
    nk_f64_t const *r, nk_f64_t scale,                 //
    nk_f64_t ca_x, nk_f64_t ca_y, nk_f64_t ca_z,       //
    nk_f64_t cb_x, nk_f64_t cb_y, nk_f64_t cb_z) {
    // Pre-scale the rotation rows: (scale * R) applied as three dot products.
    nk_f64_t scaled_rotation_x_x = scale * r[0], scaled_rotation_x_y = scale * r[1], scaled_rotation_x_z = scale * r[2];
    nk_f64_t scaled_rotation_y_x = scale * r[3], scaled_rotation_y_y = scale * r[4], scaled_rotation_y_z = scale * r[5];
    nk_f64_t scaled_rotation_z_x = scale * r[6], scaled_rotation_z_y = scale * r[7], scaled_rotation_z_z = scale * r[8];
    nk_size_t vlmax = __riscv_vsetvlmax_e64m1();
    vfloat64m1_t sum_distance_squared_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t compensation_distance_squared_f64m1 = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    nk_f64_t const *a_ptr = a, *b_ptr = b;
    nk_size_t remaining = n;
    // Strip-mined over xyz triplets; pointer advance is 3 doubles per element.
    for (nk_size_t vector_length; remaining > 0;
         remaining -= vector_length, a_ptr += vector_length * 3, b_ptr += vector_length * 3) {
        vector_length = __riscv_vsetvl_e64m1(remaining);
        // Load a-triplets (deinterleaved) and subtract the `a` centroid.
        vfloat64m1x3_t a_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(a_ptr, vector_length);
        vfloat64m1_t centered_a_x_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 0), ca_x,
                                                                 vector_length);
        vfloat64m1_t centered_a_y_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 1), ca_y,
                                                                 vector_length);
        vfloat64m1_t centered_a_z_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(a_f64m1x3, 2), ca_z,
                                                                 vector_length);
        // rotated = (scale * R) * centered_a, one row-dot-product per component.
        vfloat64m1_t rotated_a_x_f64m1 = __riscv_vfmul_vf_f64m1(centered_a_x_f64m1, scaled_rotation_x_x, vector_length);
        rotated_a_x_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_x_f64m1, scaled_rotation_x_y, centered_a_y_f64m1,
                                                    vector_length);
        rotated_a_x_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_x_f64m1, scaled_rotation_x_z, centered_a_z_f64m1,
                                                    vector_length);
        vfloat64m1_t rotated_a_y_f64m1 = __riscv_vfmul_vf_f64m1(centered_a_x_f64m1, scaled_rotation_y_x, vector_length);
        rotated_a_y_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_y_f64m1, scaled_rotation_y_y, centered_a_y_f64m1,
                                                    vector_length);
        rotated_a_y_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_y_f64m1, scaled_rotation_y_z, centered_a_z_f64m1,
                                                    vector_length);
        vfloat64m1_t rotated_a_z_f64m1 = __riscv_vfmul_vf_f64m1(centered_a_x_f64m1, scaled_rotation_z_x, vector_length);
        rotated_a_z_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_z_f64m1, scaled_rotation_z_y, centered_a_y_f64m1,
                                                    vector_length);
        rotated_a_z_f64m1 = __riscv_vfmacc_vf_f64m1(rotated_a_z_f64m1, scaled_rotation_z_z, centered_a_z_f64m1,
                                                    vector_length);
        // Load b-triplets and subtract the `b` centroid.
        vfloat64m1x3_t b_f64m1x3 = __riscv_vlseg3e64_v_f64m1x3(b_ptr, vector_length);
        vfloat64m1_t centered_b_x_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 0), cb_x,
                                                                 vector_length);
        vfloat64m1_t centered_b_y_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 1), cb_y,
                                                                 vector_length);
        vfloat64m1_t centered_b_z_f64m1 = __riscv_vfsub_vf_f64m1(__riscv_vget_v_f64m1x3_f64m1(b_f64m1x3, 2), cb_z,
                                                                 vector_length);
        // Residual and its squared norm per lane.
        vfloat64m1_t delta_x_f64m1 = __riscv_vfsub_vv_f64m1(rotated_a_x_f64m1, centered_b_x_f64m1, vector_length);
        vfloat64m1_t delta_y_f64m1 = __riscv_vfsub_vv_f64m1(rotated_a_y_f64m1, centered_b_y_f64m1, vector_length);
        vfloat64m1_t delta_z_f64m1 = __riscv_vfsub_vv_f64m1(rotated_a_z_f64m1, centered_b_z_f64m1, vector_length);
        vfloat64m1_t distance_squared_f64m1 = __riscv_vfmul_vv_f64m1(delta_x_f64m1, delta_x_f64m1, vector_length);
        distance_squared_f64m1 = __riscv_vfmacc_vv_f64m1(distance_squared_f64m1, delta_y_f64m1, delta_y_f64m1,
                                                         vector_length);
        distance_squared_f64m1 = __riscv_vfmacc_vv_f64m1(distance_squared_f64m1, delta_z_f64m1, delta_z_f64m1,
                                                         vector_length);
        // Compensated accumulation of the per-lane squared distances.
        nk_accumulate_sum_f64m1_rvv_(&sum_distance_squared_f64m1, &compensation_distance_squared_f64m1,
                                     distance_squared_f64m1, vector_length);
    }
    // Fold the compensation in and reduce to a scalar.
    return nk_dot_stable_sum_f64m1_rvv_(sum_distance_squared_f64m1, compensation_distance_squared_f64m1);
}
717
+
718
+ /** @brief Compute R = V * Uᵀ from SVD factors (f32), vectorized with `vfmul_vf`/`vfmacc_vf`. */
719
+ NK_INTERNAL void nk_rotation_from_svd_f32_rvv_( //
720
+ nk_f32_t *svd_u, nk_f32_t *svd_v, nk_f32_t r[9]) {
721
+ nk_size_t vl3 = __riscv_vsetvl_e32m1(3);
722
+ vfloat32m1_t u_row0_f32m1 = __riscv_vle32_v_f32m1(svd_u + 0, vl3);
723
+ vfloat32m1_t u_row1_f32m1 = __riscv_vle32_v_f32m1(svd_u + 3, vl3);
724
+ vfloat32m1_t u_row2_f32m1 = __riscv_vle32_v_f32m1(svd_u + 6, vl3);
725
+ // Row 0: R[0..2] = V[0]*U_row0 + V[1]*U_row1 + V[2]*U_row2
726
+ vfloat32m1_t rotation_row_f32m1 = __riscv_vfmul_vf_f32m1(u_row0_f32m1, svd_v[0], vl3);
727
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[1], u_row1_f32m1, vl3);
728
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[2], u_row2_f32m1, vl3);
729
+ __riscv_vse32_v_f32m1(r + 0, rotation_row_f32m1, vl3);
730
+ // Row 1: R[3..5]
731
+ rotation_row_f32m1 = __riscv_vfmul_vf_f32m1(u_row0_f32m1, svd_v[3], vl3);
732
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[4], u_row1_f32m1, vl3);
733
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[5], u_row2_f32m1, vl3);
734
+ __riscv_vse32_v_f32m1(r + 3, rotation_row_f32m1, vl3);
735
+ // Row 2: R[6..8]
736
+ rotation_row_f32m1 = __riscv_vfmul_vf_f32m1(u_row0_f32m1, svd_v[6], vl3);
737
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[7], u_row1_f32m1, vl3);
738
+ rotation_row_f32m1 = __riscv_vfmacc_vf_f32m1(rotation_row_f32m1, svd_v[8], u_row2_f32m1, vl3);
739
+ __riscv_vse32_v_f32m1(r + 6, rotation_row_f32m1, vl3);
740
+ }
741
+
742
/** @brief Compute R = V * Uᵀ from SVD factors (f64), vectorized with `vfmul_vf`/`vfmacc_vf`. */
NK_INTERNAL void nk_rotation_from_svd_f64_rvv_( //
    nk_f64_t *svd_u, nk_f64_t *svd_v, nk_f64_t r[9]) {
    // Delegates to the scalar implementation; NOTE(review): presumably 3 f64 lanes
    // per row make vectorization unprofitable here — confirm against the f32 variant.
    nk_rotation_from_svd_f64_serial_(svd_u, svd_v, r);
}
747
+
748
+ NK_PUBLIC void nk_rmsd_f32_rvv(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
749
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
750
+ nk_f64_t identity[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
751
+ if (rotation)
752
+ for (int j = 0; j < 9; ++j) rotation[j] = identity[j];
753
+ if (scale) *scale = 1.0f;
754
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
755
+ nk_bicentroid_f32_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z);
756
+ if (a_centroid) a_centroid[0] = (nk_f32_t)ca_x, a_centroid[1] = (nk_f32_t)ca_y, a_centroid[2] = (nk_f32_t)ca_z;
757
+ if (b_centroid) b_centroid[0] = (nk_f32_t)cb_x, b_centroid[1] = (nk_f32_t)cb_y, b_centroid[2] = (nk_f32_t)cb_z;
758
+ nk_f64_t ssd = nk_transformed_ssd_f32_rvv_(a, b, n, identity, 1.0, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
759
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
760
+ }
761
+
762
+ NK_PUBLIC void nk_rmsd_f64_rvv(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
763
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
764
+ nk_f64_t identity[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
765
+ if (rotation)
766
+ for (int j = 0; j < 9; ++j) rotation[j] = identity[j];
767
+ if (scale) *scale = 1.0;
768
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
769
+ nk_bicentroid_f64_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z);
770
+ if (a_centroid) a_centroid[0] = ca_x, a_centroid[1] = ca_y, a_centroid[2] = ca_z;
771
+ if (b_centroid) b_centroid[0] = cb_x, b_centroid[1] = cb_y, b_centroid[2] = cb_z;
772
+ nk_f64_t ssd = nk_transformed_ssd_f64_rvv_(a, b, n, identity, 1.0, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
773
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
774
+ }
775
+
776
+ NK_PUBLIC void nk_kabsch_f32_rvv(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
777
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
778
+ if (scale) *scale = 1.0f;
779
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
780
+ nk_f64_t h[9];
781
+ nk_centroid_and_cross_covariance_f32_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z, h);
782
+ if (a_centroid) a_centroid[0] = (nk_f32_t)ca_x, a_centroid[1] = (nk_f32_t)ca_y, a_centroid[2] = (nk_f32_t)ca_z;
783
+ if (b_centroid) b_centroid[0] = (nk_f32_t)cb_x, b_centroid[1] = (nk_f32_t)cb_y, b_centroid[2] = (nk_f32_t)cb_z;
784
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
785
+ nk_svd3x3_f64_(h, svd_u, svd_s, svd_v);
786
+ nk_f64_t r[9];
787
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
788
+ if (nk_det3x3_f64_(r) < 0) {
789
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
790
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
791
+ }
792
+ if (rotation)
793
+ for (int j = 0; j < 9; ++j) rotation[j] = (nk_f32_t)r[j];
794
+ nk_f64_t ssd = nk_transformed_ssd_f32_rvv_(a, b, n, r, 1.0, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
795
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
796
+ }
797
+
798
+ NK_PUBLIC void nk_kabsch_f64_rvv(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
799
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
800
+ if (scale) *scale = 1.0;
801
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
802
+ nk_f64_t h[9];
803
+ nk_centroid_and_cross_covariance_f64_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z, h);
804
+ if (a_centroid) a_centroid[0] = ca_x, a_centroid[1] = ca_y, a_centroid[2] = ca_z;
805
+ if (b_centroid) b_centroid[0] = cb_x, b_centroid[1] = cb_y, b_centroid[2] = cb_z;
806
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
807
+ nk_svd3x3_f64_(h, svd_u, svd_s, svd_v);
808
+ nk_f64_t r[9];
809
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
810
+ if (nk_det3x3_f64_(r) < 0) {
811
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
812
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
813
+ }
814
+ if (rotation)
815
+ for (int j = 0; j < 9; ++j) rotation[j] = r[j];
816
+ nk_f64_t ssd = nk_transformed_ssd_f64_rvv_(a, b, n, r, 1.0, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
817
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
818
+ }
819
+
820
+ NK_PUBLIC void nk_umeyama_f32_rvv(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
821
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
822
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
823
+ nk_f64_t h[9], variance_a;
824
+ nk_centroid_and_cross_covariance_and_variance_f32_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z, h,
825
+ &variance_a);
826
+ if (a_centroid) a_centroid[0] = (nk_f32_t)ca_x, a_centroid[1] = (nk_f32_t)ca_y, a_centroid[2] = (nk_f32_t)ca_z;
827
+ if (b_centroid) b_centroid[0] = (nk_f32_t)cb_x, b_centroid[1] = (nk_f32_t)cb_y, b_centroid[2] = (nk_f32_t)cb_z;
828
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
829
+ nk_svd3x3_f64_(h, svd_u, svd_s, svd_v);
830
+ nk_f64_t r[9];
831
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
832
+ nk_f64_t det = nk_det3x3_f64_(r);
833
+ nk_f64_t sign_det = det < 0 ? -1.0 : 1.0;
834
+ nk_f64_t trace_ds = nk_sum_three_products_f64_(svd_s[0], 1.0, svd_s[4], 1.0, svd_s[8], sign_det);
835
+ nk_f64_t scale_factor = trace_ds / ((nk_f64_t)n * variance_a);
836
+ if (scale) *scale = (nk_f32_t)scale_factor;
837
+ if (det < 0) {
838
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
839
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
840
+ }
841
+ if (rotation)
842
+ for (int j = 0; j < 9; ++j) rotation[j] = (nk_f32_t)r[j];
843
+ nk_f64_t ssd = nk_transformed_ssd_f32_rvv_(a, b, n, r, scale_factor, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
844
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
845
+ }
846
+
847
+ NK_PUBLIC void nk_umeyama_f64_rvv(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
848
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
849
+ nk_f64_t ca_x, ca_y, ca_z, cb_x, cb_y, cb_z;
850
+ nk_f64_t h[9], variance_a;
851
+ nk_centroid_and_cross_covariance_and_variance_f64_rvv_(a, b, n, &ca_x, &ca_y, &ca_z, &cb_x, &cb_y, &cb_z, h,
852
+ &variance_a);
853
+ if (a_centroid) a_centroid[0] = ca_x, a_centroid[1] = ca_y, a_centroid[2] = ca_z;
854
+ if (b_centroid) b_centroid[0] = cb_x, b_centroid[1] = cb_y, b_centroid[2] = cb_z;
855
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
856
+ nk_svd3x3_f64_(h, svd_u, svd_s, svd_v);
857
+ nk_f64_t r[9];
858
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
859
+ nk_f64_t det = nk_det3x3_f64_(r);
860
+ nk_f64_t sign_det = det < 0 ? -1.0 : 1.0;
861
+ nk_f64_t trace_ds = nk_sum_three_products_f64_(svd_s[0], 1.0, svd_s[4], 1.0, svd_s[8], sign_det);
862
+ nk_f64_t scale_factor = trace_ds / ((nk_f64_t)n * variance_a);
863
+ if (scale) *scale = scale_factor;
864
+ if (det < 0) {
865
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
866
+ nk_rotation_from_svd_f64_rvv_(svd_u, svd_v, r);
867
+ }
868
+ if (rotation)
869
+ for (int j = 0; j < 9; ++j) rotation[j] = r[j];
870
+ nk_f64_t ssd = nk_transformed_ssd_f64_rvv_(a, b, n, r, scale_factor, ca_x, ca_y, ca_z, cb_x, cb_y, cb_z);
871
+ *result = nk_f64_sqrt_rvv(ssd / (nk_f64_t)n);
872
+ }
873
+
874
/** @brief f16 RMSD entry point for the RVV target: no half-precision RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_rmsd_f16_rvv(nk_f16_t const *a, nk_f16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                               nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_rmsd_f16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
878
+
879
/** @brief f16 Kabsch entry point for the RVV target: no half-precision RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_kabsch_f16_rvv(nk_f16_t const *a, nk_f16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                                 nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_kabsch_f16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
883
+
884
/** @brief f16 Umeyama entry point for the RVV target: no half-precision RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_umeyama_f16_rvv(nk_f16_t const *a, nk_f16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                                  nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_umeyama_f16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
888
+
889
/** @brief bf16 RMSD entry point for the RVV target: no bfloat16 RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_rmsd_bf16_rvv(nk_bf16_t const *a, nk_bf16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                                nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_rmsd_bf16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
893
+
894
/** @brief bf16 Kabsch entry point for the RVV target: no bfloat16 RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_kabsch_bf16_rvv(nk_bf16_t const *a, nk_bf16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                                  nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_kabsch_bf16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
898
+
899
/** @brief bf16 Umeyama entry point for the RVV target: no bfloat16 RVV kernel
 *  exists here, so it forwards unchanged to the serial implementation. */
NK_PUBLIC void nk_umeyama_bf16_rvv(nk_bf16_t const *a, nk_bf16_t const *b, nk_size_t n, nk_f32_t *a_centroid,
                                   nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f32_t *result) {
    nk_umeyama_bf16_serial(a, b, n, a_centroid, b_centroid, rotation, scale, result);
}
903
+
904
+ #if defined(__cplusplus)
905
+ } // extern "C"
906
+ #endif
907
+
908
+ #if defined(__clang__)
909
+ #pragma clang attribute pop
910
+ #elif defined(__GNUC__)
911
+ #pragma GCC pop_options
912
+ #endif
913
+
914
+ #endif // NK_TARGET_RVV
915
+ #endif // NK_TARGET_RISCV_
916
+ #endif // NK_MESH_RVV_H