numkong 7.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (294):
  1. package/LICENSE +201 -0
  2. package/README.md +495 -0
  3. package/binding.gyp +540 -0
  4. package/c/dispatch.h +512 -0
  5. package/c/dispatch_bf16.c +389 -0
  6. package/c/dispatch_bf16c.c +52 -0
  7. package/c/dispatch_e2m3.c +263 -0
  8. package/c/dispatch_e3m2.c +243 -0
  9. package/c/dispatch_e4m3.c +276 -0
  10. package/c/dispatch_e5m2.c +272 -0
  11. package/c/dispatch_f16.c +376 -0
  12. package/c/dispatch_f16c.c +58 -0
  13. package/c/dispatch_f32.c +378 -0
  14. package/c/dispatch_f32c.c +99 -0
  15. package/c/dispatch_f64.c +296 -0
  16. package/c/dispatch_f64c.c +98 -0
  17. package/c/dispatch_i16.c +96 -0
  18. package/c/dispatch_i32.c +89 -0
  19. package/c/dispatch_i4.c +150 -0
  20. package/c/dispatch_i64.c +86 -0
  21. package/c/dispatch_i8.c +289 -0
  22. package/c/dispatch_other.c +330 -0
  23. package/c/dispatch_u1.c +148 -0
  24. package/c/dispatch_u16.c +124 -0
  25. package/c/dispatch_u32.c +118 -0
  26. package/c/dispatch_u4.c +150 -0
  27. package/c/dispatch_u64.c +102 -0
  28. package/c/dispatch_u8.c +303 -0
  29. package/c/numkong.c +950 -0
  30. package/include/README.md +573 -0
  31. package/include/module.modulemap +129 -0
  32. package/include/numkong/attention/sapphireamx.h +1361 -0
  33. package/include/numkong/attention/sme.h +2066 -0
  34. package/include/numkong/attention.h +49 -0
  35. package/include/numkong/capabilities.h +748 -0
  36. package/include/numkong/cast/README.md +262 -0
  37. package/include/numkong/cast/haswell.h +975 -0
  38. package/include/numkong/cast/icelake.h +470 -0
  39. package/include/numkong/cast/neon.h +1192 -0
  40. package/include/numkong/cast/rvv.h +1021 -0
  41. package/include/numkong/cast/sapphire.h +262 -0
  42. package/include/numkong/cast/serial.h +2262 -0
  43. package/include/numkong/cast/skylake.h +856 -0
  44. package/include/numkong/cast/v128relaxed.h +180 -0
  45. package/include/numkong/cast.h +230 -0
  46. package/include/numkong/curved/README.md +223 -0
  47. package/include/numkong/curved/genoa.h +182 -0
  48. package/include/numkong/curved/haswell.h +276 -0
  49. package/include/numkong/curved/neon.h +205 -0
  50. package/include/numkong/curved/neonbfdot.h +212 -0
  51. package/include/numkong/curved/neonhalf.h +212 -0
  52. package/include/numkong/curved/rvv.h +305 -0
  53. package/include/numkong/curved/serial.h +207 -0
  54. package/include/numkong/curved/skylake.h +457 -0
  55. package/include/numkong/curved/smef64.h +506 -0
  56. package/include/numkong/curved.h +517 -0
  57. package/include/numkong/curved.hpp +144 -0
  58. package/include/numkong/dot/README.md +425 -0
  59. package/include/numkong/dot/alder.h +563 -0
  60. package/include/numkong/dot/genoa.h +315 -0
  61. package/include/numkong/dot/haswell.h +1688 -0
  62. package/include/numkong/dot/icelake.h +883 -0
  63. package/include/numkong/dot/neon.h +818 -0
  64. package/include/numkong/dot/neonbfdot.h +244 -0
  65. package/include/numkong/dot/neonfhm.h +360 -0
  66. package/include/numkong/dot/neonhalf.h +198 -0
  67. package/include/numkong/dot/neonsdot.h +508 -0
  68. package/include/numkong/dot/rvv.h +714 -0
  69. package/include/numkong/dot/rvvbb.h +72 -0
  70. package/include/numkong/dot/rvvbf16.h +123 -0
  71. package/include/numkong/dot/rvvhalf.h +129 -0
  72. package/include/numkong/dot/sapphire.h +141 -0
  73. package/include/numkong/dot/serial.h +838 -0
  74. package/include/numkong/dot/sierra.h +405 -0
  75. package/include/numkong/dot/skylake.h +1084 -0
  76. package/include/numkong/dot/sve.h +379 -0
  77. package/include/numkong/dot/svebfdot.h +74 -0
  78. package/include/numkong/dot/svehalf.h +123 -0
  79. package/include/numkong/dot/v128relaxed.h +1258 -0
  80. package/include/numkong/dot.h +1070 -0
  81. package/include/numkong/dot.hpp +94 -0
  82. package/include/numkong/dots/README.md +496 -0
  83. package/include/numkong/dots/alder.h +114 -0
  84. package/include/numkong/dots/genoa.h +94 -0
  85. package/include/numkong/dots/haswell.h +295 -0
  86. package/include/numkong/dots/icelake.h +171 -0
  87. package/include/numkong/dots/neon.h +120 -0
  88. package/include/numkong/dots/neonbfdot.h +58 -0
  89. package/include/numkong/dots/neonfhm.h +94 -0
  90. package/include/numkong/dots/neonhalf.h +57 -0
  91. package/include/numkong/dots/neonsdot.h +108 -0
  92. package/include/numkong/dots/rvv.h +2486 -0
  93. package/include/numkong/dots/sapphireamx.h +3973 -0
  94. package/include/numkong/dots/serial.h +2844 -0
  95. package/include/numkong/dots/sierra.h +97 -0
  96. package/include/numkong/dots/skylake.h +196 -0
  97. package/include/numkong/dots/sme.h +5372 -0
  98. package/include/numkong/dots/smebi32.h +461 -0
  99. package/include/numkong/dots/smef64.h +1318 -0
  100. package/include/numkong/dots/smehalf.h +47 -0
  101. package/include/numkong/dots/v128relaxed.h +294 -0
  102. package/include/numkong/dots.h +2804 -0
  103. package/include/numkong/dots.hpp +639 -0
  104. package/include/numkong/each/README.md +469 -0
  105. package/include/numkong/each/haswell.h +1658 -0
  106. package/include/numkong/each/icelake.h +272 -0
  107. package/include/numkong/each/neon.h +1104 -0
  108. package/include/numkong/each/neonbfdot.h +212 -0
  109. package/include/numkong/each/neonhalf.h +410 -0
  110. package/include/numkong/each/rvv.h +1121 -0
  111. package/include/numkong/each/sapphire.h +477 -0
  112. package/include/numkong/each/serial.h +260 -0
  113. package/include/numkong/each/skylake.h +1562 -0
  114. package/include/numkong/each.h +2146 -0
  115. package/include/numkong/each.hpp +434 -0
  116. package/include/numkong/geospatial/README.md +147 -0
  117. package/include/numkong/geospatial/haswell.h +593 -0
  118. package/include/numkong/geospatial/neon.h +571 -0
  119. package/include/numkong/geospatial/rvv.h +701 -0
  120. package/include/numkong/geospatial/serial.h +309 -0
  121. package/include/numkong/geospatial/skylake.h +577 -0
  122. package/include/numkong/geospatial/v128relaxed.h +613 -0
  123. package/include/numkong/geospatial.h +453 -0
  124. package/include/numkong/geospatial.hpp +235 -0
  125. package/include/numkong/matrix.hpp +336 -0
  126. package/include/numkong/maxsim/README.md +187 -0
  127. package/include/numkong/maxsim/alder.h +511 -0
  128. package/include/numkong/maxsim/genoa.h +115 -0
  129. package/include/numkong/maxsim/haswell.h +553 -0
  130. package/include/numkong/maxsim/icelake.h +480 -0
  131. package/include/numkong/maxsim/neonsdot.h +394 -0
  132. package/include/numkong/maxsim/sapphireamx.h +877 -0
  133. package/include/numkong/maxsim/serial.h +490 -0
  134. package/include/numkong/maxsim/sme.h +929 -0
  135. package/include/numkong/maxsim/v128relaxed.h +280 -0
  136. package/include/numkong/maxsim.h +571 -0
  137. package/include/numkong/maxsim.hpp +133 -0
  138. package/include/numkong/mesh/README.md +227 -0
  139. package/include/numkong/mesh/haswell.h +2235 -0
  140. package/include/numkong/mesh/neon.h +1329 -0
  141. package/include/numkong/mesh/neonbfdot.h +842 -0
  142. package/include/numkong/mesh/neonhalf.h +616 -0
  143. package/include/numkong/mesh/rvv.h +916 -0
  144. package/include/numkong/mesh/serial.h +742 -0
  145. package/include/numkong/mesh/skylake.h +1135 -0
  146. package/include/numkong/mesh/v128relaxed.h +1052 -0
  147. package/include/numkong/mesh.h +652 -0
  148. package/include/numkong/mesh.hpp +762 -0
  149. package/include/numkong/numkong.h +78 -0
  150. package/include/numkong/numkong.hpp +57 -0
  151. package/include/numkong/probability/README.md +173 -0
  152. package/include/numkong/probability/haswell.h +267 -0
  153. package/include/numkong/probability/neon.h +225 -0
  154. package/include/numkong/probability/rvv.h +409 -0
  155. package/include/numkong/probability/serial.h +169 -0
  156. package/include/numkong/probability/skylake.h +324 -0
  157. package/include/numkong/probability.h +383 -0
  158. package/include/numkong/probability.hpp +120 -0
  159. package/include/numkong/random.h +50 -0
  160. package/include/numkong/random.hpp +285 -0
  161. package/include/numkong/reduce/README.md +547 -0
  162. package/include/numkong/reduce/alder.h +632 -0
  163. package/include/numkong/reduce/genoa.h +201 -0
  164. package/include/numkong/reduce/haswell.h +3783 -0
  165. package/include/numkong/reduce/icelake.h +549 -0
  166. package/include/numkong/reduce/neon.h +3841 -0
  167. package/include/numkong/reduce/neonbfdot.h +353 -0
  168. package/include/numkong/reduce/neonfhm.h +665 -0
  169. package/include/numkong/reduce/neonhalf.h +157 -0
  170. package/include/numkong/reduce/neonsdot.h +357 -0
  171. package/include/numkong/reduce/rvv.h +3407 -0
  172. package/include/numkong/reduce/serial.h +757 -0
  173. package/include/numkong/reduce/sierra.h +338 -0
  174. package/include/numkong/reduce/skylake.h +3792 -0
  175. package/include/numkong/reduce/v128relaxed.h +2302 -0
  176. package/include/numkong/reduce.h +1597 -0
  177. package/include/numkong/reduce.hpp +633 -0
  178. package/include/numkong/scalar/README.md +89 -0
  179. package/include/numkong/scalar/haswell.h +113 -0
  180. package/include/numkong/scalar/neon.h +122 -0
  181. package/include/numkong/scalar/neonhalf.h +70 -0
  182. package/include/numkong/scalar/rvv.h +211 -0
  183. package/include/numkong/scalar/sapphire.h +63 -0
  184. package/include/numkong/scalar/serial.h +332 -0
  185. package/include/numkong/scalar/v128relaxed.h +56 -0
  186. package/include/numkong/scalar.h +683 -0
  187. package/include/numkong/set/README.md +179 -0
  188. package/include/numkong/set/haswell.h +334 -0
  189. package/include/numkong/set/icelake.h +485 -0
  190. package/include/numkong/set/neon.h +364 -0
  191. package/include/numkong/set/rvv.h +226 -0
  192. package/include/numkong/set/rvvbb.h +117 -0
  193. package/include/numkong/set/serial.h +174 -0
  194. package/include/numkong/set/sve.h +185 -0
  195. package/include/numkong/set/v128relaxed.h +240 -0
  196. package/include/numkong/set.h +457 -0
  197. package/include/numkong/set.hpp +114 -0
  198. package/include/numkong/sets/README.md +149 -0
  199. package/include/numkong/sets/haswell.h +63 -0
  200. package/include/numkong/sets/icelake.h +66 -0
  201. package/include/numkong/sets/neon.h +61 -0
  202. package/include/numkong/sets/serial.h +43 -0
  203. package/include/numkong/sets/smebi32.h +1099 -0
  204. package/include/numkong/sets/v128relaxed.h +58 -0
  205. package/include/numkong/sets.h +339 -0
  206. package/include/numkong/sparse/README.md +156 -0
  207. package/include/numkong/sparse/icelake.h +463 -0
  208. package/include/numkong/sparse/neon.h +288 -0
  209. package/include/numkong/sparse/serial.h +117 -0
  210. package/include/numkong/sparse/sve2.h +507 -0
  211. package/include/numkong/sparse/turin.h +322 -0
  212. package/include/numkong/sparse.h +363 -0
  213. package/include/numkong/sparse.hpp +113 -0
  214. package/include/numkong/spatial/README.md +435 -0
  215. package/include/numkong/spatial/alder.h +607 -0
  216. package/include/numkong/spatial/genoa.h +290 -0
  217. package/include/numkong/spatial/haswell.h +960 -0
  218. package/include/numkong/spatial/icelake.h +586 -0
  219. package/include/numkong/spatial/neon.h +773 -0
  220. package/include/numkong/spatial/neonbfdot.h +165 -0
  221. package/include/numkong/spatial/neonhalf.h +118 -0
  222. package/include/numkong/spatial/neonsdot.h +261 -0
  223. package/include/numkong/spatial/rvv.h +984 -0
  224. package/include/numkong/spatial/rvvbf16.h +123 -0
  225. package/include/numkong/spatial/rvvhalf.h +117 -0
  226. package/include/numkong/spatial/sapphire.h +343 -0
  227. package/include/numkong/spatial/serial.h +346 -0
  228. package/include/numkong/spatial/sierra.h +323 -0
  229. package/include/numkong/spatial/skylake.h +606 -0
  230. package/include/numkong/spatial/sve.h +224 -0
  231. package/include/numkong/spatial/svebfdot.h +122 -0
  232. package/include/numkong/spatial/svehalf.h +109 -0
  233. package/include/numkong/spatial/v128relaxed.h +717 -0
  234. package/include/numkong/spatial.h +1425 -0
  235. package/include/numkong/spatial.hpp +183 -0
  236. package/include/numkong/spatials/README.md +580 -0
  237. package/include/numkong/spatials/alder.h +94 -0
  238. package/include/numkong/spatials/genoa.h +94 -0
  239. package/include/numkong/spatials/haswell.h +219 -0
  240. package/include/numkong/spatials/icelake.h +113 -0
  241. package/include/numkong/spatials/neon.h +109 -0
  242. package/include/numkong/spatials/neonbfdot.h +60 -0
  243. package/include/numkong/spatials/neonfhm.h +92 -0
  244. package/include/numkong/spatials/neonhalf.h +58 -0
  245. package/include/numkong/spatials/neonsdot.h +109 -0
  246. package/include/numkong/spatials/rvv.h +1960 -0
  247. package/include/numkong/spatials/sapphireamx.h +1149 -0
  248. package/include/numkong/spatials/serial.h +226 -0
  249. package/include/numkong/spatials/sierra.h +96 -0
  250. package/include/numkong/spatials/skylake.h +184 -0
  251. package/include/numkong/spatials/sme.h +1901 -0
  252. package/include/numkong/spatials/smef64.h +465 -0
  253. package/include/numkong/spatials/v128relaxed.h +240 -0
  254. package/include/numkong/spatials.h +3021 -0
  255. package/include/numkong/spatials.hpp +508 -0
  256. package/include/numkong/tensor.hpp +1592 -0
  257. package/include/numkong/trigonometry/README.md +184 -0
  258. package/include/numkong/trigonometry/haswell.h +652 -0
  259. package/include/numkong/trigonometry/neon.h +639 -0
  260. package/include/numkong/trigonometry/rvv.h +699 -0
  261. package/include/numkong/trigonometry/serial.h +703 -0
  262. package/include/numkong/trigonometry/skylake.h +721 -0
  263. package/include/numkong/trigonometry/v128relaxed.h +666 -0
  264. package/include/numkong/trigonometry.h +467 -0
  265. package/include/numkong/trigonometry.hpp +166 -0
  266. package/include/numkong/types.h +1384 -0
  267. package/include/numkong/types.hpp +5603 -0
  268. package/include/numkong/vector.hpp +698 -0
  269. package/javascript/README.md +246 -0
  270. package/javascript/dist/cjs/numkong-wasm.d.ts +166 -0
  271. package/javascript/dist/cjs/numkong-wasm.js +617 -0
  272. package/javascript/dist/cjs/numkong.d.ts +343 -0
  273. package/javascript/dist/cjs/numkong.js +523 -0
  274. package/javascript/dist/cjs/package.json +3 -0
  275. package/javascript/dist/cjs/types.d.ts +284 -0
  276. package/javascript/dist/cjs/types.js +653 -0
  277. package/javascript/dist/esm/numkong-wasm.d.ts +166 -0
  278. package/javascript/dist/esm/numkong-wasm.js +595 -0
  279. package/javascript/dist/esm/numkong.d.ts +343 -0
  280. package/javascript/dist/esm/numkong.js +452 -0
  281. package/javascript/dist/esm/package.json +3 -0
  282. package/javascript/dist/esm/types.d.ts +284 -0
  283. package/javascript/dist/esm/types.js +630 -0
  284. package/javascript/dist-package-cjs.json +3 -0
  285. package/javascript/dist-package-esm.json +3 -0
  286. package/javascript/node-gyp-build.d.ts +1 -0
  287. package/javascript/numkong-wasm.ts +756 -0
  288. package/javascript/numkong.c +689 -0
  289. package/javascript/numkong.ts +575 -0
  290. package/javascript/tsconfig-base.json +39 -0
  291. package/javascript/tsconfig-cjs.json +8 -0
  292. package/javascript/tsconfig-esm.json +8 -0
  293. package/javascript/types.ts +674 -0
  294. package/package.json +87 -0
@@ -0,0 +1,1135 @@
1
+ /**
2
+ * @brief SIMD-accelerated Point Cloud Alignment for Skylake.
3
+ * @file include/numkong/mesh/skylake.h
4
+ * @author Ash Vardanian
5
+ * @date December 27, 2025
6
+ *
7
+ * @sa include/numkong/mesh.h
8
+ *
9
+ * @section skylake_mesh_instructions Key AVX-512 Mesh Instructions
10
+ *
11
+ * Intrinsic Instruction Latency Throughput Ports
12
+ * _mm512_fmadd_ps VFMADD132PS (ZMM, ZMM, ZMM) 4cy 0.5/cy p05
13
+ * _mm512_permutexvar_ps VPERMPS (ZMM, ZMM, ZMM) 3cy 1/cy p5
14
+ * _mm512_permutex2var_ps VPERMT2PS (ZMM, ZMM, ZMM) 3cy 1/cy p5
15
+ * _mm512_extractf32x8_ps VEXTRACTF32X8 (YMM, ZMM, I8) 3cy 1/cy p5
16
+ *
17
+ * Point cloud operations use VPERMT2PS for stride-3 deinterleaving of xyz coordinates, avoiding
18
+ * expensive gather instructions. This achieves ~1.8x speedup over scalar deinterleaving. Dual FMA
19
+ * accumulators on Skylake-X server chips hide the 4cy latency for centroid and covariance computation.
20
+ */
21
+ #ifndef NK_MESH_SKYLAKE_H
22
+ #define NK_MESH_SKYLAKE_H
23
+
24
+ #if NK_TARGET_X86_
25
+ #if NK_TARGET_SKYLAKE
26
+
27
+ #include "numkong/types.h"
28
+ #include "numkong/dot/skylake.h"
29
+ #include "numkong/mesh/serial.h"
30
+ #include "numkong/spatial/haswell.h"
31
+
32
+ #if defined(__cplusplus)
33
+ extern "C" {
34
+ #endif
35
+
36
+ #if defined(__clang__)
37
+ #pragma clang attribute push(__attribute__((target("avx2,avx512f,avx512vl,avx512bw,avx512dq,f16c,fma,bmi,bmi2"))), \
38
+ apply_to = function)
39
+ #elif defined(__GNUC__)
40
+ #pragma GCC push_options
41
+ #pragma GCC target("avx2", "avx512f", "avx512vl", "avx512bw", "avx512dq", "f16c", "fma", "bmi", "bmi2")
42
+ #endif
43
+
44
+ /* Deinterleave 48 floats (16 xyz triplets) into separate x, y, z vectors.
45
+ * Uses permutex2var shuffles instead of gather for ~1.8x speedup.
46
+ *
47
+ * Input: 48 contiguous floats [x0,y0,z0, x1,y1,z1, ..., x15,y15,z15]
48
+ * Output: x[16], y[16], z[16] vectors
49
+ *
50
+ * Implementation: Load 3 registers (r0,r1,r2), use 6 permutex2var ops to separate.
51
+ * Phase analysis: r0 starts at float 0 (phase 0), r1 at float 16 (phase 1), r2 at float 32 (phase 2)
52
+ *
53
+ * X elements at memory positions: 0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45
54
+ * = r0[0,3,6,9,12,15], r1[2,5,8,11,14], r2[1,4,7,10,13]
55
+ * Y elements at memory positions: 1,4,7,10,13,16,19,22,25,28,31,34,37,40,43,46
56
+ * = r0[1,4,7,10,13], r1[0,3,6,9,12,15], r2[2,5,8,11,14]
57
+ * Z elements at memory positions: 2,5,8,11,14,17,20,23,26,29,32,35,38,41,44,47
58
+ * = r0[2,5,8,11,14], r1[1,4,7,10,13], r2[0,3,6,9,12,15]
59
+ */
60
+ NK_INTERNAL void nk_deinterleave_f32x16_skylake_( //
61
+ nk_f32_t const *ptr, __m512 *x_f32x16_out, __m512 *y_f32x16_out, __m512 *z_f32x16_out) { //
62
+ __m512 reg0_f32x16 = _mm512_loadu_ps(ptr);
63
+ __m512 reg1_f32x16 = _mm512_loadu_ps(ptr + 16);
64
+ __m512 reg2_f32x16 = _mm512_loadu_ps(ptr + 32);
65
+
66
+ // X: reg0[0,3,6,9,12,15] + reg1[2,5,8,11,14] → 11 elements, then + reg2[1,4,7,10,13] → 16 elements
67
+ // Indices for permutex2var: 0-15 = from first operand, 16-31 = from second operand
68
+ __m512i idx_x_01_i32x16 = _mm512_setr_epi32(0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0, 0, 0, 0, 0);
69
+ __m512i idx_x_2_i32x16 = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 20, 23, 26, 29);
70
+ __m512 x01_f32x16 = _mm512_permutex2var_ps(reg0_f32x16, idx_x_01_i32x16, reg1_f32x16);
71
+ *x_f32x16_out = _mm512_permutex2var_ps(x01_f32x16, idx_x_2_i32x16, reg2_f32x16);
72
+
73
+ // Y: reg0[1,4,7,10,13] + reg1[0,3,6,9,12,15] → 11 elements, then + reg2[2,5,8,11,14] → 16 elements
74
+ __m512i idx_y_01_i32x16 = _mm512_setr_epi32(1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 0, 0, 0, 0, 0);
75
+ __m512i idx_y_2_i32x16 = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 18, 21, 24, 27, 30);
76
+ __m512 y01_f32x16 = _mm512_permutex2var_ps(reg0_f32x16, idx_y_01_i32x16, reg1_f32x16);
77
+ *y_f32x16_out = _mm512_permutex2var_ps(y01_f32x16, idx_y_2_i32x16, reg2_f32x16);
78
+
79
+ // Z: reg0[2,5,8,11,14] + reg1[1,4,7,10,13] → 10 elements, then + reg2[0,3,6,9,12,15] → 16 elements
80
+ __m512i idx_z_01_i32x16 = _mm512_setr_epi32(2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0, 0, 0, 0, 0, 0);
81
+ __m512i idx_z_2_i32x16 = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 19, 22, 25, 28, 31);
82
+ __m512 z01_f32x16 = _mm512_permutex2var_ps(reg0_f32x16, idx_z_01_i32x16, reg1_f32x16);
83
+ *z_f32x16_out = _mm512_permutex2var_ps(z01_f32x16, idx_z_2_i32x16, reg2_f32x16);
84
+ }
85
+
86
/* Deinterleave 8 f64 3D points from xyz,xyz,xyz... to separate x,y,z vectors.
 * Input: 24 consecutive f64 values (8 points * 3 coordinates)
 * Output: Three __m512d vectors containing the x, y, z coordinates separately.
 *
 * Each coordinate is gathered with two VPERMT2PD shuffles: the first merges
 * lanes from registers 0 and 1, the second keeps those results and blends the
 * remaining lanes in from register 2. Permutex2var selector semantics: values
 * 0-7 pick from the first operand, 8-15 pick from the second operand; trailing
 * zeros in the first-stage selectors are placeholders overwritten by stage two.
 */
NK_INTERNAL void nk_deinterleave_f64x8_skylake_( //
    nk_f64_t const *ptr, __m512d *x_f64x8_out, __m512d *y_f64x8_out, __m512d *z_f64x8_out) { //
    __m512d reg0_f64x8 = _mm512_loadu_pd(ptr);      // elements 0-7
    __m512d reg1_f64x8 = _mm512_loadu_pd(ptr + 8);  // elements 8-15
    __m512d reg2_f64x8 = _mm512_loadu_pd(ptr + 16); // elements 16-23

    // X: positions 0,3,6,9,12,15,18,21 → reg0[0,3,6] + reg1[1,4,7] + reg2[2,5]
    // (selectors 9,12,15 address reg1 lanes 1,4,7; selectors 10,13 address reg2 lanes 2,5)
    __m512i idx_x_01_i64x8 = _mm512_setr_epi64(0, 3, 6, 9, 12, 15, 0, 0);
    __m512i idx_x_2_i64x8 = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 10, 13);
    __m512d x01_f64x8 = _mm512_permutex2var_pd(reg0_f64x8, idx_x_01_i64x8, reg1_f64x8);
    *x_f64x8_out = _mm512_permutex2var_pd(x01_f64x8, idx_x_2_i64x8, reg2_f64x8);

    // Y: positions 1,4,7,10,13,16,19,22 → reg0[1,4,7] + reg1[2,5] + reg2[0,3,6]
    __m512i idx_y_01_i64x8 = _mm512_setr_epi64(1, 4, 7, 10, 13, 0, 0, 0);
    __m512i idx_y_2_i64x8 = _mm512_setr_epi64(0, 1, 2, 3, 4, 8, 11, 14);
    __m512d y01_f64x8 = _mm512_permutex2var_pd(reg0_f64x8, idx_y_01_i64x8, reg1_f64x8);
    *y_f64x8_out = _mm512_permutex2var_pd(y01_f64x8, idx_y_2_i64x8, reg2_f64x8);

    // Z: positions 2,5,8,11,14,17,20,23 → reg0[2,5] + reg1[0,3,6] + reg2[1,4,7]
    __m512i idx_z_01_i64x8 = _mm512_setr_epi64(2, 5, 8, 11, 14, 0, 0, 0);
    __m512i idx_z_2_i64x8 = _mm512_setr_epi64(0, 1, 2, 3, 4, 9, 12, 15);
    __m512d z01_f64x8 = _mm512_permutex2var_pd(reg0_f64x8, idx_z_01_i64x8, reg1_f64x8);
    *z_f64x8_out = _mm512_permutex2var_pd(z01_f64x8, idx_z_2_i64x8, reg2_f64x8);
}
114
+
115
+ NK_INTERNAL nk_f64_t nk_reduce_stable_f64x8_skylake_(__m512d values_f64x8) {
116
+ nk_b512_vec_t values;
117
+ values.zmm_pd = values_f64x8;
118
+ nk_f64_t sum = 0.0, compensation = 0.0;
119
+ for (nk_size_t lane_index = 0; lane_index != 8; ++lane_index)
120
+ nk_accumulate_sum_f64_(&sum, &compensation, values.f64s[lane_index]);
121
+ return sum + compensation;
122
+ }
123
+
124
/* Skylake entry point for recovering the rotation matrix from SVD factors.
 * Delegates directly to the portable serial implementation — no AVX-512
 * specialization here. NOTE(review): consumers below read r[0..8], so the
 * output is presumably a row-major 3x3 rotation; confirm against
 * nk_rotation_from_svd_f64_serial_.
 * @param svd_u    Input SVD U factor.
 * @param svd_v    Input SVD V factor.
 * @param rotation Output rotation matrix.
 */
NK_INTERNAL void nk_rotation_from_svd_f64_skylake_(nk_f64_t const *svd_u, nk_f64_t const *svd_v, nk_f64_t *rotation) {
    nk_rotation_from_svd_f64_serial_(svd_u, svd_v, rotation);
}
127
+
128
/* Accumulate values² into a lane-wise compensated accumulator without losing
 * low-order bits. Two error-free transforms per call:
 *   1. FMA extracts the exact squaring error: fmsub(v, v, v*v) = v*v - round(v*v).
 *   2. Knuth's TwoSum recovers the rounding error of (sum + v*v), valid for any
 *      relative magnitudes of the operands.
 * Both error terms are folded into `*compensation_f64x8`; the caller adds the
 * compensation back when reducing. Statement order is load-bearing for the
 * error-free transforms — do not reassociate or "simplify" the arithmetic.
 */
NK_INTERNAL void nk_accumulate_square_f64x8_skylake_(__m512d *sum_f64x8, __m512d *compensation_f64x8,
                                                     __m512d values_f64x8) {
    __m512d product_f64x8 = _mm512_mul_pd(values_f64x8, values_f64x8);
    // Exact low half of the square: rounds only once thanks to fused multiply-subtract.
    __m512d product_error_f64x8 = _mm512_fmsub_pd(values_f64x8, values_f64x8, product_f64x8);
    __m512d tentative_sum_f64x8 = _mm512_add_pd(*sum_f64x8, product_f64x8);
    __m512d virtual_addend_f64x8 = _mm512_sub_pd(tentative_sum_f64x8, *sum_f64x8);
    // TwoSum error: (sum - (t - v')) + (product - v'), where v' = t - sum.
    __m512d sum_error_f64x8 = _mm512_add_pd(
        _mm512_sub_pd(*sum_f64x8, _mm512_sub_pd(tentative_sum_f64x8, virtual_addend_f64x8)),
        _mm512_sub_pd(product_f64x8, virtual_addend_f64x8));
    *sum_f64x8 = tentative_sum_f64x8;
    *compensation_f64x8 = _mm512_add_pd(*compensation_f64x8, _mm512_add_pd(sum_error_f64x8, product_error_f64x8));
}
140
+
141
/* Compute sum of squared distances after applying rotation (and optional scale).
 * Used by kabsch (scale=1.0) and umeyama (scale=computed_scale).
 * Returns sum_squared, caller computes √(sum_squared / n).
 *
 * For each point: d = scale * R * (a - centroid_a) - (b - centroid_b), and the
 * result is Σ|d|². All arithmetic after the f32 loads is done in f64. The main
 * loop processes 16 points per iteration: deinterleave xyz, widen each f32x16
 * to two f64x8 halves, center, rotate, and accumulate squared deltas. The tail
 * (< 16 points) is handled by an equivalent scalar loop.
 *
 * @param a, b  Interleaved xyz f32 point clouds, n points each.
 * @param r     Row-major 3x3 rotation matrix (f64, r[0..8]).
 * @param scale Uniform scale applied together with the rotation.
 * @param centroid_{a,b}_{x,y,z}  Precomputed centroids of the two clouds.
 */
NK_INTERNAL nk_f64_t nk_transformed_ssd_f32_skylake_(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n,
                                                     nk_f64_t const *r, nk_f64_t scale, nk_f64_t centroid_a_x,
                                                     nk_f64_t centroid_a_y, nk_f64_t centroid_a_z,
                                                     nk_f64_t centroid_b_x, nk_f64_t centroid_b_y,
                                                     nk_f64_t centroid_b_z) {
    // Fold the scale into the broadcast rotation rows so the inner loop needs no extra multiply.
    __m512d scaled_rotation_x_x_f64x8 = _mm512_set1_pd(scale * r[0]);
    __m512d scaled_rotation_x_y_f64x8 = _mm512_set1_pd(scale * r[1]);
    __m512d scaled_rotation_x_z_f64x8 = _mm512_set1_pd(scale * r[2]);
    __m512d scaled_rotation_y_x_f64x8 = _mm512_set1_pd(scale * r[3]);
    __m512d scaled_rotation_y_y_f64x8 = _mm512_set1_pd(scale * r[4]);
    __m512d scaled_rotation_y_z_f64x8 = _mm512_set1_pd(scale * r[5]);
    __m512d scaled_rotation_z_x_f64x8 = _mm512_set1_pd(scale * r[6]);
    __m512d scaled_rotation_z_y_f64x8 = _mm512_set1_pd(scale * r[7]);
    __m512d scaled_rotation_z_z_f64x8 = _mm512_set1_pd(scale * r[8]);
    __m512d centroid_a_x_f64x8 = _mm512_set1_pd(centroid_a_x), centroid_a_y_f64x8 = _mm512_set1_pd(centroid_a_y);
    __m512d centroid_a_z_f64x8 = _mm512_set1_pd(centroid_a_z), centroid_b_x_f64x8 = _mm512_set1_pd(centroid_b_x);
    __m512d centroid_b_y_f64x8 = _mm512_set1_pd(centroid_b_y), centroid_b_z_f64x8 = _mm512_set1_pd(centroid_b_z);
    __m512d sum_squared_f64x8 = _mm512_setzero_pd();
    __m512 a_x_f32x16, a_y_f32x16, a_z_f32x16, b_x_f32x16, b_y_f32x16, b_z_f32x16;
    nk_size_t index = 0;

    for (; index + 16 <= n; index += 16) {
        // Deinterleave both clouds; note the comma operator joining the two calls.
        nk_deinterleave_f32x16_skylake_(a + index * 3, &a_x_f32x16, &a_y_f32x16, &a_z_f32x16),
            nk_deinterleave_f32x16_skylake_(b + index * 3, &b_x_f32x16, &b_y_f32x16, &b_z_f32x16);
        // Widen each f32x16 to two f64x8 halves (lower 8 lanes via cast, upper 8 via extract).
        __m512d a_x_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_x_f32x16));
        __m512d a_x_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_x_f32x16, 1));
        __m512d a_y_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_y_f32x16));
        __m512d a_y_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_y_f32x16, 1));
        __m512d a_z_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_z_f32x16));
        __m512d a_z_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_z_f32x16, 1));
        __m512d b_x_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_x_f32x16));
        __m512d b_x_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_x_f32x16, 1));
        __m512d b_y_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_y_f32x16));
        __m512d b_y_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_y_f32x16, 1));
        __m512d b_z_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_z_f32x16));
        __m512d b_z_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_z_f32x16, 1));

        // Center both clouds around their respective centroids.
        __m512d centered_a_x_lower_f64x8 = _mm512_sub_pd(a_x_lower_f64x8, centroid_a_x_f64x8);
        __m512d centered_a_x_upper_f64x8 = _mm512_sub_pd(a_x_upper_f64x8, centroid_a_x_f64x8);
        __m512d centered_a_y_lower_f64x8 = _mm512_sub_pd(a_y_lower_f64x8, centroid_a_y_f64x8);
        __m512d centered_a_y_upper_f64x8 = _mm512_sub_pd(a_y_upper_f64x8, centroid_a_y_f64x8);
        __m512d centered_a_z_lower_f64x8 = _mm512_sub_pd(a_z_lower_f64x8, centroid_a_z_f64x8);
        __m512d centered_a_z_upper_f64x8 = _mm512_sub_pd(a_z_upper_f64x8, centroid_a_z_f64x8);
        __m512d centered_b_x_lower_f64x8 = _mm512_sub_pd(b_x_lower_f64x8, centroid_b_x_f64x8);
        __m512d centered_b_x_upper_f64x8 = _mm512_sub_pd(b_x_upper_f64x8, centroid_b_x_f64x8);
        __m512d centered_b_y_lower_f64x8 = _mm512_sub_pd(b_y_lower_f64x8, centroid_b_y_f64x8);
        __m512d centered_b_y_upper_f64x8 = _mm512_sub_pd(b_y_upper_f64x8, centroid_b_y_f64x8);
        __m512d centered_b_z_lower_f64x8 = _mm512_sub_pd(b_z_lower_f64x8, centroid_b_z_f64x8);
        __m512d centered_b_z_upper_f64x8 = _mm512_sub_pd(b_z_upper_f64x8, centroid_b_z_f64x8);

        // Apply the scaled rotation to the centered `a` points: each output row is
        // mul + two fmadds (rx·x + ry·y + rz·z), duplicated for lower/upper halves.
        __m512d rotated_a_x_lower_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_x_z_f64x8, centered_a_z_lower_f64x8,
            _mm512_fmadd_pd(scaled_rotation_x_y_f64x8, centered_a_y_lower_f64x8,
                            _mm512_mul_pd(scaled_rotation_x_x_f64x8, centered_a_x_lower_f64x8)));
        __m512d rotated_a_x_upper_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_x_z_f64x8, centered_a_z_upper_f64x8,
            _mm512_fmadd_pd(scaled_rotation_x_y_f64x8, centered_a_y_upper_f64x8,
                            _mm512_mul_pd(scaled_rotation_x_x_f64x8, centered_a_x_upper_f64x8)));
        __m512d rotated_a_y_lower_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_y_z_f64x8, centered_a_z_lower_f64x8,
            _mm512_fmadd_pd(scaled_rotation_y_y_f64x8, centered_a_y_lower_f64x8,
                            _mm512_mul_pd(scaled_rotation_y_x_f64x8, centered_a_x_lower_f64x8)));
        __m512d rotated_a_y_upper_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_y_z_f64x8, centered_a_z_upper_f64x8,
            _mm512_fmadd_pd(scaled_rotation_y_y_f64x8, centered_a_y_upper_f64x8,
                            _mm512_mul_pd(scaled_rotation_y_x_f64x8, centered_a_x_upper_f64x8)));
        __m512d rotated_a_z_lower_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_z_z_f64x8, centered_a_z_lower_f64x8,
            _mm512_fmadd_pd(scaled_rotation_z_y_f64x8, centered_a_y_lower_f64x8,
                            _mm512_mul_pd(scaled_rotation_z_x_f64x8, centered_a_x_lower_f64x8)));
        __m512d rotated_a_z_upper_f64x8 = _mm512_fmadd_pd(
            scaled_rotation_z_z_f64x8, centered_a_z_upper_f64x8,
            _mm512_fmadd_pd(scaled_rotation_z_y_f64x8, centered_a_y_upper_f64x8,
                            _mm512_mul_pd(scaled_rotation_z_x_f64x8, centered_a_x_upper_f64x8)));

        // Residuals between the transformed `a` points and centered `b` points.
        __m512d delta_x_lower_f64x8 = _mm512_sub_pd(rotated_a_x_lower_f64x8, centered_b_x_lower_f64x8);
        __m512d delta_x_upper_f64x8 = _mm512_sub_pd(rotated_a_x_upper_f64x8, centered_b_x_upper_f64x8);
        __m512d delta_y_lower_f64x8 = _mm512_sub_pd(rotated_a_y_lower_f64x8, centered_b_y_lower_f64x8);
        __m512d delta_y_upper_f64x8 = _mm512_sub_pd(rotated_a_y_upper_f64x8, centered_b_y_upper_f64x8);
        __m512d delta_z_lower_f64x8 = _mm512_sub_pd(rotated_a_z_lower_f64x8, centered_b_z_lower_f64x8);
        __m512d delta_z_upper_f64x8 = _mm512_sub_pd(rotated_a_z_upper_f64x8, centered_b_z_upper_f64x8);

        // Batch |d|² via fmadd chain; plain (uncompensated) accumulation — the f64
        // variant below uses compensated sums, this f32-input path does not.
        __m512d batch_sum_squared_f64x8 = _mm512_add_pd(_mm512_mul_pd(delta_x_lower_f64x8, delta_x_lower_f64x8),
                                                        _mm512_mul_pd(delta_x_upper_f64x8, delta_x_upper_f64x8));
        batch_sum_squared_f64x8 = _mm512_fmadd_pd(delta_y_lower_f64x8, delta_y_lower_f64x8, batch_sum_squared_f64x8);
        batch_sum_squared_f64x8 = _mm512_fmadd_pd(delta_y_upper_f64x8, delta_y_upper_f64x8, batch_sum_squared_f64x8);
        batch_sum_squared_f64x8 = _mm512_fmadd_pd(delta_z_lower_f64x8, delta_z_lower_f64x8, batch_sum_squared_f64x8);
        batch_sum_squared_f64x8 = _mm512_fmadd_pd(delta_z_upper_f64x8, delta_z_upper_f64x8, batch_sum_squared_f64x8);
        sum_squared_f64x8 = _mm512_add_pd(sum_squared_f64x8, batch_sum_squared_f64x8);
    }

    // Horizontal reduce, then handle the remaining < 16 points scalar-wise.
    nk_f64_t sum_squared = _mm512_reduce_add_pd(sum_squared_f64x8);
    for (; index < n; ++index) {
        nk_f64_t centered_a_x = (nk_f64_t)a[index * 3 + 0] - centroid_a_x;
        nk_f64_t centered_a_y = (nk_f64_t)a[index * 3 + 1] - centroid_a_y;
        nk_f64_t centered_a_z = (nk_f64_t)a[index * 3 + 2] - centroid_a_z;
        nk_f64_t centered_b_x = (nk_f64_t)b[index * 3 + 0] - centroid_b_x;
        nk_f64_t centered_b_y = (nk_f64_t)b[index * 3 + 1] - centroid_b_y;
        nk_f64_t centered_b_z = (nk_f64_t)b[index * 3 + 2] - centroid_b_z;
        nk_f64_t rotated_a_x = scale * (r[0] * centered_a_x + r[1] * centered_a_y + r[2] * centered_a_z);
        nk_f64_t rotated_a_y = scale * (r[3] * centered_a_x + r[4] * centered_a_y + r[5] * centered_a_z);
        nk_f64_t rotated_a_z = scale * (r[6] * centered_a_x + r[7] * centered_a_y + r[8] * centered_a_z);
        nk_f64_t delta_x = rotated_a_x - centered_b_x, delta_y = rotated_a_y - centered_b_y,
                 delta_z = rotated_a_z - centered_b_z;
        sum_squared += delta_x * delta_x + delta_y * delta_y + delta_z * delta_z;
    }

    return sum_squared;
}
254
+
255
+ /* Compute sum of squared distances for f64 after applying rotation (and optional scale).
256
+  * Rotation matrix, scale and data are all f64 for full precision.
257
+  */
258
+ // a, b: interleaved xyz point arrays of n points each; r: row-major 3x3 rotation.
+ // Returns Σ |scale * R * (a_i - centroid_a) - (b_i - centroid_b)|².
+ // Vector lanes use a compensated (Kahan-style) accumulator via
+ // nk_accumulate_square_f64x8_skylake_; the scalar tail uses the f64 variant.
+ NK_INTERNAL nk_f64_t nk_transformed_ssd_f64_skylake_(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n,
259
+ nk_f64_t const *r, nk_f64_t scale, nk_f64_t centroid_a_x,
260
+ nk_f64_t centroid_a_y, nk_f64_t centroid_a_z,
261
+ nk_f64_t centroid_b_x, nk_f64_t centroid_b_y,
262
+ nk_f64_t centroid_b_z) {
263
+ // Broadcast scaled rotation matrix elements
264
+ __m512d scaled_rotation_x_x_f64x8 = _mm512_set1_pd(scale * r[0]);
265
+ __m512d scaled_rotation_x_y_f64x8 = _mm512_set1_pd(scale * r[1]);
266
+ __m512d scaled_rotation_x_z_f64x8 = _mm512_set1_pd(scale * r[2]);
267
+ __m512d scaled_rotation_y_x_f64x8 = _mm512_set1_pd(scale * r[3]);
268
+ __m512d scaled_rotation_y_y_f64x8 = _mm512_set1_pd(scale * r[4]);
269
+ __m512d scaled_rotation_y_z_f64x8 = _mm512_set1_pd(scale * r[5]);
270
+ __m512d scaled_rotation_z_x_f64x8 = _mm512_set1_pd(scale * r[6]);
271
+ __m512d scaled_rotation_z_y_f64x8 = _mm512_set1_pd(scale * r[7]);
272
+ __m512d scaled_rotation_z_z_f64x8 = _mm512_set1_pd(scale * r[8]);
273
+
274
+ // Broadcast centroids
275
+ __m512d centroid_a_x_f64x8 = _mm512_set1_pd(centroid_a_x);
276
+ __m512d centroid_a_y_f64x8 = _mm512_set1_pd(centroid_a_y);
277
+ __m512d centroid_a_z_f64x8 = _mm512_set1_pd(centroid_a_z);
278
+ __m512d centroid_b_x_f64x8 = _mm512_set1_pd(centroid_b_x);
279
+ __m512d centroid_b_y_f64x8 = _mm512_set1_pd(centroid_b_y);
280
+ __m512d centroid_b_z_f64x8 = _mm512_set1_pd(centroid_b_z);
281
+
282
+ __m512d sum_squared_f64x8 = _mm512_setzero_pd();
283
+ __m512d sum_squared_compensation_f64x8 = _mm512_setzero_pd();
284
+ __m512d a_x_f64x8, a_y_f64x8, a_z_f64x8, b_x_f64x8, b_y_f64x8, b_z_f64x8;
285
+ nk_size_t j = 0;
286
+
287
+ // Main vector loop: 8 points (24 doubles) per iteration.
+ for (; j + 8 <= n; j += 8) {
288
+ nk_deinterleave_f64x8_skylake_(a + j * 3, &a_x_f64x8, &a_y_f64x8, &a_z_f64x8);
289
+ nk_deinterleave_f64x8_skylake_(b + j * 3, &b_x_f64x8, &b_y_f64x8, &b_z_f64x8);
290
+
291
+ // Center points
292
+ __m512d pa_x_f64x8 = _mm512_sub_pd(a_x_f64x8, centroid_a_x_f64x8);
293
+ __m512d pa_y_f64x8 = _mm512_sub_pd(a_y_f64x8, centroid_a_y_f64x8);
294
+ __m512d pa_z_f64x8 = _mm512_sub_pd(a_z_f64x8, centroid_a_z_f64x8);
295
+ __m512d pb_x_f64x8 = _mm512_sub_pd(b_x_f64x8, centroid_b_x_f64x8);
296
+ __m512d pb_y_f64x8 = _mm512_sub_pd(b_y_f64x8, centroid_b_y_f64x8);
297
+ __m512d pb_z_f64x8 = _mm512_sub_pd(b_z_f64x8, centroid_b_z_f64x8);
298
+
299
+ // Rotate and scale: ra = scale * R * pa
300
+ __m512d ra_x_f64x8 = _mm512_fmadd_pd(scaled_rotation_x_z_f64x8, pa_z_f64x8,
301
+ _mm512_fmadd_pd(scaled_rotation_x_y_f64x8, pa_y_f64x8,
302
+ _mm512_mul_pd(scaled_rotation_x_x_f64x8, pa_x_f64x8)));
303
+ __m512d ra_y_f64x8 = _mm512_fmadd_pd(scaled_rotation_y_z_f64x8, pa_z_f64x8,
304
+ _mm512_fmadd_pd(scaled_rotation_y_y_f64x8, pa_y_f64x8,
305
+ _mm512_mul_pd(scaled_rotation_y_x_f64x8, pa_x_f64x8)));
306
+ __m512d ra_z_f64x8 = _mm512_fmadd_pd(scaled_rotation_z_z_f64x8, pa_z_f64x8,
307
+ _mm512_fmadd_pd(scaled_rotation_z_y_f64x8, pa_y_f64x8,
308
+ _mm512_mul_pd(scaled_rotation_z_x_f64x8, pa_x_f64x8)));
309
+
310
+ // Delta and accumulate
311
+ __m512d delta_x_f64x8 = _mm512_sub_pd(ra_x_f64x8, pb_x_f64x8);
312
+ __m512d delta_y_f64x8 = _mm512_sub_pd(ra_y_f64x8, pb_y_f64x8);
313
+ __m512d delta_z_f64x8 = _mm512_sub_pd(ra_z_f64x8, pb_z_f64x8);
314
+
315
+ nk_accumulate_square_f64x8_skylake_(&sum_squared_f64x8, &sum_squared_compensation_f64x8, delta_x_f64x8);
316
+ nk_accumulate_square_f64x8_skylake_(&sum_squared_f64x8, &sum_squared_compensation_f64x8, delta_y_f64x8);
317
+ nk_accumulate_square_f64x8_skylake_(&sum_squared_f64x8, &sum_squared_compensation_f64x8, delta_z_f64x8);
318
+ }
319
+
320
+ // Fold the vector accumulator (value + compensation) into a scalar.
+ nk_f64_t sum_squared = nk_dot_stable_sum_f64x8_skylake_(sum_squared_f64x8, sum_squared_compensation_f64x8);
321
+ nk_f64_t sum_squared_compensation = 0.0;
322
+
323
+ // Scalar tail
324
+ for (; j < n; ++j) {
325
+ nk_f64_t pa_x = a[j * 3 + 0] - centroid_a_x;
326
+ nk_f64_t pa_y = a[j * 3 + 1] - centroid_a_y;
327
+ nk_f64_t pa_z = a[j * 3 + 2] - centroid_a_z;
328
+ nk_f64_t pb_x = b[j * 3 + 0] - centroid_b_x;
329
+ nk_f64_t pb_y = b[j * 3 + 1] - centroid_b_y;
330
+ nk_f64_t pb_z = b[j * 3 + 2] - centroid_b_z;
331
+
332
+ nk_f64_t ra_x = scale * (r[0] * pa_x + r[1] * pa_y + r[2] * pa_z);
333
+ nk_f64_t ra_y = scale * (r[3] * pa_x + r[4] * pa_y + r[5] * pa_z);
334
+ nk_f64_t ra_z = scale * (r[6] * pa_x + r[7] * pa_y + r[8] * pa_z);
335
+
336
+ nk_f64_t delta_x = ra_x - pb_x;
337
+ nk_f64_t delta_y = ra_y - pb_y;
338
+ nk_f64_t delta_z = ra_z - pb_z;
339
+ nk_accumulate_square_f64_(&sum_squared, &sum_squared_compensation, delta_x);
340
+ nk_accumulate_square_f64_(&sum_squared, &sum_squared_compensation, delta_y);
341
+ nk_accumulate_square_f64_(&sum_squared, &sum_squared_compensation, delta_z);
342
+ }
343
+
344
+ return sum_squared + sum_squared_compensation;
345
+ }
346
+
347
+ // Single pass over two interleaved-xyz f32 point clouds computing (in f64):
+ // both centroids and the centered 3x3 cross-covariance H, using the identity
+ // H_ij = Σ(a_i b_j) - n * ā_i * b̄_j. f32 lanes are widened to f64 before any
+ // accumulation; the remainder (n % 16) is handled by a scalar loop.
+ NK_INTERNAL void nk_centroid_and_cross_covariance_f32_skylake_( //
348
+ nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, //
349
+ nk_f64_t *centroid_a_x, nk_f64_t *centroid_a_y, nk_f64_t *centroid_a_z, nk_f64_t *centroid_b_x,
350
+ nk_f64_t *centroid_b_y, nk_f64_t *centroid_b_z, nk_f64_t cross_covariance_f64[9]) {
351
+ __m512d sum_a_x_f64x8 = _mm512_setzero_pd(), sum_a_y_f64x8 = _mm512_setzero_pd();
352
+ __m512d sum_a_z_f64x8 = _mm512_setzero_pd(), sum_b_x_f64x8 = _mm512_setzero_pd();
353
+ __m512d sum_b_y_f64x8 = _mm512_setzero_pd(), sum_b_z_f64x8 = _mm512_setzero_pd();
354
+ __m512d covariance_00_f64x8 = _mm512_setzero_pd(), covariance_01_f64x8 = _mm512_setzero_pd();
355
+ __m512d covariance_02_f64x8 = _mm512_setzero_pd(), covariance_10_f64x8 = _mm512_setzero_pd();
356
+ __m512d covariance_11_f64x8 = _mm512_setzero_pd(), covariance_12_f64x8 = _mm512_setzero_pd();
357
+ __m512d covariance_20_f64x8 = _mm512_setzero_pd(), covariance_21_f64x8 = _mm512_setzero_pd();
358
+ __m512d covariance_22_f64x8 = _mm512_setzero_pd();
359
+ __m512 a_x_f32x16, a_y_f32x16, a_z_f32x16, b_x_f32x16, b_y_f32x16, b_z_f32x16;
360
+ nk_size_t index = 0;
361
+
362
+ // 16 points per iteration: deinterleave as f32x16, then widen each half to f64x8.
+ for (; index + 16 <= n; index += 16) {
363
+ nk_deinterleave_f32x16_skylake_(a + index * 3, &a_x_f32x16, &a_y_f32x16, &a_z_f32x16),
364
+ nk_deinterleave_f32x16_skylake_(b + index * 3, &b_x_f32x16, &b_y_f32x16, &b_z_f32x16);
365
+ __m512d a_x_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_x_f32x16));
366
+ __m512d a_x_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_x_f32x16, 1));
367
+ __m512d a_y_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_y_f32x16));
368
+ __m512d a_y_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_y_f32x16, 1));
369
+ __m512d a_z_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_z_f32x16));
370
+ __m512d a_z_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_z_f32x16, 1));
371
+ __m512d b_x_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_x_f32x16));
372
+ __m512d b_x_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_x_f32x16, 1));
373
+ __m512d b_y_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_y_f32x16));
374
+ __m512d b_y_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_y_f32x16, 1));
375
+ __m512d b_z_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(b_z_f32x16));
376
+ __m512d b_z_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(b_z_f32x16, 1));
377
+
378
+ // Accumulate coordinate sums (for centroids) and raw outer products (for H).
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, _mm512_add_pd(a_x_lower_f64x8, a_x_upper_f64x8)),
379
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, _mm512_add_pd(a_y_lower_f64x8, a_y_upper_f64x8)),
380
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, _mm512_add_pd(a_z_lower_f64x8, a_z_upper_f64x8));
381
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, _mm512_add_pd(b_x_lower_f64x8, b_x_upper_f64x8)),
382
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, _mm512_add_pd(b_y_lower_f64x8, b_y_upper_f64x8)),
383
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, _mm512_add_pd(b_z_lower_f64x8, b_z_upper_f64x8));
384
+ covariance_00_f64x8 = _mm512_add_pd(covariance_00_f64x8,
385
+ _mm512_add_pd(_mm512_mul_pd(a_x_lower_f64x8, b_x_lower_f64x8),
386
+ _mm512_mul_pd(a_x_upper_f64x8, b_x_upper_f64x8))),
387
+ covariance_01_f64x8 = _mm512_add_pd(covariance_01_f64x8,
388
+ _mm512_add_pd(_mm512_mul_pd(a_x_lower_f64x8, b_y_lower_f64x8),
389
+ _mm512_mul_pd(a_x_upper_f64x8, b_y_upper_f64x8))),
390
+ covariance_02_f64x8 = _mm512_add_pd(covariance_02_f64x8,
391
+ _mm512_add_pd(_mm512_mul_pd(a_x_lower_f64x8, b_z_lower_f64x8),
392
+ _mm512_mul_pd(a_x_upper_f64x8, b_z_upper_f64x8)));
393
+ covariance_10_f64x8 = _mm512_add_pd(covariance_10_f64x8,
394
+ _mm512_add_pd(_mm512_mul_pd(a_y_lower_f64x8, b_x_lower_f64x8),
395
+ _mm512_mul_pd(a_y_upper_f64x8, b_x_upper_f64x8))),
396
+ covariance_11_f64x8 = _mm512_add_pd(covariance_11_f64x8,
397
+ _mm512_add_pd(_mm512_mul_pd(a_y_lower_f64x8, b_y_lower_f64x8),
398
+ _mm512_mul_pd(a_y_upper_f64x8, b_y_upper_f64x8))),
399
+ covariance_12_f64x8 = _mm512_add_pd(covariance_12_f64x8,
400
+ _mm512_add_pd(_mm512_mul_pd(a_y_lower_f64x8, b_z_lower_f64x8),
401
+ _mm512_mul_pd(a_y_upper_f64x8, b_z_upper_f64x8)));
402
+ covariance_20_f64x8 = _mm512_add_pd(covariance_20_f64x8,
403
+ _mm512_add_pd(_mm512_mul_pd(a_z_lower_f64x8, b_x_lower_f64x8),
404
+ _mm512_mul_pd(a_z_upper_f64x8, b_x_upper_f64x8))),
405
+ covariance_21_f64x8 = _mm512_add_pd(covariance_21_f64x8,
406
+ _mm512_add_pd(_mm512_mul_pd(a_z_lower_f64x8, b_y_lower_f64x8),
407
+ _mm512_mul_pd(a_z_upper_f64x8, b_y_upper_f64x8))),
408
+ covariance_22_f64x8 = _mm512_add_pd(covariance_22_f64x8,
409
+ _mm512_add_pd(_mm512_mul_pd(a_z_lower_f64x8, b_z_lower_f64x8),
410
+ _mm512_mul_pd(a_z_upper_f64x8, b_z_upper_f64x8)));
411
+ }
412
+
413
+ // Horizontal reductions of all vector accumulators to scalars.
+ nk_f64_t sum_a_x = _mm512_reduce_add_pd(sum_a_x_f64x8), sum_a_y = _mm512_reduce_add_pd(sum_a_y_f64x8),
414
+ sum_a_z = _mm512_reduce_add_pd(sum_a_z_f64x8);
415
+ nk_f64_t sum_b_x = _mm512_reduce_add_pd(sum_b_x_f64x8), sum_b_y = _mm512_reduce_add_pd(sum_b_y_f64x8),
416
+ sum_b_z = _mm512_reduce_add_pd(sum_b_z_f64x8);
417
+ nk_f64_t covariance_00 = _mm512_reduce_add_pd(covariance_00_f64x8),
418
+ covariance_01 = _mm512_reduce_add_pd(covariance_01_f64x8),
419
+ covariance_02 = _mm512_reduce_add_pd(covariance_02_f64x8);
420
+ nk_f64_t covariance_10 = _mm512_reduce_add_pd(covariance_10_f64x8),
421
+ covariance_11 = _mm512_reduce_add_pd(covariance_11_f64x8),
422
+ covariance_12 = _mm512_reduce_add_pd(covariance_12_f64x8);
423
+ nk_f64_t covariance_20 = _mm512_reduce_add_pd(covariance_20_f64x8),
424
+ covariance_21 = _mm512_reduce_add_pd(covariance_21_f64x8),
425
+ covariance_22 = _mm512_reduce_add_pd(covariance_22_f64x8);
426
+
427
+ // Scalar tail for the remaining n % 16 points.
+ for (; index < n; ++index) {
428
+ nk_f64_t a_x = a[index * 3 + 0], a_y = a[index * 3 + 1], a_z = a[index * 3 + 2];
429
+ nk_f64_t b_x = b[index * 3 + 0], b_y = b[index * 3 + 1], b_z = b[index * 3 + 2];
430
+ sum_a_x += a_x, sum_a_y += a_y, sum_a_z += a_z;
431
+ sum_b_x += b_x, sum_b_y += b_y, sum_b_z += b_z;
432
+ covariance_00 += a_x * b_x, covariance_01 += a_x * b_y, covariance_02 += a_x * b_z;
433
+ covariance_10 += a_y * b_x, covariance_11 += a_y * b_y, covariance_12 += a_y * b_z;
434
+ covariance_20 += a_z * b_x, covariance_21 += a_z * b_y, covariance_22 += a_z * b_z;
435
+ }
436
+
437
+ // Centroids, then center the covariance: H_ij = Σ(a_i b_j) - n * ā_i * b̄_j.
+ // NOTE(review): n == 0 would divide by zero here — presumably callers guarantee n > 0.
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n, n_f64 = (nk_f64_t)n;
438
+ *centroid_a_x = sum_a_x * inv_n, *centroid_a_y = sum_a_y * inv_n, *centroid_a_z = sum_a_z * inv_n;
439
+ *centroid_b_x = sum_b_x * inv_n, *centroid_b_y = sum_b_y * inv_n, *centroid_b_z = sum_b_z * inv_n;
440
+ cross_covariance_f64[0] = covariance_00 - n_f64 * (*centroid_a_x) * (*centroid_b_x),
441
+ cross_covariance_f64[1] = covariance_01 - n_f64 * (*centroid_a_x) * (*centroid_b_y),
442
+ cross_covariance_f64[2] = covariance_02 - n_f64 * (*centroid_a_x) * (*centroid_b_z);
443
+ cross_covariance_f64[3] = covariance_10 - n_f64 * (*centroid_a_y) * (*centroid_b_x),
444
+ cross_covariance_f64[4] = covariance_11 - n_f64 * (*centroid_a_y) * (*centroid_b_y),
445
+ cross_covariance_f64[5] = covariance_12 - n_f64 * (*centroid_a_y) * (*centroid_b_z);
446
+ cross_covariance_f64[6] = covariance_20 - n_f64 * (*centroid_a_z) * (*centroid_b_x),
447
+ cross_covariance_f64[7] = covariance_21 - n_f64 * (*centroid_a_z) * (*centroid_b_y),
448
+ cross_covariance_f64[8] = covariance_22 - n_f64 * (*centroid_a_z) * (*centroid_b_z);
449
+ }
450
+
451
+ // Public RMSD entry for f32 points: centroids are computed (cross-covariance is
+ // computed too but unused here), rotation is reported as identity and scale as 1,
+ // and the result is sqrt(SSD / n) of the centered (untransformed) point sets.
+ // a_centroid/b_centroid/rotation/scale may be NULL; *result is always written.
+ NK_PUBLIC void nk_rmsd_f32_skylake(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
452
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
453
+ nk_f64_t identity[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
454
+ nk_f64_t centroid_a_x, centroid_a_y, centroid_a_z, centroid_b_x, centroid_b_y, centroid_b_z;
455
+ nk_f64_t cross_covariance_f64[9];
456
+ if (rotation)
457
+ rotation[0] = 1, rotation[1] = 0, rotation[2] = 0, rotation[3] = 0, rotation[4] = 1, rotation[5] = 0,
458
+ rotation[6] = 0, rotation[7] = 0, rotation[8] = 1;
459
+ if (scale) *scale = 1.0f;
460
+ nk_centroid_and_cross_covariance_f32_skylake_(a, b, n, &centroid_a_x, &centroid_a_y, &centroid_a_z, &centroid_b_x,
461
+ &centroid_b_y, &centroid_b_z, cross_covariance_f64);
462
+ if (a_centroid)
463
+ a_centroid[0] = (nk_f32_t)centroid_a_x, a_centroid[1] = (nk_f32_t)centroid_a_y,
464
+ a_centroid[2] = (nk_f32_t)centroid_a_z;
465
+ if (b_centroid)
466
+ b_centroid[0] = (nk_f32_t)centroid_b_x, b_centroid[1] = (nk_f32_t)centroid_b_y,
467
+ b_centroid[2] = (nk_f32_t)centroid_b_z;
468
+ // RMSD = sqrt(SSD(identity, scale=1) / n).
+ *result = nk_f64_sqrt_haswell(nk_transformed_ssd_f32_skylake_(a, b, n, identity, 1.0, centroid_a_x, centroid_a_y,
469
+ centroid_a_z, centroid_b_x, centroid_b_y,
470
+ centroid_b_z) /
471
+ (nk_f64_t)n);
472
+ }
473
+
474
+ // Kabsch superposition for f32 points: computes centroids and cross-covariance H,
+ // takes SVD(H) = U S Vᵀ, forms R = V Uᵀ, and flips the sign of V's last column if
+ // det(R) < 0 to guarantee a proper rotation (no reflection). The reported RMSD is
+ // sqrt(SSD after applying R / n). scale is always reported as 1 (no scaling fit).
+ NK_PUBLIC void nk_kabsch_f32_skylake(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
475
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
476
+ nk_f64_t centroid_a_x, centroid_a_y, centroid_a_z, centroid_b_x, centroid_b_y, centroid_b_z;
477
+ nk_f64_t cross_covariance_f64[9];
478
+ nk_centroid_and_cross_covariance_f32_skylake_(a, b, n, &centroid_a_x, &centroid_a_y, &centroid_a_z, &centroid_b_x,
479
+ &centroid_b_y, &centroid_b_z, cross_covariance_f64);
480
+ if (a_centroid)
481
+ a_centroid[0] = (nk_f32_t)centroid_a_x, a_centroid[1] = (nk_f32_t)centroid_a_y,
482
+ a_centroid[2] = (nk_f32_t)centroid_a_z;
483
+ if (b_centroid)
484
+ b_centroid[0] = (nk_f32_t)centroid_b_x, b_centroid[1] = (nk_f32_t)centroid_b_y,
485
+ b_centroid[2] = (nk_f32_t)centroid_b_z;
486
+ if (scale) *scale = 1.0f;
487
+
488
+ // R = V Uᵀ, written out elementwise (svd_u/svd_v are row-major 3x3).
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9], r[9];
489
+ nk_svd3x3_f64_(cross_covariance_f64, svd_u, svd_s, svd_v);
490
+ r[0] = svd_v[0] * svd_u[0] + svd_v[1] * svd_u[1] + svd_v[2] * svd_u[2];
491
+ r[1] = svd_v[0] * svd_u[3] + svd_v[1] * svd_u[4] + svd_v[2] * svd_u[5];
492
+ r[2] = svd_v[0] * svd_u[6] + svd_v[1] * svd_u[7] + svd_v[2] * svd_u[8];
493
+ r[3] = svd_v[3] * svd_u[0] + svd_v[4] * svd_u[1] + svd_v[5] * svd_u[2];
494
+ r[4] = svd_v[3] * svd_u[3] + svd_v[4] * svd_u[4] + svd_v[5] * svd_u[5];
495
+ r[5] = svd_v[3] * svd_u[6] + svd_v[4] * svd_u[7] + svd_v[5] * svd_u[8];
496
+ r[6] = svd_v[6] * svd_u[0] + svd_v[7] * svd_u[1] + svd_v[8] * svd_u[2];
497
+ r[7] = svd_v[6] * svd_u[3] + svd_v[7] * svd_u[4] + svd_v[8] * svd_u[5];
498
+ r[8] = svd_v[6] * svd_u[6] + svd_v[7] * svd_u[7] + svd_v[8] * svd_u[8];
499
+ // Reflection case: negate V's third column and rebuild R so det(R) = +1.
+ if (nk_det3x3_f64_(r) < 0) {
500
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
501
+ r[0] = svd_v[0] * svd_u[0] + svd_v[1] * svd_u[1] + svd_v[2] * svd_u[2];
502
+ r[1] = svd_v[0] * svd_u[3] + svd_v[1] * svd_u[4] + svd_v[2] * svd_u[5];
503
+ r[2] = svd_v[0] * svd_u[6] + svd_v[1] * svd_u[7] + svd_v[2] * svd_u[8];
504
+ r[3] = svd_v[3] * svd_u[0] + svd_v[4] * svd_u[1] + svd_v[5] * svd_u[2];
505
+ r[4] = svd_v[3] * svd_u[3] + svd_v[4] * svd_u[4] + svd_v[5] * svd_u[5];
506
+ r[5] = svd_v[3] * svd_u[6] + svd_v[4] * svd_u[7] + svd_v[5] * svd_u[8];
507
+ r[6] = svd_v[6] * svd_u[0] + svd_v[7] * svd_u[1] + svd_v[8] * svd_u[2];
508
+ r[7] = svd_v[6] * svd_u[3] + svd_v[7] * svd_u[4] + svd_v[8] * svd_u[5];
509
+ r[8] = svd_v[6] * svd_u[6] + svd_v[7] * svd_u[7] + svd_v[8] * svd_u[8];
510
+ }
511
+ if (rotation)
512
+ for (int index = 0; index != 9; ++index) rotation[index] = (nk_f32_t)r[index];
513
+ *result = nk_f64_sqrt_haswell(nk_transformed_ssd_f32_skylake_(a, b, n, r, 1.0, centroid_a_x, centroid_a_y,
514
+ centroid_a_z, centroid_b_x, centroid_b_y,
515
+ centroid_b_z) /
516
+ (nk_f64_t)n);
517
+ }
518
+
519
+ // Public RMSD entry for f64 points. Single fused pass computes centroid sums and
+ // Σ(a-b)² simultaneously; RMSD is then recovered via the identity
+ // RMSD² = E[(a-b)²] - |ā - b̄|². Rotation reported as identity, scale as 1.
+ // a_centroid/b_centroid/rotation/scale may be NULL; *result is always written.
+ //
+ // BUGFIX(review): the masked-gather tail consumed the final n % 8 points but did
+ // not advance `i`, so the scalar Kahan loop below re-accumulated those same
+ // points, double-counting them in both centroid sums and squared deltas whenever
+ // n was not a multiple of 8. `i = n;` is now set after the masked tail.
+ NK_PUBLIC void nk_rmsd_f64_skylake(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
520
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
521
+ // RMSD uses identity rotation and scale=1.0.
522
+ if (rotation) {
523
+ rotation[0] = 1;
524
+ rotation[1] = 0;
525
+ rotation[2] = 0;
526
+ rotation[3] = 0;
527
+ rotation[4] = 1;
528
+ rotation[5] = 0;
529
+ rotation[6] = 0;
530
+ rotation[7] = 0;
531
+ rotation[8] = 1;
532
+ }
533
+ if (scale) *scale = 1.0;
534
+ // Optimized fused single-pass implementation for f64.
535
+ // Computes centroids and squared differences in one pass using the identity:
536
+ // RMSD = √(E[(a-ā) - (b-b̄)]²)
537
+ // = √(E[(a-b)²] - (ā - b̄)²)
538
+ __m512i const gather_idx_i64x8 = _mm512_setr_epi64(0, 3, 6, 9, 12, 15, 18, 21);
539
+ __m512d const zeros_f64x8 = _mm512_setzero_pd();
540
+
541
+ // Accumulators for centroids and squared differences
542
+ __m512d sum_a_x_f64x8 = zeros_f64x8, sum_a_y_f64x8 = zeros_f64x8, sum_a_z_f64x8 = zeros_f64x8;
543
+ __m512d sum_b_x_f64x8 = zeros_f64x8, sum_b_y_f64x8 = zeros_f64x8, sum_b_z_f64x8 = zeros_f64x8;
544
+ __m512d sum_squared_x_f64x8 = zeros_f64x8, sum_squared_y_f64x8 = zeros_f64x8, sum_squared_z_f64x8 = zeros_f64x8;
545
+
546
+ __m512d a_x_f64x8, a_y_f64x8, a_z_f64x8, b_x_f64x8, b_y_f64x8, b_z_f64x8;
547
+ nk_size_t i = 0;
548
+
549
+ // Main loop with 2x unrolling for better latency hiding
550
+ for (; i + 16 <= n; i += 16) {
551
+ // Iteration 0
552
+ nk_deinterleave_f64x8_skylake_(a + i * 3, &a_x_f64x8, &a_y_f64x8, &a_z_f64x8);
553
+ nk_deinterleave_f64x8_skylake_(b + i * 3, &b_x_f64x8, &b_y_f64x8, &b_z_f64x8);
554
+
555
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
556
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8),
557
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
558
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
559
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8),
560
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
561
+
562
+ __m512d delta_x_f64x8 = _mm512_sub_pd(a_x_f64x8, b_x_f64x8),
563
+ delta_y_f64x8 = _mm512_sub_pd(a_y_f64x8, b_y_f64x8),
564
+ delta_z_f64x8 = _mm512_sub_pd(a_z_f64x8, b_z_f64x8);
565
+ sum_squared_x_f64x8 = _mm512_fmadd_pd(delta_x_f64x8, delta_x_f64x8, sum_squared_x_f64x8);
566
+ sum_squared_y_f64x8 = _mm512_fmadd_pd(delta_y_f64x8, delta_y_f64x8, sum_squared_y_f64x8);
567
+ sum_squared_z_f64x8 = _mm512_fmadd_pd(delta_z_f64x8, delta_z_f64x8, sum_squared_z_f64x8);
568
+
569
+ // Iteration 1
570
+ __m512d a_x1_f64x8, a_y1_f64x8, a_z1_f64x8, b_x1_f64x8, b_y1_f64x8, b_z1_f64x8;
571
+ nk_deinterleave_f64x8_skylake_(a + (i + 8) * 3, &a_x1_f64x8, &a_y1_f64x8, &a_z1_f64x8);
572
+ nk_deinterleave_f64x8_skylake_(b + (i + 8) * 3, &b_x1_f64x8, &b_y1_f64x8, &b_z1_f64x8);
573
+
574
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x1_f64x8),
575
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y1_f64x8),
576
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z1_f64x8);
577
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x1_f64x8),
578
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y1_f64x8),
579
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z1_f64x8);
580
+
581
+ __m512d delta_x1_f64x8 = _mm512_sub_pd(a_x1_f64x8, b_x1_f64x8),
582
+ delta_y1_f64x8 = _mm512_sub_pd(a_y1_f64x8, b_y1_f64x8),
583
+ delta_z1_f64x8 = _mm512_sub_pd(a_z1_f64x8, b_z1_f64x8);
584
+ sum_squared_x_f64x8 = _mm512_fmadd_pd(delta_x1_f64x8, delta_x1_f64x8, sum_squared_x_f64x8);
585
+ sum_squared_y_f64x8 = _mm512_fmadd_pd(delta_y1_f64x8, delta_y1_f64x8, sum_squared_y_f64x8);
586
+ sum_squared_z_f64x8 = _mm512_fmadd_pd(delta_z1_f64x8, delta_z1_f64x8, sum_squared_z_f64x8);
587
+ }
588
+
589
+ // Handle 8-point remainder
590
+ for (; i + 8 <= n; i += 8) {
591
+ nk_deinterleave_f64x8_skylake_(a + i * 3, &a_x_f64x8, &a_y_f64x8, &a_z_f64x8);
592
+ nk_deinterleave_f64x8_skylake_(b + i * 3, &b_x_f64x8, &b_y_f64x8, &b_z_f64x8);
593
+
594
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
595
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8),
596
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
597
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
598
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8),
599
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
600
+
601
+ __m512d delta_x_f64x8 = _mm512_sub_pd(a_x_f64x8, b_x_f64x8),
602
+ delta_y_f64x8 = _mm512_sub_pd(a_y_f64x8, b_y_f64x8),
603
+ delta_z_f64x8 = _mm512_sub_pd(a_z_f64x8, b_z_f64x8);
604
+ sum_squared_x_f64x8 = _mm512_fmadd_pd(delta_x_f64x8, delta_x_f64x8, sum_squared_x_f64x8);
605
+ sum_squared_y_f64x8 = _mm512_fmadd_pd(delta_y_f64x8, delta_y_f64x8, sum_squared_y_f64x8);
606
+ sum_squared_z_f64x8 = _mm512_fmadd_pd(delta_z_f64x8, delta_z_f64x8, sum_squared_z_f64x8);
607
+ }
608
+
609
+ // Tail: use masked gather (masked-off lanes perform no memory access)
610
+ if (i < n) {
611
+ nk_size_t tail = n - i;
612
+ __mmask8 mask = (__mmask8)_bzhi_u32(0xFF, tail);
613
+ nk_f64_t const *a_tail = a + i * 3;
614
+ nk_f64_t const *b_tail = b + i * 3;
615
+
616
+ a_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 0, 8);
617
+ a_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 1, 8);
618
+ a_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 2, 8);
619
+ b_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 0, 8);
620
+ b_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 1, 8);
621
+ b_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 2, 8);
622
+
623
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
624
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8),
625
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
626
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
627
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8),
628
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
629
+
630
+ __m512d delta_x_f64x8 = _mm512_sub_pd(a_x_f64x8, b_x_f64x8),
631
+ delta_y_f64x8 = _mm512_sub_pd(a_y_f64x8, b_y_f64x8),
632
+ delta_z_f64x8 = _mm512_sub_pd(a_z_f64x8, b_z_f64x8);
633
+ sum_squared_x_f64x8 = _mm512_fmadd_pd(delta_x_f64x8, delta_x_f64x8, sum_squared_x_f64x8);
634
+ sum_squared_y_f64x8 = _mm512_fmadd_pd(delta_y_f64x8, delta_y_f64x8, sum_squared_y_f64x8);
635
+ sum_squared_z_f64x8 = _mm512_fmadd_pd(delta_z_f64x8, delta_z_f64x8, sum_squared_z_f64x8);
636
+ // All remaining points are consumed by the masked gather above; advance `i`
+ // so the scalar loop below cannot re-accumulate (double-count) the tail.
+ i = n;
+ }
637
+
638
+ // Reduce and compute centroids.
639
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
640
+ nk_f64_t total_ax = nk_reduce_stable_f64x8_skylake_(sum_a_x_f64x8), total_ax_compensation = 0.0;
641
+ nk_f64_t total_ay = nk_reduce_stable_f64x8_skylake_(sum_a_y_f64x8), total_ay_compensation = 0.0;
642
+ nk_f64_t total_az = nk_reduce_stable_f64x8_skylake_(sum_a_z_f64x8), total_az_compensation = 0.0;
643
+ nk_f64_t total_bx = nk_reduce_stable_f64x8_skylake_(sum_b_x_f64x8), total_bx_compensation = 0.0;
644
+ nk_f64_t total_by = nk_reduce_stable_f64x8_skylake_(sum_b_y_f64x8), total_by_compensation = 0.0;
645
+ nk_f64_t total_bz = nk_reduce_stable_f64x8_skylake_(sum_b_z_f64x8), total_bz_compensation = 0.0;
646
+ nk_f64_t total_squared_x = nk_reduce_stable_f64x8_skylake_(sum_squared_x_f64x8), total_squared_x_compensation = 0.0;
647
+ nk_f64_t total_squared_y = nk_reduce_stable_f64x8_skylake_(sum_squared_y_f64x8), total_squared_y_compensation = 0.0;
648
+ nk_f64_t total_squared_z = nk_reduce_stable_f64x8_skylake_(sum_squared_z_f64x8), total_squared_z_compensation = 0.0;
649
+
650
+ // Scalar Kahan fallback: only reachable if the masked tail was skipped (i == n),
+ // retained as a defensive guard — it is a no-op after the fix above.
+ for (; i < n; ++i) {
651
+ nk_f64_t ax = a[i * 3 + 0], ay = a[i * 3 + 1], az = a[i * 3 + 2];
652
+ nk_f64_t bx = b[i * 3 + 0], by = b[i * 3 + 1], bz = b[i * 3 + 2];
653
+ nk_accumulate_sum_f64_(&total_ax, &total_ax_compensation, ax);
654
+ nk_accumulate_sum_f64_(&total_ay, &total_ay_compensation, ay);
655
+ nk_accumulate_sum_f64_(&total_az, &total_az_compensation, az);
656
+ nk_accumulate_sum_f64_(&total_bx, &total_bx_compensation, bx);
657
+ nk_accumulate_sum_f64_(&total_by, &total_by_compensation, by);
658
+ nk_accumulate_sum_f64_(&total_bz, &total_bz_compensation, bz);
659
+ nk_f64_t delta_x = ax - bx, delta_y = ay - by, delta_z = az - bz;
660
+ nk_accumulate_square_f64_(&total_squared_x, &total_squared_x_compensation, delta_x);
661
+ nk_accumulate_square_f64_(&total_squared_y, &total_squared_y_compensation, delta_y);
662
+ nk_accumulate_square_f64_(&total_squared_z, &total_squared_z_compensation, delta_z);
663
+ }
664
+
665
+ total_ax += total_ax_compensation, total_ay += total_ay_compensation, total_az += total_az_compensation;
666
+ total_bx += total_bx_compensation, total_by += total_by_compensation, total_bz += total_bz_compensation;
667
+ total_squared_x += total_squared_x_compensation, total_squared_y += total_squared_y_compensation,
668
+ total_squared_z += total_squared_z_compensation;
669
+
670
+ nk_f64_t centroid_a_x = total_ax * inv_n, centroid_a_y = total_ay * inv_n, centroid_a_z = total_az * inv_n;
671
+ nk_f64_t centroid_b_x = total_bx * inv_n, centroid_b_y = total_by * inv_n, centroid_b_z = total_bz * inv_n;
672
+
673
+ if (a_centroid) a_centroid[0] = centroid_a_x, a_centroid[1] = centroid_a_y, a_centroid[2] = centroid_a_z;
674
+ if (b_centroid) b_centroid[0] = centroid_b_x, b_centroid[1] = centroid_b_y, b_centroid[2] = centroid_b_z;
675
+
676
+ // Compute RMSD using the formula:
677
+ // RMSD = √(E[(a-b)²] - (ā - b̄)²).
678
+ nk_f64_t mean_diff_x = centroid_a_x - centroid_b_x, mean_diff_y = centroid_a_y - centroid_b_y,
679
+ mean_diff_z = centroid_a_z - centroid_b_z;
680
+ nk_f64_t sum_squared = total_squared_x + total_squared_y + total_squared_z;
681
+ nk_f64_t mean_diff_sq = mean_diff_x * mean_diff_x + mean_diff_y * mean_diff_y + mean_diff_z * mean_diff_z;
682
+
683
+ *result = nk_f64_sqrt_haswell(sum_squared * inv_n - mean_diff_sq);
684
+ }
685
+
686
+ NK_PUBLIC void nk_kabsch_f64_skylake(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
687
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
688
+ // Optimized fused single-pass implementation for f64.
689
+ // Computes centroids and covariance matrix in one pass using the identity:
690
+ // Hᵢⱼ = Σ((aᵢ - ā) × (bⱼ - b̄))
691
+ // = Σ(aᵢ × bⱼ) - Σaᵢ × Σbⱼ / n
692
+ __m512i const gather_idx_i64x8 = _mm512_setr_epi64(0, 3, 6, 9, 12, 15, 18, 21);
693
+ __m512d const zeros_f64x8 = _mm512_setzero_pd();
694
+
695
+ // Accumulators for centroids
696
+ __m512d sum_a_x_f64x8 = zeros_f64x8, sum_a_y_f64x8 = zeros_f64x8, sum_a_z_f64x8 = zeros_f64x8;
697
+ __m512d sum_b_x_f64x8 = zeros_f64x8, sum_b_y_f64x8 = zeros_f64x8, sum_b_z_f64x8 = zeros_f64x8;
698
+
699
+ // Accumulators for covariance matrix (sum of outer products)
700
+ __m512d cov_xx_f64x8 = zeros_f64x8, cov_xy_f64x8 = zeros_f64x8, cov_xz_f64x8 = zeros_f64x8;
701
+ __m512d cov_yx_f64x8 = zeros_f64x8, cov_yy_f64x8 = zeros_f64x8, cov_yz_f64x8 = zeros_f64x8;
702
+ __m512d cov_zx_f64x8 = zeros_f64x8, cov_zy_f64x8 = zeros_f64x8, cov_zz_f64x8 = zeros_f64x8;
703
+
704
+ nk_size_t i = 0;
705
+ __m512d a_x_f64x8, a_y_f64x8, a_z_f64x8, b_x_f64x8, b_y_f64x8, b_z_f64x8;
706
+
707
+ // Fused single-pass: accumulate sums and outer products together
708
+ for (; i + 8 <= n; i += 8) {
709
+ nk_deinterleave_f64x8_skylake_(a + i * 3, &a_x_f64x8, &a_y_f64x8, &a_z_f64x8);
710
+ nk_deinterleave_f64x8_skylake_(b + i * 3, &b_x_f64x8, &b_y_f64x8, &b_z_f64x8);
711
+
712
+ // Accumulate centroids
713
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
714
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8),
715
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
716
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
717
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8),
718
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
719
+
720
+ // Accumulate outer products (raw, not centered)
721
+ cov_xx_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_x_f64x8, cov_xx_f64x8),
722
+ cov_xy_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_y_f64x8, cov_xy_f64x8),
723
+ cov_xz_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_z_f64x8, cov_xz_f64x8);
724
+ cov_yx_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_x_f64x8, cov_yx_f64x8),
725
+ cov_yy_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_y_f64x8, cov_yy_f64x8),
726
+ cov_yz_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_z_f64x8, cov_yz_f64x8);
727
+ cov_zx_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_x_f64x8, cov_zx_f64x8),
728
+ cov_zy_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_y_f64x8, cov_zy_f64x8),
729
+ cov_zz_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_z_f64x8, cov_zz_f64x8);
730
+ }
731
+
732
+ // Tail: masked gather for remaining points
733
+ if (i < n) {
734
+ nk_size_t tail = n - i;
735
+ __mmask8 mask = (__mmask8)_bzhi_u32(0xFF, tail);
736
+ nk_f64_t const *a_tail = a + i * 3;
737
+ nk_f64_t const *b_tail = b + i * 3;
738
+
739
+ a_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 0, 8);
740
+ a_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 1, 8);
741
+ a_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 2, 8);
742
+ b_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 0, 8);
743
+ b_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 1, 8);
744
+ b_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 2, 8);
745
+
746
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
747
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8),
748
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
749
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
750
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8),
751
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
752
+
753
+ cov_xx_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_x_f64x8, cov_xx_f64x8),
754
+ cov_xy_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_y_f64x8, cov_xy_f64x8),
755
+ cov_xz_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_z_f64x8, cov_xz_f64x8);
756
+ cov_yx_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_x_f64x8, cov_yx_f64x8),
757
+ cov_yy_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_y_f64x8, cov_yy_f64x8),
758
+ cov_yz_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_z_f64x8, cov_yz_f64x8);
759
+ cov_zx_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_x_f64x8, cov_zx_f64x8),
760
+ cov_zy_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_y_f64x8, cov_zy_f64x8),
761
+ cov_zz_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_z_f64x8, cov_zz_f64x8);
762
+ }
763
+
764
+ // Reduce centroids and covariance.
765
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
766
+ nk_f64_t sum_a_x = nk_reduce_stable_f64x8_skylake_(sum_a_x_f64x8), sum_a_x_compensation = 0.0;
767
+ nk_f64_t sum_a_y = nk_reduce_stable_f64x8_skylake_(sum_a_y_f64x8), sum_a_y_compensation = 0.0;
768
+ nk_f64_t sum_a_z = nk_reduce_stable_f64x8_skylake_(sum_a_z_f64x8), sum_a_z_compensation = 0.0;
769
+ nk_f64_t sum_b_x = nk_reduce_stable_f64x8_skylake_(sum_b_x_f64x8), sum_b_x_compensation = 0.0;
770
+ nk_f64_t sum_b_y = nk_reduce_stable_f64x8_skylake_(sum_b_y_f64x8), sum_b_y_compensation = 0.0;
771
+ nk_f64_t sum_b_z = nk_reduce_stable_f64x8_skylake_(sum_b_z_f64x8), sum_b_z_compensation = 0.0;
772
+ nk_f64_t covariance_x_x = nk_reduce_stable_f64x8_skylake_(cov_xx_f64x8), covariance_x_x_compensation = 0.0;
773
+ nk_f64_t covariance_x_y = nk_reduce_stable_f64x8_skylake_(cov_xy_f64x8), covariance_x_y_compensation = 0.0;
774
+ nk_f64_t covariance_x_z = nk_reduce_stable_f64x8_skylake_(cov_xz_f64x8), covariance_x_z_compensation = 0.0;
775
+ nk_f64_t covariance_y_x = nk_reduce_stable_f64x8_skylake_(cov_yx_f64x8), covariance_y_x_compensation = 0.0;
776
+ nk_f64_t covariance_y_y = nk_reduce_stable_f64x8_skylake_(cov_yy_f64x8), covariance_y_y_compensation = 0.0;
777
+ nk_f64_t covariance_y_z = nk_reduce_stable_f64x8_skylake_(cov_yz_f64x8), covariance_y_z_compensation = 0.0;
778
+ nk_f64_t covariance_z_x = nk_reduce_stable_f64x8_skylake_(cov_zx_f64x8), covariance_z_x_compensation = 0.0;
779
+ nk_f64_t covariance_z_y = nk_reduce_stable_f64x8_skylake_(cov_zy_f64x8), covariance_z_y_compensation = 0.0;
780
+ nk_f64_t covariance_z_z = nk_reduce_stable_f64x8_skylake_(cov_zz_f64x8), covariance_z_z_compensation = 0.0;
781
+
782
+ for (; i < n; ++i) {
783
+ nk_f64_t ax = a[i * 3 + 0], ay = a[i * 3 + 1], az = a[i * 3 + 2];
784
+ nk_f64_t bx = b[i * 3 + 0], by = b[i * 3 + 1], bz = b[i * 3 + 2];
785
+ nk_accumulate_sum_f64_(&sum_a_x, &sum_a_x_compensation, ax);
786
+ nk_accumulate_sum_f64_(&sum_a_y, &sum_a_y_compensation, ay);
787
+ nk_accumulate_sum_f64_(&sum_a_z, &sum_a_z_compensation, az);
788
+ nk_accumulate_sum_f64_(&sum_b_x, &sum_b_x_compensation, bx);
789
+ nk_accumulate_sum_f64_(&sum_b_y, &sum_b_y_compensation, by);
790
+ nk_accumulate_sum_f64_(&sum_b_z, &sum_b_z_compensation, bz);
791
+ nk_accumulate_product_f64_(&covariance_x_x, &covariance_x_x_compensation, ax, bx);
792
+ nk_accumulate_product_f64_(&covariance_x_y, &covariance_x_y_compensation, ax, by);
793
+ nk_accumulate_product_f64_(&covariance_x_z, &covariance_x_z_compensation, ax, bz);
794
+ nk_accumulate_product_f64_(&covariance_y_x, &covariance_y_x_compensation, ay, bx);
795
+ nk_accumulate_product_f64_(&covariance_y_y, &covariance_y_y_compensation, ay, by);
796
+ nk_accumulate_product_f64_(&covariance_y_z, &covariance_y_z_compensation, ay, bz);
797
+ nk_accumulate_product_f64_(&covariance_z_x, &covariance_z_x_compensation, az, bx);
798
+ nk_accumulate_product_f64_(&covariance_z_y, &covariance_z_y_compensation, az, by);
799
+ nk_accumulate_product_f64_(&covariance_z_z, &covariance_z_z_compensation, az, bz);
800
+ }
801
+
802
+ sum_a_x += sum_a_x_compensation, sum_a_y += sum_a_y_compensation, sum_a_z += sum_a_z_compensation;
803
+ sum_b_x += sum_b_x_compensation, sum_b_y += sum_b_y_compensation, sum_b_z += sum_b_z_compensation;
804
+ covariance_x_x += covariance_x_x_compensation, covariance_x_y += covariance_x_y_compensation,
805
+ covariance_x_z += covariance_x_z_compensation;
806
+ covariance_y_x += covariance_y_x_compensation, covariance_y_y += covariance_y_y_compensation,
807
+ covariance_y_z += covariance_y_z_compensation;
808
+ covariance_z_x += covariance_z_x_compensation, covariance_z_y += covariance_z_y_compensation,
809
+ covariance_z_z += covariance_z_z_compensation;
810
+
811
+ nk_f64_t centroid_a_x = sum_a_x * inv_n, centroid_a_y = sum_a_y * inv_n, centroid_a_z = sum_a_z * inv_n;
812
+ nk_f64_t centroid_b_x = sum_b_x * inv_n, centroid_b_y = sum_b_y * inv_n, centroid_b_z = sum_b_z * inv_n;
813
+
814
+ if (a_centroid) a_centroid[0] = centroid_a_x, a_centroid[1] = centroid_a_y, a_centroid[2] = centroid_a_z;
815
+ if (b_centroid) b_centroid[0] = centroid_b_x, b_centroid[1] = centroid_b_y, b_centroid[2] = centroid_b_z;
816
+
817
+ // Compute centered covariance matrix: Hᵢⱼ = Σ(aᵢ×bⱼ) - Σaᵢ × Σbⱼ / n.
818
+ nk_f64_t cross_covariance[9];
819
+ cross_covariance[0] = covariance_x_x - sum_a_x * sum_b_x * inv_n;
820
+ cross_covariance[1] = covariance_x_y - sum_a_x * sum_b_y * inv_n;
821
+ cross_covariance[2] = covariance_x_z - sum_a_x * sum_b_z * inv_n;
822
+ cross_covariance[3] = covariance_y_x - sum_a_y * sum_b_x * inv_n;
823
+ cross_covariance[4] = covariance_y_y - sum_a_y * sum_b_y * inv_n;
824
+ cross_covariance[5] = covariance_y_z - sum_a_y * sum_b_z * inv_n;
825
+ cross_covariance[6] = covariance_z_x - sum_a_z * sum_b_x * inv_n;
826
+ cross_covariance[7] = covariance_z_y - sum_a_z * sum_b_y * inv_n;
827
+ cross_covariance[8] = covariance_z_z - sum_a_z * sum_b_z * inv_n;
828
+
829
+ // SVD using f64 for full precision
830
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
831
+ nk_svd3x3_f64_(cross_covariance, svd_u, svd_s, svd_v);
832
+
833
+ nk_f64_t r[9];
834
+ nk_rotation_from_svd_f64_skylake_(svd_u, svd_v, r);
835
+
836
+ // Handle reflection
837
+ if (nk_det3x3_f64_(r) < 0) {
838
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
839
+ nk_rotation_from_svd_f64_skylake_(svd_u, svd_v, r);
840
+ }
841
+
842
+ // Output rotation matrix and scale=1.0.
843
+ if (rotation) {
844
+ for (int j = 0; j < 9; ++j) rotation[j] = (nk_f64_t)r[j];
845
+ }
846
+ if (scale) *scale = 1.0;
847
+
848
+ // Compute RMSD after optimal rotation
849
+ nk_f64_t sum_squared = nk_transformed_ssd_f64_skylake_(a, b, n, r, 1.0, centroid_a_x, centroid_a_y, centroid_a_z,
850
+ centroid_b_x, centroid_b_y, centroid_b_z);
851
+ *result = nk_f64_sqrt_haswell(sum_squared * inv_n);
852
+ }
853
+
854
+ NK_INTERNAL void nk_centroid_and_cross_covariance_and_variance_f32_skylake_( //
855
+ nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, //
856
+ nk_f64_t *centroid_a_x, nk_f64_t *centroid_a_y, nk_f64_t *centroid_a_z, nk_f64_t *centroid_b_x,
857
+ nk_f64_t *centroid_b_y, nk_f64_t *centroid_b_z, nk_f64_t cross_covariance_f64[9], nk_f64_t *variance_a) {
858
+ nk_centroid_and_cross_covariance_f32_skylake_(a, b, n, centroid_a_x, centroid_a_y, centroid_a_z, centroid_b_x,
859
+ centroid_b_y, centroid_b_z, cross_covariance_f64);
860
+ __m512d variance_a_f64x8 = _mm512_setzero_pd();
861
+ __m512 a_x_f32x16, a_y_f32x16, a_z_f32x16;
862
+ nk_size_t index = 0;
863
+
864
+ for (; index + 16 <= n; index += 16) {
865
+ nk_deinterleave_f32x16_skylake_(a + index * 3, &a_x_f32x16, &a_y_f32x16, &a_z_f32x16);
866
+ __m512d a_x_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_x_f32x16));
867
+ __m512d a_x_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_x_f32x16, 1));
868
+ __m512d a_y_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_y_f32x16));
869
+ __m512d a_y_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_y_f32x16, 1));
870
+ __m512d a_z_lower_f64x8 = _mm512_cvtps_pd(_mm512_castps512_ps256(a_z_f32x16));
871
+ __m512d a_z_upper_f64x8 = _mm512_cvtps_pd(_mm512_extractf32x8_ps(a_z_f32x16, 1));
872
+ __m512d batch_norm_squared_f64x8 = _mm512_add_pd(_mm512_mul_pd(a_x_lower_f64x8, a_x_lower_f64x8),
873
+ _mm512_mul_pd(a_x_upper_f64x8, a_x_upper_f64x8));
874
+ batch_norm_squared_f64x8 = _mm512_fmadd_pd(a_y_lower_f64x8, a_y_lower_f64x8, batch_norm_squared_f64x8);
875
+ batch_norm_squared_f64x8 = _mm512_fmadd_pd(a_y_upper_f64x8, a_y_upper_f64x8, batch_norm_squared_f64x8);
876
+ batch_norm_squared_f64x8 = _mm512_fmadd_pd(a_z_lower_f64x8, a_z_lower_f64x8, batch_norm_squared_f64x8);
877
+ batch_norm_squared_f64x8 = _mm512_fmadd_pd(a_z_upper_f64x8, a_z_upper_f64x8, batch_norm_squared_f64x8);
878
+ variance_a_f64x8 = _mm512_add_pd(variance_a_f64x8, batch_norm_squared_f64x8);
879
+ }
880
+
881
+ nk_f64_t variance_sum = _mm512_reduce_add_pd(variance_a_f64x8);
882
+ for (; index < n; ++index) {
883
+ nk_f64_t a_x = a[index * 3 + 0], a_y = a[index * 3 + 1], a_z = a[index * 3 + 2];
884
+ variance_sum += a_x * a_x + a_y * a_y + a_z * a_z;
885
+ }
886
+
887
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
888
+ *variance_a = variance_sum * inv_n - ((*centroid_a_x) * (*centroid_a_x) + (*centroid_a_y) * (*centroid_a_y) +
889
+ (*centroid_a_z) * (*centroid_a_z));
890
+ }
891
+
892
+ NK_PUBLIC void nk_umeyama_f32_skylake(nk_f32_t const *a, nk_f32_t const *b, nk_size_t n, nk_f32_t *a_centroid,
893
+ nk_f32_t *b_centroid, nk_f32_t *rotation, nk_f32_t *scale, nk_f64_t *result) {
894
+ nk_f64_t centroid_a_x, centroid_a_y, centroid_a_z, centroid_b_x, centroid_b_y, centroid_b_z, variance_a;
895
+ nk_f64_t cross_covariance_f64[9];
896
+ nk_centroid_and_cross_covariance_and_variance_f32_skylake_(a, b, n, &centroid_a_x, &centroid_a_y, &centroid_a_z,
897
+ &centroid_b_x, &centroid_b_y, &centroid_b_z,
898
+ cross_covariance_f64, &variance_a);
899
+ if (a_centroid)
900
+ a_centroid[0] = (nk_f32_t)centroid_a_x, a_centroid[1] = (nk_f32_t)centroid_a_y,
901
+ a_centroid[2] = (nk_f32_t)centroid_a_z;
902
+ if (b_centroid)
903
+ b_centroid[0] = (nk_f32_t)centroid_b_x, b_centroid[1] = (nk_f32_t)centroid_b_y,
904
+ b_centroid[2] = (nk_f32_t)centroid_b_z;
905
+
906
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9], r[9];
907
+ nk_svd3x3_f64_(cross_covariance_f64, svd_u, svd_s, svd_v);
908
+ r[0] = svd_v[0] * svd_u[0] + svd_v[1] * svd_u[1] + svd_v[2] * svd_u[2];
909
+ r[1] = svd_v[0] * svd_u[3] + svd_v[1] * svd_u[4] + svd_v[2] * svd_u[5];
910
+ r[2] = svd_v[0] * svd_u[6] + svd_v[1] * svd_u[7] + svd_v[2] * svd_u[8];
911
+ r[3] = svd_v[3] * svd_u[0] + svd_v[4] * svd_u[1] + svd_v[5] * svd_u[2];
912
+ r[4] = svd_v[3] * svd_u[3] + svd_v[4] * svd_u[4] + svd_v[5] * svd_u[5];
913
+ r[5] = svd_v[3] * svd_u[6] + svd_v[4] * svd_u[7] + svd_v[5] * svd_u[8];
914
+ r[6] = svd_v[6] * svd_u[0] + svd_v[7] * svd_u[1] + svd_v[8] * svd_u[2];
915
+ r[7] = svd_v[6] * svd_u[3] + svd_v[7] * svd_u[4] + svd_v[8] * svd_u[5];
916
+ r[8] = svd_v[6] * svd_u[6] + svd_v[7] * svd_u[7] + svd_v[8] * svd_u[8];
917
+
918
+ nk_f64_t det = nk_det3x3_f64_(r);
919
+ nk_f64_t trace_signed_singular_values = svd_s[0] + svd_s[4] + (det < 0 ? -svd_s[8] : svd_s[8]);
920
+ nk_f64_t applied_scale = trace_signed_singular_values / ((nk_f64_t)n * variance_a);
921
+ if (det < 0) {
922
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
923
+ r[0] = svd_v[0] * svd_u[0] + svd_v[1] * svd_u[1] + svd_v[2] * svd_u[2];
924
+ r[1] = svd_v[0] * svd_u[3] + svd_v[1] * svd_u[4] + svd_v[2] * svd_u[5];
925
+ r[2] = svd_v[0] * svd_u[6] + svd_v[1] * svd_u[7] + svd_v[2] * svd_u[8];
926
+ r[3] = svd_v[3] * svd_u[0] + svd_v[4] * svd_u[1] + svd_v[5] * svd_u[2];
927
+ r[4] = svd_v[3] * svd_u[3] + svd_v[4] * svd_u[4] + svd_v[5] * svd_u[5];
928
+ r[5] = svd_v[3] * svd_u[6] + svd_v[4] * svd_u[7] + svd_v[5] * svd_u[8];
929
+ r[6] = svd_v[6] * svd_u[0] + svd_v[7] * svd_u[1] + svd_v[8] * svd_u[2];
930
+ r[7] = svd_v[6] * svd_u[3] + svd_v[7] * svd_u[4] + svd_v[8] * svd_u[5];
931
+ r[8] = svd_v[6] * svd_u[6] + svd_v[7] * svd_u[7] + svd_v[8] * svd_u[8];
932
+ }
933
+
934
+ if (rotation)
935
+ for (int index = 0; index != 9; ++index) rotation[index] = (nk_f32_t)r[index];
936
+ if (scale) *scale = (nk_f32_t)applied_scale;
937
+ *result = nk_f64_sqrt_haswell(nk_transformed_ssd_f32_skylake_(a, b, n, r, applied_scale, centroid_a_x, centroid_a_y,
938
+ centroid_a_z, centroid_b_x, centroid_b_y,
939
+ centroid_b_z) /
940
+ (nk_f64_t)n);
941
+ }
942
+
943
+ NK_PUBLIC void nk_umeyama_f64_skylake(nk_f64_t const *a, nk_f64_t const *b, nk_size_t n, nk_f64_t *a_centroid,
944
+ nk_f64_t *b_centroid, nk_f64_t *rotation, nk_f64_t *scale, nk_f64_t *result) {
945
+ // Fused single-pass: centroids, covariance, and variance of A
946
+ __m512i const gather_idx_i64x8 = _mm512_setr_epi64(0, 3, 6, 9, 12, 15, 18, 21);
947
+ __m512d const zeros_f64x8 = _mm512_setzero_pd();
948
+
949
+ __m512d sum_a_x_f64x8 = zeros_f64x8, sum_a_y_f64x8 = zeros_f64x8, sum_a_z_f64x8 = zeros_f64x8;
950
+ __m512d sum_b_x_f64x8 = zeros_f64x8, sum_b_y_f64x8 = zeros_f64x8, sum_b_z_f64x8 = zeros_f64x8;
951
+ __m512d cov_xx_f64x8 = zeros_f64x8, cov_xy_f64x8 = zeros_f64x8, cov_xz_f64x8 = zeros_f64x8;
952
+ __m512d cov_yx_f64x8 = zeros_f64x8, cov_yy_f64x8 = zeros_f64x8, cov_yz_f64x8 = zeros_f64x8;
953
+ __m512d cov_zx_f64x8 = zeros_f64x8, cov_zy_f64x8 = zeros_f64x8, cov_zz_f64x8 = zeros_f64x8;
954
+ __m512d variance_a_f64x8 = zeros_f64x8;
955
+
956
+ nk_size_t i = 0;
957
+ __m512d a_x_f64x8, a_y_f64x8, a_z_f64x8, b_x_f64x8, b_y_f64x8, b_z_f64x8;
958
+
959
+ for (; i + 8 <= n; i += 8) {
960
+ nk_deinterleave_f64x8_skylake_(a + i * 3, &a_x_f64x8, &a_y_f64x8, &a_z_f64x8);
961
+ nk_deinterleave_f64x8_skylake_(b + i * 3, &b_x_f64x8, &b_y_f64x8, &b_z_f64x8);
962
+
963
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
964
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8);
965
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
966
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
967
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8);
968
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
969
+
970
+ cov_xx_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_x_f64x8, cov_xx_f64x8),
971
+ cov_xy_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_y_f64x8, cov_xy_f64x8);
972
+ cov_xz_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_z_f64x8, cov_xz_f64x8);
973
+ cov_yx_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_x_f64x8, cov_yx_f64x8),
974
+ cov_yy_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_y_f64x8, cov_yy_f64x8);
975
+ cov_yz_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_z_f64x8, cov_yz_f64x8);
976
+ cov_zx_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_x_f64x8, cov_zx_f64x8),
977
+ cov_zy_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_y_f64x8, cov_zy_f64x8);
978
+ cov_zz_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_z_f64x8, cov_zz_f64x8);
979
+ variance_a_f64x8 = _mm512_fmadd_pd(a_x_f64x8, a_x_f64x8, variance_a_f64x8);
980
+ variance_a_f64x8 = _mm512_fmadd_pd(a_y_f64x8, a_y_f64x8, variance_a_f64x8);
981
+ variance_a_f64x8 = _mm512_fmadd_pd(a_z_f64x8, a_z_f64x8, variance_a_f64x8);
982
+ }
983
+
984
+ if (i < n) {
985
+ nk_size_t tail = n - i;
986
+ __mmask8 mask = (__mmask8)_bzhi_u32(0xFF, tail);
987
+ nk_f64_t const *a_tail = a + i * 3;
988
+ nk_f64_t const *b_tail = b + i * 3;
989
+
990
+ a_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 0, 8);
991
+ a_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 1, 8);
992
+ a_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, a_tail + 2, 8);
993
+ b_x_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 0, 8);
994
+ b_y_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 1, 8);
995
+ b_z_f64x8 = _mm512_mask_i64gather_pd(zeros_f64x8, mask, gather_idx_i64x8, b_tail + 2, 8);
996
+
997
+ sum_a_x_f64x8 = _mm512_add_pd(sum_a_x_f64x8, a_x_f64x8),
998
+ sum_a_y_f64x8 = _mm512_add_pd(sum_a_y_f64x8, a_y_f64x8);
999
+ sum_a_z_f64x8 = _mm512_add_pd(sum_a_z_f64x8, a_z_f64x8);
1000
+ sum_b_x_f64x8 = _mm512_add_pd(sum_b_x_f64x8, b_x_f64x8),
1001
+ sum_b_y_f64x8 = _mm512_add_pd(sum_b_y_f64x8, b_y_f64x8);
1002
+ sum_b_z_f64x8 = _mm512_add_pd(sum_b_z_f64x8, b_z_f64x8);
1003
+
1004
+ cov_xx_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_x_f64x8, cov_xx_f64x8),
1005
+ cov_xy_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_y_f64x8, cov_xy_f64x8);
1006
+ cov_xz_f64x8 = _mm512_fmadd_pd(a_x_f64x8, b_z_f64x8, cov_xz_f64x8);
1007
+ cov_yx_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_x_f64x8, cov_yx_f64x8),
1008
+ cov_yy_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_y_f64x8, cov_yy_f64x8);
1009
+ cov_yz_f64x8 = _mm512_fmadd_pd(a_y_f64x8, b_z_f64x8, cov_yz_f64x8);
1010
+ cov_zx_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_x_f64x8, cov_zx_f64x8),
1011
+ cov_zy_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_y_f64x8, cov_zy_f64x8);
1012
+ cov_zz_f64x8 = _mm512_fmadd_pd(a_z_f64x8, b_z_f64x8, cov_zz_f64x8);
1013
+ variance_a_f64x8 = _mm512_fmadd_pd(a_x_f64x8, a_x_f64x8, variance_a_f64x8);
1014
+ variance_a_f64x8 = _mm512_fmadd_pd(a_y_f64x8, a_y_f64x8, variance_a_f64x8);
1015
+ variance_a_f64x8 = _mm512_fmadd_pd(a_z_f64x8, a_z_f64x8, variance_a_f64x8);
1016
+ }
1017
+
1018
+ // Reduce centroids, covariance, and variance.
1019
+ nk_f64_t inv_n = 1.0 / (nk_f64_t)n;
1020
+ nk_f64_t sum_a_x = nk_reduce_stable_f64x8_skylake_(sum_a_x_f64x8), sum_a_x_compensation = 0.0;
1021
+ nk_f64_t sum_a_y = nk_reduce_stable_f64x8_skylake_(sum_a_y_f64x8), sum_a_y_compensation = 0.0;
1022
+ nk_f64_t sum_a_z = nk_reduce_stable_f64x8_skylake_(sum_a_z_f64x8), sum_a_z_compensation = 0.0;
1023
+ nk_f64_t sum_b_x = nk_reduce_stable_f64x8_skylake_(sum_b_x_f64x8), sum_b_x_compensation = 0.0;
1024
+ nk_f64_t sum_b_y = nk_reduce_stable_f64x8_skylake_(sum_b_y_f64x8), sum_b_y_compensation = 0.0;
1025
+ nk_f64_t sum_b_z = nk_reduce_stable_f64x8_skylake_(sum_b_z_f64x8), sum_b_z_compensation = 0.0;
1026
+ nk_f64_t covariance_x_x = nk_reduce_stable_f64x8_skylake_(cov_xx_f64x8), covariance_x_x_compensation = 0.0;
1027
+ nk_f64_t covariance_x_y = nk_reduce_stable_f64x8_skylake_(cov_xy_f64x8), covariance_x_y_compensation = 0.0;
1028
+ nk_f64_t covariance_x_z = nk_reduce_stable_f64x8_skylake_(cov_xz_f64x8), covariance_x_z_compensation = 0.0;
1029
+ nk_f64_t covariance_y_x = nk_reduce_stable_f64x8_skylake_(cov_yx_f64x8), covariance_y_x_compensation = 0.0;
1030
+ nk_f64_t covariance_y_y = nk_reduce_stable_f64x8_skylake_(cov_yy_f64x8), covariance_y_y_compensation = 0.0;
1031
+ nk_f64_t covariance_y_z = nk_reduce_stable_f64x8_skylake_(cov_yz_f64x8), covariance_y_z_compensation = 0.0;
1032
+ nk_f64_t covariance_z_x = nk_reduce_stable_f64x8_skylake_(cov_zx_f64x8), covariance_z_x_compensation = 0.0;
1033
+ nk_f64_t covariance_z_y = nk_reduce_stable_f64x8_skylake_(cov_zy_f64x8), covariance_z_y_compensation = 0.0;
1034
+ nk_f64_t covariance_z_z = nk_reduce_stable_f64x8_skylake_(cov_zz_f64x8), covariance_z_z_compensation = 0.0;
1035
+ nk_f64_t variance_a_sum = nk_reduce_stable_f64x8_skylake_(variance_a_f64x8), variance_a_compensation = 0.0;
1036
+
1037
+ for (; i < n; ++i) {
1038
+ nk_f64_t ax = a[i * 3 + 0], ay = a[i * 3 + 1], az = a[i * 3 + 2];
1039
+ nk_f64_t bx = b[i * 3 + 0], by = b[i * 3 + 1], bz = b[i * 3 + 2];
1040
+ nk_accumulate_sum_f64_(&sum_a_x, &sum_a_x_compensation, ax);
1041
+ nk_accumulate_sum_f64_(&sum_a_y, &sum_a_y_compensation, ay);
1042
+ nk_accumulate_sum_f64_(&sum_a_z, &sum_a_z_compensation, az);
1043
+ nk_accumulate_sum_f64_(&sum_b_x, &sum_b_x_compensation, bx);
1044
+ nk_accumulate_sum_f64_(&sum_b_y, &sum_b_y_compensation, by);
1045
+ nk_accumulate_sum_f64_(&sum_b_z, &sum_b_z_compensation, bz);
1046
+ nk_accumulate_product_f64_(&covariance_x_x, &covariance_x_x_compensation, ax, bx);
1047
+ nk_accumulate_product_f64_(&covariance_x_y, &covariance_x_y_compensation, ax, by);
1048
+ nk_accumulate_product_f64_(&covariance_x_z, &covariance_x_z_compensation, ax, bz);
1049
+ nk_accumulate_product_f64_(&covariance_y_x, &covariance_y_x_compensation, ay, bx);
1050
+ nk_accumulate_product_f64_(&covariance_y_y, &covariance_y_y_compensation, ay, by);
1051
+ nk_accumulate_product_f64_(&covariance_y_z, &covariance_y_z_compensation, ay, bz);
1052
+ nk_accumulate_product_f64_(&covariance_z_x, &covariance_z_x_compensation, az, bx);
1053
+ nk_accumulate_product_f64_(&covariance_z_y, &covariance_z_y_compensation, az, by);
1054
+ nk_accumulate_product_f64_(&covariance_z_z, &covariance_z_z_compensation, az, bz);
1055
+ nk_accumulate_square_f64_(&variance_a_sum, &variance_a_compensation, ax);
1056
+ nk_accumulate_square_f64_(&variance_a_sum, &variance_a_compensation, ay);
1057
+ nk_accumulate_square_f64_(&variance_a_sum, &variance_a_compensation, az);
1058
+ }
1059
+
1060
+ sum_a_x += sum_a_x_compensation, sum_a_y += sum_a_y_compensation, sum_a_z += sum_a_z_compensation;
1061
+ sum_b_x += sum_b_x_compensation, sum_b_y += sum_b_y_compensation, sum_b_z += sum_b_z_compensation;
1062
+ covariance_x_x += covariance_x_x_compensation, covariance_x_y += covariance_x_y_compensation,
1063
+ covariance_x_z += covariance_x_z_compensation;
1064
+ covariance_y_x += covariance_y_x_compensation, covariance_y_y += covariance_y_y_compensation,
1065
+ covariance_y_z += covariance_y_z_compensation;
1066
+ covariance_z_x += covariance_z_x_compensation, covariance_z_y += covariance_z_y_compensation,
1067
+ covariance_z_z += covariance_z_z_compensation;
1068
+ variance_a_sum += variance_a_compensation;
1069
+
1070
+ nk_f64_t centroid_a_x = sum_a_x * inv_n, centroid_a_y = sum_a_y * inv_n, centroid_a_z = sum_a_z * inv_n;
1071
+ nk_f64_t centroid_b_x = sum_b_x * inv_n, centroid_b_y = sum_b_y * inv_n, centroid_b_z = sum_b_z * inv_n;
1072
+
1073
+ if (a_centroid) a_centroid[0] = centroid_a_x, a_centroid[1] = centroid_a_y, a_centroid[2] = centroid_a_z;
1074
+ if (b_centroid) b_centroid[0] = centroid_b_x, b_centroid[1] = centroid_b_y, b_centroid[2] = centroid_b_z;
1075
+
1076
+ // Compute centered covariance and variance.
1077
+ nk_f64_t variance_a = variance_a_sum * inv_n -
1078
+ (centroid_a_x * centroid_a_x + centroid_a_y * centroid_a_y + centroid_a_z * centroid_a_z);
1079
+
1080
+ // Compute centered covariance matrix: Hᵢⱼ = Σ(aᵢ×bⱼ) - Σaᵢ × Σbⱼ / n.
1081
+ nk_f64_t cross_covariance[9];
1082
+ cross_covariance[0] = covariance_x_x - sum_a_x * sum_b_x * inv_n;
1083
+ cross_covariance[1] = covariance_x_y - sum_a_x * sum_b_y * inv_n;
1084
+ cross_covariance[2] = covariance_x_z - sum_a_x * sum_b_z * inv_n;
1085
+ cross_covariance[3] = covariance_y_x - sum_a_y * sum_b_x * inv_n;
1086
+ cross_covariance[4] = covariance_y_y - sum_a_y * sum_b_y * inv_n;
1087
+ cross_covariance[5] = covariance_y_z - sum_a_y * sum_b_z * inv_n;
1088
+ cross_covariance[6] = covariance_z_x - sum_a_z * sum_b_x * inv_n;
1089
+ cross_covariance[7] = covariance_z_y - sum_a_z * sum_b_y * inv_n;
1090
+ cross_covariance[8] = covariance_z_z - sum_a_z * sum_b_z * inv_n;
1091
+
1092
+ // SVD using f64 for full precision
1093
+ nk_f64_t svd_u[9], svd_s[9], svd_v[9];
1094
+ nk_svd3x3_f64_(cross_covariance, svd_u, svd_s, svd_v);
1095
+
1096
+ nk_f64_t r[9];
1097
+ nk_rotation_from_svd_f64_skylake_(svd_u, svd_v, r);
1098
+
1099
+ // Scale factor: c = trace(D × S) / (n × variance(a))
1100
+ nk_f64_t det = nk_det3x3_f64_(r);
1101
+ nk_f64_t d3 = det < 0 ? -1.0 : 1.0;
1102
+ nk_f64_t trace_ds = nk_sum_three_products_f64_(svd_s[0], 1.0, svd_s[4], 1.0, svd_s[8], d3);
1103
+ nk_f64_t c = trace_ds / (n * variance_a);
1104
+ if (scale) *scale = c;
1105
+
1106
+ // Handle reflection
1107
+ if (det < 0) {
1108
+ svd_v[2] = -svd_v[2], svd_v[5] = -svd_v[5], svd_v[8] = -svd_v[8];
1109
+ nk_rotation_from_svd_f64_skylake_(svd_u, svd_v, r);
1110
+ }
1111
+
1112
+ // Output rotation matrix.
1113
+ if (rotation) {
1114
+ for (int j = 0; j < 9; ++j) rotation[j] = (nk_f64_t)r[j];
1115
+ }
1116
+
1117
+ // Compute RMSD with scaling
1118
+ nk_f64_t sum_squared = nk_transformed_ssd_f64_skylake_(a, b, n, r, c, centroid_a_x, centroid_a_y, centroid_a_z,
1119
+ centroid_b_x, centroid_b_y, centroid_b_z);
1120
+ *result = nk_f64_sqrt_haswell(sum_squared * inv_n);
1121
+ }
1122
+
1123
+ #if defined(__clang__)
1124
+ #pragma clang attribute pop
1125
+ #elif defined(__GNUC__)
1126
+ #pragma GCC pop_options
1127
+ #endif
1128
+
1129
+ #if defined(__cplusplus)
1130
+ } // extern "C"
1131
+ #endif
1132
+
1133
+ #endif // NK_TARGET_SKYLAKE
1134
+ #endif // NK_TARGET_X86_
1135
+ #endif // NK_MESH_SKYLAKE_H