faiss 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +16 -0
  3. data/LICENSE.txt +1 -1
  4. data/ext/faiss/ext.cpp +1 -1
  5. data/ext/faiss/extconf.rb +5 -6
  6. data/ext/faiss/index_binary.cpp +76 -17
  7. data/ext/faiss/{index.cpp → index_rb.cpp} +108 -35
  8. data/ext/faiss/kmeans.cpp +12 -9
  9. data/ext/faiss/numo.hpp +11 -9
  10. data/ext/faiss/pca_matrix.cpp +10 -8
  11. data/ext/faiss/product_quantizer.cpp +14 -12
  12. data/ext/faiss/{utils.cpp → utils_rb.cpp} +10 -3
  13. data/ext/faiss/{utils.h → utils_rb.h} +6 -0
  14. data/lib/faiss/version.rb +1 -1
  15. data/lib/faiss.rb +1 -1
  16. data/vendor/faiss/faiss/AutoTune.cpp +130 -11
  17. data/vendor/faiss/faiss/AutoTune.h +14 -1
  18. data/vendor/faiss/faiss/Clustering.cpp +59 -10
  19. data/vendor/faiss/faiss/Clustering.h +12 -0
  20. data/vendor/faiss/faiss/IVFlib.cpp +31 -28
  21. data/vendor/faiss/faiss/Index.cpp +20 -8
  22. data/vendor/faiss/faiss/Index.h +25 -3
  23. data/vendor/faiss/faiss/IndexAdditiveQuantizer.cpp +19 -24
  24. data/vendor/faiss/faiss/IndexBinary.cpp +1 -0
  25. data/vendor/faiss/faiss/IndexBinaryHNSW.cpp +9 -4
  26. data/vendor/faiss/faiss/IndexBinaryIVF.cpp +45 -11
  27. data/vendor/faiss/faiss/IndexFastScan.cpp +35 -22
  28. data/vendor/faiss/faiss/IndexFastScan.h +10 -1
  29. data/vendor/faiss/faiss/IndexFlat.cpp +193 -136
  30. data/vendor/faiss/faiss/IndexFlat.h +16 -1
  31. data/vendor/faiss/faiss/IndexFlatCodes.cpp +46 -22
  32. data/vendor/faiss/faiss/IndexFlatCodes.h +7 -1
  33. data/vendor/faiss/faiss/IndexHNSW.cpp +24 -50
  34. data/vendor/faiss/faiss/IndexHNSW.h +14 -12
  35. data/vendor/faiss/faiss/IndexIDMap.cpp +1 -1
  36. data/vendor/faiss/faiss/IndexIVF.cpp +76 -49
  37. data/vendor/faiss/faiss/IndexIVF.h +14 -4
  38. data/vendor/faiss/faiss/IndexIVFAdditiveQuantizerFastScan.cpp +11 -8
  39. data/vendor/faiss/faiss/IndexIVFAdditiveQuantizerFastScan.h +2 -2
  40. data/vendor/faiss/faiss/IndexIVFFastScan.cpp +25 -14
  41. data/vendor/faiss/faiss/IndexIVFFastScan.h +26 -22
  42. data/vendor/faiss/faiss/IndexIVFFlat.cpp +10 -61
  43. data/vendor/faiss/faiss/IndexIVFFlatPanorama.cpp +39 -111
  44. data/vendor/faiss/faiss/IndexIVFPQ.cpp +89 -147
  45. data/vendor/faiss/faiss/IndexIVFPQFastScan.cpp +37 -5
  46. data/vendor/faiss/faiss/IndexIVFPQR.cpp +2 -1
  47. data/vendor/faiss/faiss/IndexIVFRaBitQ.cpp +42 -30
  48. data/vendor/faiss/faiss/IndexIVFRaBitQ.h +2 -2
  49. data/vendor/faiss/faiss/IndexIVFRaBitQFastScan.cpp +246 -97
  50. data/vendor/faiss/faiss/IndexIVFRaBitQFastScan.h +32 -29
  51. data/vendor/faiss/faiss/IndexLSH.cpp +8 -6
  52. data/vendor/faiss/faiss/IndexLattice.cpp +29 -24
  53. data/vendor/faiss/faiss/IndexNNDescent.cpp +1 -0
  54. data/vendor/faiss/faiss/IndexNSG.cpp +2 -1
  55. data/vendor/faiss/faiss/IndexNSG.h +0 -2
  56. data/vendor/faiss/faiss/IndexNeuralNetCodec.cpp +1 -1
  57. data/vendor/faiss/faiss/IndexPQ.cpp +19 -10
  58. data/vendor/faiss/faiss/IndexRaBitQ.cpp +26 -13
  59. data/vendor/faiss/faiss/IndexRaBitQ.h +2 -2
  60. data/vendor/faiss/faiss/IndexRaBitQFastScan.cpp +132 -78
  61. data/vendor/faiss/faiss/IndexRaBitQFastScan.h +14 -12
  62. data/vendor/faiss/faiss/IndexRefine.cpp +0 -30
  63. data/vendor/faiss/faiss/IndexShards.cpp +3 -4
  64. data/vendor/faiss/faiss/MetricType.h +16 -0
  65. data/vendor/faiss/faiss/VectorTransform.cpp +120 -0
  66. data/vendor/faiss/faiss/VectorTransform.h +23 -0
  67. data/vendor/faiss/faiss/clone_index.cpp +7 -4
  68. data/vendor/faiss/faiss/{cppcontrib/factory_tools.cpp → factory_tools.cpp} +1 -1
  69. data/vendor/faiss/faiss/gpu/GpuCloner.cpp +1 -1
  70. data/vendor/faiss/faiss/impl/AdditiveQuantizer.cpp +37 -11
  71. data/vendor/faiss/faiss/impl/AuxIndexStructures.h +0 -28
  72. data/vendor/faiss/faiss/impl/ClusteringInitialization.cpp +367 -0
  73. data/vendor/faiss/faiss/impl/ClusteringInitialization.h +107 -0
  74. data/vendor/faiss/faiss/impl/CodePacker.cpp +4 -0
  75. data/vendor/faiss/faiss/impl/CodePacker.h +11 -3
  76. data/vendor/faiss/faiss/impl/CodePackerRaBitQ.cpp +83 -0
  77. data/vendor/faiss/faiss/impl/CodePackerRaBitQ.h +47 -0
  78. data/vendor/faiss/faiss/impl/FaissAssert.h +60 -2
  79. data/vendor/faiss/faiss/impl/HNSW.cpp +25 -34
  80. data/vendor/faiss/faiss/impl/HNSW.h +8 -6
  81. data/vendor/faiss/faiss/impl/LocalSearchQuantizer.cpp +34 -27
  82. data/vendor/faiss/faiss/impl/NNDescent.cpp +1 -1
  83. data/vendor/faiss/faiss/impl/NSG.cpp +6 -5
  84. data/vendor/faiss/faiss/impl/NSG.h +17 -7
  85. data/vendor/faiss/faiss/impl/Panorama.cpp +53 -46
  86. data/vendor/faiss/faiss/impl/Panorama.h +22 -6
  87. data/vendor/faiss/faiss/impl/PolysemousTraining.cpp +16 -5
  88. data/vendor/faiss/faiss/impl/ProductQuantizer.cpp +70 -58
  89. data/vendor/faiss/faiss/impl/RaBitQUtils.cpp +92 -0
  90. data/vendor/faiss/faiss/impl/RaBitQUtils.h +93 -31
  91. data/vendor/faiss/faiss/impl/RaBitQuantizer.cpp +12 -28
  92. data/vendor/faiss/faiss/impl/RaBitQuantizer.h +3 -10
  93. data/vendor/faiss/faiss/impl/RaBitQuantizerMultiBit.cpp +15 -41
  94. data/vendor/faiss/faiss/impl/RaBitQuantizerMultiBit.h +0 -4
  95. data/vendor/faiss/faiss/impl/ResidualQuantizer.cpp +14 -9
  96. data/vendor/faiss/faiss/impl/ResultHandler.h +131 -50
  97. data/vendor/faiss/faiss/impl/ScalarQuantizer.cpp +67 -2358
  98. data/vendor/faiss/faiss/impl/ScalarQuantizer.h +0 -2
  99. data/vendor/faiss/faiss/impl/VisitedTable.cpp +42 -0
  100. data/vendor/faiss/faiss/impl/VisitedTable.h +69 -0
  101. data/vendor/faiss/faiss/impl/expanded_scanners.h +158 -0
  102. data/vendor/faiss/faiss/impl/index_read.cpp +829 -471
  103. data/vendor/faiss/faiss/impl/index_read_utils.h +0 -1
  104. data/vendor/faiss/faiss/impl/index_write.cpp +17 -8
  105. data/vendor/faiss/faiss/impl/lattice_Zn.cpp +47 -20
  106. data/vendor/faiss/faiss/impl/mapped_io.cpp +9 -2
  107. data/vendor/faiss/faiss/impl/pq4_fast_scan.cpp +7 -2
  108. data/vendor/faiss/faiss/impl/pq4_fast_scan.h +11 -3
  109. data/vendor/faiss/faiss/impl/pq4_fast_scan_search_1.cpp +19 -13
  110. data/vendor/faiss/faiss/impl/pq4_fast_scan_search_qbs.cpp +29 -21
  111. data/vendor/faiss/faiss/impl/{code_distance/code_distance-avx2.h → pq_code_distance/pq_code_distance-avx2.cpp} +42 -215
  112. data/vendor/faiss/faiss/impl/{code_distance/code_distance-avx512.h → pq_code_distance/pq_code_distance-avx512.cpp} +68 -107
  113. data/vendor/faiss/faiss/impl/pq_code_distance/pq_code_distance-generic.cpp +141 -0
  114. data/vendor/faiss/faiss/impl/pq_code_distance/pq_code_distance-inl.h +23 -0
  115. data/vendor/faiss/faiss/impl/{code_distance/code_distance-sve.h → pq_code_distance/pq_code_distance-sve.cpp} +57 -144
  116. data/vendor/faiss/faiss/impl/residual_quantizer_encode_steps.cpp +9 -6
  117. data/vendor/faiss/faiss/impl/scalar_quantizer/codecs.h +121 -0
  118. data/vendor/faiss/faiss/impl/scalar_quantizer/distance_computers.h +136 -0
  119. data/vendor/faiss/faiss/impl/scalar_quantizer/quantizers.h +280 -0
  120. data/vendor/faiss/faiss/impl/scalar_quantizer/scanners.h +164 -0
  121. data/vendor/faiss/faiss/impl/scalar_quantizer/similarities.h +94 -0
  122. data/vendor/faiss/faiss/impl/scalar_quantizer/sq-avx2.cpp +455 -0
  123. data/vendor/faiss/faiss/impl/scalar_quantizer/sq-avx512.cpp +430 -0
  124. data/vendor/faiss/faiss/impl/scalar_quantizer/sq-dispatch.h +329 -0
  125. data/vendor/faiss/faiss/impl/scalar_quantizer/sq-neon.cpp +467 -0
  126. data/vendor/faiss/faiss/impl/scalar_quantizer/training.cpp +203 -0
  127. data/vendor/faiss/faiss/impl/scalar_quantizer/training.h +42 -0
  128. data/vendor/faiss/faiss/impl/simd_dispatch.h +139 -0
  129. data/vendor/faiss/faiss/impl/simd_result_handlers.h +18 -18
  130. data/vendor/faiss/faiss/index_factory.cpp +35 -16
  131. data/vendor/faiss/faiss/index_io.h +29 -3
  132. data/vendor/faiss/faiss/invlists/BlockInvertedLists.cpp +7 -4
  133. data/vendor/faiss/faiss/invlists/OnDiskInvertedLists.cpp +1 -1
  134. data/vendor/faiss/faiss/svs/IndexSVSFaissUtils.h +9 -19
  135. data/vendor/faiss/faiss/svs/IndexSVSFlat.h +2 -0
  136. data/vendor/faiss/faiss/svs/IndexSVSVamana.h +2 -1
  137. data/vendor/faiss/faiss/svs/IndexSVSVamanaLeanVec.cpp +9 -1
  138. data/vendor/faiss/faiss/svs/IndexSVSVamanaLeanVec.h +9 -0
  139. data/vendor/faiss/faiss/utils/Heap.cpp +46 -0
  140. data/vendor/faiss/faiss/utils/Heap.h +21 -0
  141. data/vendor/faiss/faiss/utils/NeuralNet.cpp +10 -7
  142. data/vendor/faiss/faiss/utils/distances.cpp +141 -23
  143. data/vendor/faiss/faiss/utils/distances.h +98 -0
  144. data/vendor/faiss/faiss/utils/distances_dispatch.h +170 -0
  145. data/vendor/faiss/faiss/utils/distances_simd.cpp +74 -3511
  146. data/vendor/faiss/faiss/utils/extra_distances-inl.h +164 -157
  147. data/vendor/faiss/faiss/utils/extra_distances.cpp +52 -95
  148. data/vendor/faiss/faiss/utils/extra_distances.h +47 -1
  149. data/vendor/faiss/faiss/utils/hamming_distance/generic-inl.h +0 -1
  150. data/vendor/faiss/faiss/utils/partitioning.cpp +1 -1
  151. data/vendor/faiss/faiss/utils/pq_code_distance.h +251 -0
  152. data/vendor/faiss/faiss/utils/rabitq_simd.h +260 -0
  153. data/vendor/faiss/faiss/utils/simd_impl/distances_aarch64.cpp +150 -0
  154. data/vendor/faiss/faiss/utils/simd_impl/distances_arm_sve.cpp +568 -0
  155. data/vendor/faiss/faiss/utils/simd_impl/distances_autovec-inl.h +153 -0
  156. data/vendor/faiss/faiss/utils/simd_impl/distances_avx2.cpp +1185 -0
  157. data/vendor/faiss/faiss/utils/simd_impl/distances_avx512.cpp +1092 -0
  158. data/vendor/faiss/faiss/utils/simd_impl/distances_sse-inl.h +391 -0
  159. data/vendor/faiss/faiss/utils/simd_levels.cpp +322 -0
  160. data/vendor/faiss/faiss/utils/simd_levels.h +91 -0
  161. data/vendor/faiss/faiss/utils/simdlib_avx2.h +12 -1
  162. data/vendor/faiss/faiss/utils/simdlib_avx512.h +69 -0
  163. data/vendor/faiss/faiss/utils/simdlib_neon.h +6 -0
  164. data/vendor/faiss/faiss/utils/sorting.cpp +4 -4
  165. data/vendor/faiss/faiss/utils/utils.cpp +16 -9
  166. metadata +47 -18
  167. data/vendor/faiss/faiss/impl/code_distance/code_distance-generic.h +0 -81
  168. data/vendor/faiss/faiss/impl/code_distance/code_distance.h +0 -186
  169. data/vendor/faiss/faiss/{cppcontrib/factory_tools.h → factory_tools.h} +0 -0
@@ -0,0 +1,1092 @@
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ *
4
+ * This source code is licensed under the MIT license found in the
5
+ * LICENSE file in the root directory of this source tree.
6
+ */
7
+
8
+ #include <faiss/utils/distances.h>
9
+
10
+ #include <immintrin.h>
11
+
12
+ #define AUTOVEC_LEVEL SIMDLevel::AVX512
13
+ #include <faiss/utils/simd_impl/distances_autovec-inl.h>
14
+ #include <faiss/utils/simd_impl/distances_sse-inl.h>
15
+ #include <faiss/utils/transpose/transpose-avx512-inl.h>
16
+
17
+ namespace faiss {
18
+
19
+ template <>
20
+ void fvec_madd<SIMDLevel::AVX512>(
21
+ const size_t n,
22
+ const float* __restrict a,
23
+ const float bf,
24
+ const float* __restrict b,
25
+ float* __restrict c) {
26
+ const size_t n16 = n / 16;
27
+ const size_t n_for_masking = n % 16;
28
+
29
+ const __m512 bfmm = _mm512_set1_ps(bf);
30
+
31
+ size_t idx = 0;
32
+ for (idx = 0; idx < n16 * 16; idx += 16) {
33
+ const __m512 ax = _mm512_loadu_ps(a + idx);
34
+ const __m512 bx = _mm512_loadu_ps(b + idx);
35
+ const __m512 abmul = _mm512_fmadd_ps(bfmm, bx, ax);
36
+ _mm512_storeu_ps(c + idx, abmul);
37
+ }
38
+
39
+ if (n_for_masking > 0) {
40
+ const __mmask16 mask = (1 << n_for_masking) - 1;
41
+
42
+ const __m512 ax = _mm512_maskz_loadu_ps(mask, a + idx);
43
+ const __m512 bx = _mm512_maskz_loadu_ps(mask, b + idx);
44
+ const __m512 abmul = _mm512_fmadd_ps(bfmm, bx, ax);
45
+ _mm512_mask_storeu_ps(c + idx, mask, abmul);
46
+ }
47
+ }
48
+
49
+ template <size_t DIM>
50
+ void fvec_L2sqr_ny_y_transposed_D(
51
+ float* distances,
52
+ const float* x,
53
+ const float* y,
54
+ const float* y_sqlen,
55
+ const size_t d_offset,
56
+ size_t ny) {
57
+ // current index being processed
58
+ size_t i = 0;
59
+
60
+ // squared length of x
61
+ float x_sqlen = 0;
62
+ for (size_t j = 0; j < DIM; j++) {
63
+ x_sqlen += x[j] * x[j];
64
+ }
65
+
66
+ // process 16 vectors per loop
67
+ const size_t ny16 = ny / 16;
68
+
69
+ if (ny16 > 0) {
70
+ // m[i] = (2 * x[i], ... 2 * x[i])
71
+ __m512 m[DIM];
72
+ for (size_t j = 0; j < DIM; j++) {
73
+ m[j] = _mm512_set1_ps(x[j]);
74
+ m[j] = _mm512_add_ps(m[j], m[j]); // m[j] = 2 * x[j]
75
+ }
76
+
77
+ __m512 x_sqlen_ymm = _mm512_set1_ps(x_sqlen);
78
+
79
+ for (; i < ny16 * 16; i += 16) {
80
+ // Load vectors for 16 dimensions
81
+ __m512 v[DIM];
82
+ for (size_t j = 0; j < DIM; j++) {
83
+ v[j] = _mm512_loadu_ps(y + j * d_offset);
84
+ }
85
+
86
+ // Compute dot products
87
+ __m512 dp = _mm512_fnmadd_ps(m[0], v[0], x_sqlen_ymm);
88
+ for (size_t j = 1; j < DIM; j++) {
89
+ dp = _mm512_fnmadd_ps(m[j], v[j], dp);
90
+ }
91
+
92
+ // Compute y^2 - (2 * x, y) + x^2
93
+ __m512 distances_v = _mm512_add_ps(_mm512_loadu_ps(y_sqlen), dp);
94
+
95
+ _mm512_storeu_ps(distances + i, distances_v);
96
+
97
+ // Scroll y and y_sqlen forward
98
+ y += 16;
99
+ y_sqlen += 16;
100
+ }
101
+ }
102
+
103
+ if (i < ny) {
104
+ // Process leftovers
105
+ for (; i < ny; i++) {
106
+ float dp = 0;
107
+ for (size_t j = 0; j < DIM; j++) {
108
+ dp += x[j] * y[j * d_offset];
109
+ }
110
+
111
+ // Compute y^2 - 2 * (x, y), which is sufficient for looking for the
112
+ // lowest distance.
113
+ const float distance = y_sqlen[0] - 2 * dp + x_sqlen;
114
+ distances[i] = distance;
115
+
116
+ y += 1;
117
+ y_sqlen += 1;
118
+ }
119
+ }
120
+ }
121
+
122
+ template <>
123
+ void fvec_L2sqr_ny_transposed<SIMDLevel::AVX512>(
124
+ float* dis,
125
+ const float* x,
126
+ const float* y,
127
+ const float* y_sqlen,
128
+ size_t d,
129
+ size_t d_offset,
130
+ size_t ny) {
131
+ // optimized for a few special cases
132
+ #define DISPATCH(dval) \
133
+ case dval: \
134
+ return fvec_L2sqr_ny_y_transposed_D<dval>( \
135
+ dis, x, y, y_sqlen, d_offset, ny);
136
+
137
+ switch (d) {
138
+ DISPATCH(1)
139
+ DISPATCH(2)
140
+ DISPATCH(4)
141
+ DISPATCH(8)
142
+ default:
143
+ return fvec_L2sqr_ny_transposed<SIMDLevel::NONE>(
144
+ dis, x, y, y_sqlen, d, d_offset, ny);
145
+ }
146
+ #undef DISPATCH
147
+ }
148
+
149
+ struct AVX512ElementOpIP : public ElementOpIP {
150
+ using ElementOpIP::op;
151
+ static __m512 op(__m512 x, __m512 y) {
152
+ return _mm512_mul_ps(x, y);
153
+ }
154
+ static __m256 op(__m256 x, __m256 y) {
155
+ return _mm256_mul_ps(x, y);
156
+ }
157
+ };
158
+
159
+ struct AVX512ElementOpL2 : public ElementOpL2 {
160
+ using ElementOpL2::op;
161
+ static __m512 op(__m512 x, __m512 y) {
162
+ __m512 tmp = _mm512_sub_ps(x, y);
163
+ return _mm512_mul_ps(tmp, tmp);
164
+ }
165
+ static __m256 op(__m256 x, __m256 y) {
166
+ __m256 tmp = _mm256_sub_ps(x, y);
167
+ return _mm256_mul_ps(tmp, tmp);
168
+ }
169
+ };
170
+
171
+ /// helper function for AVX512
172
+ inline float horizontal_sum(const __m512 v) {
173
+ // performs better than adding the high and low parts
174
+ return _mm512_reduce_add_ps(v);
175
+ }
176
+
177
+ inline float horizontal_sum(const __m256 v) {
178
+ // add high and low parts
179
+ const __m128 v0 =
180
+ _mm_add_ps(_mm256_castps256_ps128(v), _mm256_extractf128_ps(v, 1));
181
+ // perform horizontal sum on v0
182
+ return horizontal_sum(v0);
183
+ }
184
+
185
+ template <>
186
+ void fvec_op_ny_D2<AVX512ElementOpIP>(
187
+ float* dis,
188
+ const float* x,
189
+ const float* y,
190
+ size_t ny) {
191
+ const size_t ny16 = ny / 16;
192
+ size_t i = 0;
193
+
194
+ if (ny16 > 0) {
195
+ // process 16 D2-vectors per loop.
196
+ _mm_prefetch((const char*)y, _MM_HINT_T0);
197
+ _mm_prefetch((const char*)(y + 32), _MM_HINT_T0);
198
+
199
+ const __m512 m0 = _mm512_set1_ps(x[0]);
200
+ const __m512 m1 = _mm512_set1_ps(x[1]);
201
+
202
+ for (i = 0; i < ny16 * 16; i += 16) {
203
+ _mm_prefetch((const char*)(y + 64), _MM_HINT_T0);
204
+
205
+ // load 16x2 matrix and transpose it in registers.
206
+ // the typical bottleneck is memory access, so
207
+ // let's trade instructions for the bandwidth.
208
+
209
+ __m512 v0;
210
+ __m512 v1;
211
+
212
+ transpose_16x2(
213
+ _mm512_loadu_ps(y + 0 * 16),
214
+ _mm512_loadu_ps(y + 1 * 16),
215
+ v0,
216
+ v1);
217
+
218
+ // compute distances (dot product)
219
+ __m512 distances = _mm512_mul_ps(m0, v0);
220
+ distances = _mm512_fmadd_ps(m1, v1, distances);
221
+
222
+ // store
223
+ _mm512_storeu_ps(dis + i, distances);
224
+
225
+ y += 32; // move to the next set of 16x2 elements
226
+ }
227
+ }
228
+
229
+ if (i < ny) {
230
+ // process leftovers
231
+ float x0 = x[0];
232
+ float x1 = x[1];
233
+
234
+ for (; i < ny; i++) {
235
+ float distance = x0 * y[0] + x1 * y[1];
236
+ y += 2;
237
+ dis[i] = distance;
238
+ }
239
+ }
240
+ }
241
+
242
+ template <>
243
+ void fvec_op_ny_D2<AVX512ElementOpL2>(
244
+ float* dis,
245
+ const float* x,
246
+ const float* y,
247
+ size_t ny) {
248
+ const size_t ny16 = ny / 16;
249
+ size_t i = 0;
250
+
251
+ if (ny16 > 0) {
252
+ // process 16 D2-vectors per loop.
253
+ _mm_prefetch((const char*)y, _MM_HINT_T0);
254
+ _mm_prefetch((const char*)(y + 32), _MM_HINT_T0);
255
+
256
+ const __m512 m0 = _mm512_set1_ps(x[0]);
257
+ const __m512 m1 = _mm512_set1_ps(x[1]);
258
+
259
+ for (i = 0; i < ny16 * 16; i += 16) {
260
+ _mm_prefetch((const char*)(y + 64), _MM_HINT_T0);
261
+
262
+ // load 16x2 matrix and transpose it in registers.
263
+ // the typical bottleneck is memory access, so
264
+ // let's trade instructions for the bandwidth.
265
+
266
+ __m512 v0;
267
+ __m512 v1;
268
+
269
+ transpose_16x2(
270
+ _mm512_loadu_ps(y + 0 * 16),
271
+ _mm512_loadu_ps(y + 1 * 16),
272
+ v0,
273
+ v1);
274
+
275
+ // compute differences
276
+ const __m512 d0 = _mm512_sub_ps(m0, v0);
277
+ const __m512 d1 = _mm512_sub_ps(m1, v1);
278
+
279
+ // compute squares of differences
280
+ __m512 distances = _mm512_mul_ps(d0, d0);
281
+ distances = _mm512_fmadd_ps(d1, d1, distances);
282
+
283
+ // store
284
+ _mm512_storeu_ps(dis + i, distances);
285
+
286
+ y += 32; // move to the next set of 16x2 elements
287
+ }
288
+ }
289
+
290
+ if (i < ny) {
291
+ // process leftovers
292
+ float x0 = x[0];
293
+ float x1 = x[1];
294
+
295
+ for (; i < ny; i++) {
296
+ float sub0 = x0 - y[0];
297
+ float sub1 = x1 - y[1];
298
+ float distance = sub0 * sub0 + sub1 * sub1;
299
+
300
+ y += 2;
301
+ dis[i] = distance;
302
+ }
303
+ }
304
+ }
305
+
306
+ template <>
307
+ void fvec_op_ny_D4<AVX512ElementOpIP>(
308
+ float* dis,
309
+ const float* x,
310
+ const float* y,
311
+ size_t ny) {
312
+ const size_t ny16 = ny / 16;
313
+ size_t i = 0;
314
+
315
+ if (ny16 > 0) {
316
+ // process 16 D4-vectors per loop.
317
+ const __m512 m0 = _mm512_set1_ps(x[0]);
318
+ const __m512 m1 = _mm512_set1_ps(x[1]);
319
+ const __m512 m2 = _mm512_set1_ps(x[2]);
320
+ const __m512 m3 = _mm512_set1_ps(x[3]);
321
+
322
+ for (i = 0; i < ny16 * 16; i += 16) {
323
+ // load 16x4 matrix and transpose it in registers.
324
+ // the typical bottleneck is memory access, so
325
+ // let's trade instructions for the bandwidth.
326
+
327
+ __m512 v0;
328
+ __m512 v1;
329
+ __m512 v2;
330
+ __m512 v3;
331
+
332
+ transpose_16x4(
333
+ _mm512_loadu_ps(y + 0 * 16),
334
+ _mm512_loadu_ps(y + 1 * 16),
335
+ _mm512_loadu_ps(y + 2 * 16),
336
+ _mm512_loadu_ps(y + 3 * 16),
337
+ v0,
338
+ v1,
339
+ v2,
340
+ v3);
341
+
342
+ // compute distances
343
+ __m512 distances = _mm512_mul_ps(m0, v0);
344
+ distances = _mm512_fmadd_ps(m1, v1, distances);
345
+ distances = _mm512_fmadd_ps(m2, v2, distances);
346
+ distances = _mm512_fmadd_ps(m3, v3, distances);
347
+
348
+ // store
349
+ _mm512_storeu_ps(dis + i, distances);
350
+
351
+ y += 64; // move to the next set of 16x4 elements
352
+ }
353
+ }
354
+
355
+ if (i < ny) {
356
+ // process leftovers
357
+ __m128 x0 = _mm_loadu_ps(x);
358
+
359
+ for (; i < ny; i++) {
360
+ __m128 accu = AVX512ElementOpIP::op(x0, _mm_loadu_ps(y));
361
+ y += 4;
362
+ dis[i] = horizontal_sum(accu);
363
+ }
364
+ }
365
+ }
366
+
367
+ template <>
368
+ void fvec_op_ny_D4<AVX512ElementOpL2>(
369
+ float* dis,
370
+ const float* x,
371
+ const float* y,
372
+ size_t ny) {
373
+ const size_t ny16 = ny / 16;
374
+ size_t i = 0;
375
+
376
+ if (ny16 > 0) {
377
+ // process 16 D4-vectors per loop.
378
+ const __m512 m0 = _mm512_set1_ps(x[0]);
379
+ const __m512 m1 = _mm512_set1_ps(x[1]);
380
+ const __m512 m2 = _mm512_set1_ps(x[2]);
381
+ const __m512 m3 = _mm512_set1_ps(x[3]);
382
+
383
+ for (i = 0; i < ny16 * 16; i += 16) {
384
+ // load 16x4 matrix and transpose it in registers.
385
+ // the typical bottleneck is memory access, so
386
+ // let's trade instructions for the bandwidth.
387
+
388
+ __m512 v0;
389
+ __m512 v1;
390
+ __m512 v2;
391
+ __m512 v3;
392
+
393
+ transpose_16x4(
394
+ _mm512_loadu_ps(y + 0 * 16),
395
+ _mm512_loadu_ps(y + 1 * 16),
396
+ _mm512_loadu_ps(y + 2 * 16),
397
+ _mm512_loadu_ps(y + 3 * 16),
398
+ v0,
399
+ v1,
400
+ v2,
401
+ v3);
402
+
403
+ // compute differences
404
+ const __m512 d0 = _mm512_sub_ps(m0, v0);
405
+ const __m512 d1 = _mm512_sub_ps(m1, v1);
406
+ const __m512 d2 = _mm512_sub_ps(m2, v2);
407
+ const __m512 d3 = _mm512_sub_ps(m3, v3);
408
+
409
+ // compute squares of differences
410
+ __m512 distances = _mm512_mul_ps(d0, d0);
411
+ distances = _mm512_fmadd_ps(d1, d1, distances);
412
+ distances = _mm512_fmadd_ps(d2, d2, distances);
413
+ distances = _mm512_fmadd_ps(d3, d3, distances);
414
+
415
+ // store
416
+ _mm512_storeu_ps(dis + i, distances);
417
+
418
+ y += 64; // move to the next set of 16x4 elements
419
+ }
420
+ }
421
+
422
+ if (i < ny) {
423
+ // process leftovers
424
+ __m128 x0 = _mm_loadu_ps(x);
425
+
426
+ for (; i < ny; i++) {
427
+ __m128 accu = AVX512ElementOpL2::op(x0, _mm_loadu_ps(y));
428
+ y += 4;
429
+ dis[i] = horizontal_sum(accu);
430
+ }
431
+ }
432
+ }
433
+
434
+ template <>
435
+ void fvec_op_ny_D8<AVX512ElementOpIP>(
436
+ float* dis,
437
+ const float* x,
438
+ const float* y,
439
+ size_t ny) {
440
+ const size_t ny16 = ny / 16;
441
+ size_t i = 0;
442
+
443
+ if (ny16 > 0) {
444
+ // process 16 D16-vectors per loop.
445
+ const __m512 m0 = _mm512_set1_ps(x[0]);
446
+ const __m512 m1 = _mm512_set1_ps(x[1]);
447
+ const __m512 m2 = _mm512_set1_ps(x[2]);
448
+ const __m512 m3 = _mm512_set1_ps(x[3]);
449
+ const __m512 m4 = _mm512_set1_ps(x[4]);
450
+ const __m512 m5 = _mm512_set1_ps(x[5]);
451
+ const __m512 m6 = _mm512_set1_ps(x[6]);
452
+ const __m512 m7 = _mm512_set1_ps(x[7]);
453
+
454
+ for (i = 0; i < ny16 * 16; i += 16) {
455
+ // load 16x8 matrix and transpose it in registers.
456
+ // the typical bottleneck is memory access, so
457
+ // let's trade instructions for the bandwidth.
458
+
459
+ __m512 v0;
460
+ __m512 v1;
461
+ __m512 v2;
462
+ __m512 v3;
463
+ __m512 v4;
464
+ __m512 v5;
465
+ __m512 v6;
466
+ __m512 v7;
467
+
468
+ transpose_16x8(
469
+ _mm512_loadu_ps(y + 0 * 16),
470
+ _mm512_loadu_ps(y + 1 * 16),
471
+ _mm512_loadu_ps(y + 2 * 16),
472
+ _mm512_loadu_ps(y + 3 * 16),
473
+ _mm512_loadu_ps(y + 4 * 16),
474
+ _mm512_loadu_ps(y + 5 * 16),
475
+ _mm512_loadu_ps(y + 6 * 16),
476
+ _mm512_loadu_ps(y + 7 * 16),
477
+ v0,
478
+ v1,
479
+ v2,
480
+ v3,
481
+ v4,
482
+ v5,
483
+ v6,
484
+ v7);
485
+
486
+ // compute distances
487
+ __m512 distances = _mm512_mul_ps(m0, v0);
488
+ distances = _mm512_fmadd_ps(m1, v1, distances);
489
+ distances = _mm512_fmadd_ps(m2, v2, distances);
490
+ distances = _mm512_fmadd_ps(m3, v3, distances);
491
+ distances = _mm512_fmadd_ps(m4, v4, distances);
492
+ distances = _mm512_fmadd_ps(m5, v5, distances);
493
+ distances = _mm512_fmadd_ps(m6, v6, distances);
494
+ distances = _mm512_fmadd_ps(m7, v7, distances);
495
+
496
+ // store
497
+ _mm512_storeu_ps(dis + i, distances);
498
+
499
+ y += 128; // 16 floats * 8 rows
500
+ }
501
+ }
502
+
503
+ if (i < ny) {
504
+ // process leftovers
505
+ __m256 x0 = _mm256_loadu_ps(x);
506
+
507
+ for (; i < ny; i++) {
508
+ __m256 accu = AVX512ElementOpIP::op(x0, _mm256_loadu_ps(y));
509
+ y += 8;
510
+ dis[i] = horizontal_sum(accu);
511
+ }
512
+ }
513
+ }
514
+
515
+ template <>
516
+ void fvec_op_ny_D8<AVX512ElementOpL2>(
517
+ float* dis,
518
+ const float* x,
519
+ const float* y,
520
+ size_t ny) {
521
+ const size_t ny16 = ny / 16;
522
+ size_t i = 0;
523
+
524
+ if (ny16 > 0) {
525
+ // process 16 D16-vectors per loop.
526
+ const __m512 m0 = _mm512_set1_ps(x[0]);
527
+ const __m512 m1 = _mm512_set1_ps(x[1]);
528
+ const __m512 m2 = _mm512_set1_ps(x[2]);
529
+ const __m512 m3 = _mm512_set1_ps(x[3]);
530
+ const __m512 m4 = _mm512_set1_ps(x[4]);
531
+ const __m512 m5 = _mm512_set1_ps(x[5]);
532
+ const __m512 m6 = _mm512_set1_ps(x[6]);
533
+ const __m512 m7 = _mm512_set1_ps(x[7]);
534
+
535
+ for (i = 0; i < ny16 * 16; i += 16) {
536
+ // load 16x8 matrix and transpose it in registers.
537
+ // the typical bottleneck is memory access, so
538
+ // let's trade instructions for the bandwidth.
539
+
540
+ __m512 v0;
541
+ __m512 v1;
542
+ __m512 v2;
543
+ __m512 v3;
544
+ __m512 v4;
545
+ __m512 v5;
546
+ __m512 v6;
547
+ __m512 v7;
548
+
549
+ transpose_16x8(
550
+ _mm512_loadu_ps(y + 0 * 16),
551
+ _mm512_loadu_ps(y + 1 * 16),
552
+ _mm512_loadu_ps(y + 2 * 16),
553
+ _mm512_loadu_ps(y + 3 * 16),
554
+ _mm512_loadu_ps(y + 4 * 16),
555
+ _mm512_loadu_ps(y + 5 * 16),
556
+ _mm512_loadu_ps(y + 6 * 16),
557
+ _mm512_loadu_ps(y + 7 * 16),
558
+ v0,
559
+ v1,
560
+ v2,
561
+ v3,
562
+ v4,
563
+ v5,
564
+ v6,
565
+ v7);
566
+
567
+ // compute differences
568
+ const __m512 d0 = _mm512_sub_ps(m0, v0);
569
+ const __m512 d1 = _mm512_sub_ps(m1, v1);
570
+ const __m512 d2 = _mm512_sub_ps(m2, v2);
571
+ const __m512 d3 = _mm512_sub_ps(m3, v3);
572
+ const __m512 d4 = _mm512_sub_ps(m4, v4);
573
+ const __m512 d5 = _mm512_sub_ps(m5, v5);
574
+ const __m512 d6 = _mm512_sub_ps(m6, v6);
575
+ const __m512 d7 = _mm512_sub_ps(m7, v7);
576
+
577
+ // compute squares of differences
578
+ __m512 distances = _mm512_mul_ps(d0, d0);
579
+ distances = _mm512_fmadd_ps(d1, d1, distances);
580
+ distances = _mm512_fmadd_ps(d2, d2, distances);
581
+ distances = _mm512_fmadd_ps(d3, d3, distances);
582
+ distances = _mm512_fmadd_ps(d4, d4, distances);
583
+ distances = _mm512_fmadd_ps(d5, d5, distances);
584
+ distances = _mm512_fmadd_ps(d6, d6, distances);
585
+ distances = _mm512_fmadd_ps(d7, d7, distances);
586
+
587
+ // store
588
+ _mm512_storeu_ps(dis + i, distances);
589
+
590
+ y += 128; // 16 floats * 8 rows
591
+ }
592
+ }
593
+
594
+ if (i < ny) {
595
+ // process leftovers
596
+ __m256 x0 = _mm256_loadu_ps(x);
597
+
598
+ for (; i < ny; i++) {
599
+ __m256 accu = AVX512ElementOpL2::op(x0, _mm256_loadu_ps(y));
600
+ y += 8;
601
+ dis[i] = horizontal_sum(accu);
602
+ }
603
+ }
604
+ }
605
+
606
+ template <>
607
+ void fvec_inner_products_ny<SIMDLevel::AVX512>(
608
+ float* ip, /* output inner product */
609
+ const float* x,
610
+ const float* y,
611
+ size_t d,
612
+ size_t ny) {
613
+ fvec_inner_products_ny_ref<AVX512ElementOpIP>(ip, x, y, d, ny);
614
+ }
615
+
616
+ template <>
617
+ void fvec_L2sqr_ny<SIMDLevel::AVX512>(
618
+ float* dis,
619
+ const float* x,
620
+ const float* y,
621
+ size_t d,
622
+ size_t ny) {
623
+ fvec_L2sqr_ny_ref<AVX512ElementOpL2>(dis, x, y, d, ny);
624
+ }
625
+
626
+ template <>
627
+ size_t fvec_L2sqr_ny_nearest_D2<SIMDLevel::AVX512>(
628
+ float* distances_tmp_buffer,
629
+ const float* x,
630
+ const float* y,
631
+ size_t ny) {
632
+ // this implementation does not use distances_tmp_buffer.
633
+
634
+ size_t i = 0;
635
+ float current_min_distance = HUGE_VALF;
636
+ size_t current_min_index = 0;
637
+
638
+ const size_t ny16 = ny / 16;
639
+ if (ny16 > 0) {
640
+ _mm_prefetch((const char*)y, _MM_HINT_T0);
641
+ _mm_prefetch((const char*)(y + 32), _MM_HINT_T0);
642
+
643
+ __m512 min_distances = _mm512_set1_ps(HUGE_VALF);
644
+ __m512i min_indices = _mm512_set1_epi32(0);
645
+
646
+ __m512i current_indices = _mm512_setr_epi32(
647
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
648
+ const __m512i indices_increment = _mm512_set1_epi32(16);
649
+
650
+ const __m512 m0 = _mm512_set1_ps(x[0]);
651
+ const __m512 m1 = _mm512_set1_ps(x[1]);
652
+
653
+ for (; i < ny16 * 16; i += 16) {
654
+ _mm_prefetch((const char*)(y + 64), _MM_HINT_T0);
655
+
656
+ __m512 v0;
657
+ __m512 v1;
658
+
659
+ transpose_16x2(
660
+ _mm512_loadu_ps(y + 0 * 16),
661
+ _mm512_loadu_ps(y + 1 * 16),
662
+ v0,
663
+ v1);
664
+
665
+ const __m512 d0 = _mm512_sub_ps(m0, v0);
666
+ const __m512 d1 = _mm512_sub_ps(m1, v1);
667
+
668
+ __m512 distances = _mm512_mul_ps(d0, d0);
669
+ distances = _mm512_fmadd_ps(d1, d1, distances);
670
+
671
+ __mmask16 comparison =
672
+ _mm512_cmp_ps_mask(distances, min_distances, _CMP_LT_OS);
673
+
674
+ min_distances = _mm512_min_ps(distances, min_distances);
675
+ min_indices = _mm512_mask_blend_epi32(
676
+ comparison, min_indices, current_indices);
677
+
678
+ current_indices =
679
+ _mm512_add_epi32(current_indices, indices_increment);
680
+
681
+ y += 32;
682
+ }
683
+
684
+ alignas(64) float min_distances_scalar[16];
685
+ alignas(64) uint32_t min_indices_scalar[16];
686
+ _mm512_store_ps(min_distances_scalar, min_distances);
687
+ _mm512_store_epi32(min_indices_scalar, min_indices);
688
+
689
+ for (size_t j = 0; j < 16; j++) {
690
+ if (current_min_distance > min_distances_scalar[j]) {
691
+ current_min_distance = min_distances_scalar[j];
692
+ current_min_index = min_indices_scalar[j];
693
+ }
694
+ }
695
+ }
696
+
697
+ if (i < ny) {
698
+ float x0 = x[0];
699
+ float x1 = x[1];
700
+
701
+ for (; i < ny; i++) {
702
+ float sub0 = x0 - y[0];
703
+ float sub1 = x1 - y[1];
704
+ float distance = sub0 * sub0 + sub1 * sub1;
705
+
706
+ y += 2;
707
+
708
+ if (current_min_distance > distance) {
709
+ current_min_distance = distance;
710
+ current_min_index = i;
711
+ }
712
+ }
713
+ }
714
+
715
+ return current_min_index;
716
+ }
717
+
718
/// AVX-512 specialization for d == 4: returns the index of the vector among
/// the `ny` contiguous 4-d vectors in `y` with the smallest squared L2
/// distance to the 4-d query `x`.
template <>
size_t fvec_L2sqr_ny_nearest_D4<SIMDLevel::AVX512>(
        float* distances_tmp_buffer,
        const float* x,
        const float* y,
        size_t ny) {
    // this implementation does not use distances_tmp_buffer.

    size_t i = 0;
    float current_min_distance = HUGE_VALF;
    size_t current_min_index = 0;

    // Process 16 candidate vectors (16 * 4 floats) per loop iteration.
    const size_t ny16 = ny / 16;

    if (ny16 > 0) {
        // Track the running min distance and its index independently in each
        // of the 16 SIMD lanes; lanes are reduced to a scalar after the loop.
        __m512 min_distances = _mm512_set1_ps(HUGE_VALF);
        __m512i min_indices = _mm512_set1_epi32(0);

        // Per-lane candidate indices for the current iteration: i + lane.
        __m512i current_indices = _mm512_setr_epi32(
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        const __m512i indices_increment = _mm512_set1_epi32(16);

        // Broadcast each query component across a full register.
        const __m512 m0 = _mm512_set1_ps(x[0]);
        const __m512 m1 = _mm512_set1_ps(x[1]);
        const __m512 m2 = _mm512_set1_ps(x[2]);
        const __m512 m3 = _mm512_set1_ps(x[3]);

        for (; i < ny16 * 16; i += 16) {
            __m512 v0;
            __m512 v1;
            __m512 v2;
            __m512 v3;

            // Turn 16 interleaved 4-d vectors (AoS) into 4 registers holding
            // one component each (SoA), so lanes line up with candidates.
            transpose_16x4(
                    _mm512_loadu_ps(y + 0 * 16),
                    _mm512_loadu_ps(y + 1 * 16),
                    _mm512_loadu_ps(y + 2 * 16),
                    _mm512_loadu_ps(y + 3 * 16),
                    v0,
                    v1,
                    v2,
                    v3);

            // Component-wise differences against the query.
            const __m512 d0 = _mm512_sub_ps(m0, v0);
            const __m512 d1 = _mm512_sub_ps(m1, v1);
            const __m512 d2 = _mm512_sub_ps(m2, v2);
            const __m512 d3 = _mm512_sub_ps(m3, v3);

            // Squared L2 distance, accumulated with FMAs.
            __m512 distances = _mm512_mul_ps(d0, d0);
            distances = _mm512_fmadd_ps(d1, d1, distances);
            distances = _mm512_fmadd_ps(d2, d2, distances);
            distances = _mm512_fmadd_ps(d3, d3, distances);

            // Lanes where the new distance is strictly smaller than the min.
            __mmask16 comparison =
                    _mm512_cmp_ps_mask(distances, min_distances, _CMP_LT_OS);

            // Fold the improvements into the running minima and indices.
            min_distances = _mm512_min_ps(distances, min_distances);
            min_indices = _mm512_mask_blend_epi32(
                    comparison, min_indices, current_indices);

            current_indices =
                    _mm512_add_epi32(current_indices, indices_increment);

            // Advance past the 16 vectors (16 * 4 floats) just consumed.
            y += 64;
        }

        // Reduce the 16 per-lane minima to a single scalar winner.
        alignas(64) float min_distances_scalar[16];
        alignas(64) uint32_t min_indices_scalar[16];
        _mm512_store_ps(min_distances_scalar, min_distances);
        _mm512_store_epi32(min_indices_scalar, min_indices);

        for (size_t j = 0; j < 16; j++) {
            if (current_min_distance > min_distances_scalar[j]) {
                current_min_distance = min_distances_scalar[j];
                current_min_index = min_indices_scalar[j];
            }
        }
    }

    if (i < ny) {
        // Handle the ny % 16 leftover vectors one at a time with SSE.
        __m128 x0 = _mm_loadu_ps(x);

        for (; i < ny; i++) {
            __m128 accu = ElementOpL2::op(x0, _mm_loadu_ps(y));
            y += 4;
            const float distance = horizontal_sum(accu);

            if (current_min_distance > distance) {
                current_min_distance = distance;
                current_min_index = i;
            }
        }
    }

    return current_min_index;
}
814
+
815
/// AVX-512 specialization for d == 8: returns the index of the vector among
/// the `ny` contiguous 8-d vectors in `y` with the smallest squared L2
/// distance to the 8-d query `x`.
template <>
size_t fvec_L2sqr_ny_nearest_D8<SIMDLevel::AVX512>(
        float* distances_tmp_buffer,
        const float* x,
        const float* y,
        size_t ny) {
    // this implementation does not use distances_tmp_buffer.

    size_t i = 0;
    float current_min_distance = HUGE_VALF;
    size_t current_min_index = 0;

    // Process 16 candidate vectors (16 * 8 floats) per loop iteration.
    const size_t ny16 = ny / 16;
    if (ny16 > 0) {
        // Track the running min distance and its index independently in each
        // of the 16 SIMD lanes; lanes are reduced to a scalar after the loop.
        __m512 min_distances = _mm512_set1_ps(HUGE_VALF);
        __m512i min_indices = _mm512_set1_epi32(0);

        // Per-lane candidate indices for the current iteration: i + lane.
        __m512i current_indices = _mm512_setr_epi32(
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        const __m512i indices_increment = _mm512_set1_epi32(16);

        // Broadcast each of the 8 query components across a full register.
        const __m512 m0 = _mm512_set1_ps(x[0]);
        const __m512 m1 = _mm512_set1_ps(x[1]);
        const __m512 m2 = _mm512_set1_ps(x[2]);
        const __m512 m3 = _mm512_set1_ps(x[3]);

        const __m512 m4 = _mm512_set1_ps(x[4]);
        const __m512 m5 = _mm512_set1_ps(x[5]);
        const __m512 m6 = _mm512_set1_ps(x[6]);
        const __m512 m7 = _mm512_set1_ps(x[7]);

        for (; i < ny16 * 16; i += 16) {
            __m512 v0;
            __m512 v1;
            __m512 v2;
            __m512 v3;
            __m512 v4;
            __m512 v5;
            __m512 v6;
            __m512 v7;

            // Turn 16 interleaved 8-d vectors (AoS) into 8 registers holding
            // one component each (SoA), so lanes line up with candidates.
            transpose_16x8(
                    _mm512_loadu_ps(y + 0 * 16),
                    _mm512_loadu_ps(y + 1 * 16),
                    _mm512_loadu_ps(y + 2 * 16),
                    _mm512_loadu_ps(y + 3 * 16),
                    _mm512_loadu_ps(y + 4 * 16),
                    _mm512_loadu_ps(y + 5 * 16),
                    _mm512_loadu_ps(y + 6 * 16),
                    _mm512_loadu_ps(y + 7 * 16),
                    v0,
                    v1,
                    v2,
                    v3,
                    v4,
                    v5,
                    v6,
                    v7);

            // Component-wise differences against the query.
            const __m512 d0 = _mm512_sub_ps(m0, v0);
            const __m512 d1 = _mm512_sub_ps(m1, v1);
            const __m512 d2 = _mm512_sub_ps(m2, v2);
            const __m512 d3 = _mm512_sub_ps(m3, v3);
            const __m512 d4 = _mm512_sub_ps(m4, v4);
            const __m512 d5 = _mm512_sub_ps(m5, v5);
            const __m512 d6 = _mm512_sub_ps(m6, v6);
            const __m512 d7 = _mm512_sub_ps(m7, v7);

            // Squared L2 distance, accumulated with FMAs.
            __m512 distances = _mm512_mul_ps(d0, d0);
            distances = _mm512_fmadd_ps(d1, d1, distances);
            distances = _mm512_fmadd_ps(d2, d2, distances);
            distances = _mm512_fmadd_ps(d3, d3, distances);
            distances = _mm512_fmadd_ps(d4, d4, distances);
            distances = _mm512_fmadd_ps(d5, d5, distances);
            distances = _mm512_fmadd_ps(d6, d6, distances);
            distances = _mm512_fmadd_ps(d7, d7, distances);

            // Lanes where the new distance is strictly smaller than the min.
            __mmask16 comparison =
                    _mm512_cmp_ps_mask(distances, min_distances, _CMP_LT_OS);

            // Fold the improvements into the running minima and indices.
            min_distances = _mm512_min_ps(distances, min_distances);
            min_indices = _mm512_mask_blend_epi32(
                    comparison, min_indices, current_indices);

            current_indices =
                    _mm512_add_epi32(current_indices, indices_increment);

            // Advance past the 16 vectors (16 * 8 floats) just consumed.
            y += 128;
        }

        // Reduce the 16 per-lane minima to a single scalar winner.
        alignas(64) float min_distances_scalar[16];
        alignas(64) uint32_t min_indices_scalar[16];
        _mm512_store_ps(min_distances_scalar, min_distances);
        _mm512_store_epi32(min_indices_scalar, min_indices);

        for (size_t j = 0; j < 16; j++) {
            if (current_min_distance > min_distances_scalar[j]) {
                current_min_distance = min_distances_scalar[j];
                current_min_index = min_indices_scalar[j];
            }
        }
    }

    if (i < ny) {
        // Handle the ny % 16 leftover vectors one at a time with 256-bit ops.
        __m256 x0 = _mm256_loadu_ps(x);

        for (; i < ny; i++) {
            __m256 accu = AVX512ElementOpL2::op(x0, _mm256_loadu_ps(y));
            y += 8;
            const float distance = horizontal_sum(accu);

            if (current_min_distance > distance) {
                current_min_distance = distance;
                current_min_index = i;
            }
        }
    }

    return current_min_index;
}
935
+
936
+ template <>
937
+ size_t fvec_L2sqr_ny_nearest<SIMDLevel::AVX512>(
938
+ float* distances_tmp_buffer,
939
+ const float* x,
940
+ const float* y,
941
+ size_t d,
942
+ size_t ny) {
943
+ return fvec_L2sqr_ny_nearest_x86<SIMDLevel::AVX512>(
944
+ distances_tmp_buffer,
945
+ x,
946
+ y,
947
+ d,
948
+ ny,
949
+ &fvec_L2sqr_ny_nearest_D2<SIMDLevel::AVX512>,
950
+ &fvec_L2sqr_ny_nearest_D4<SIMDLevel::AVX512>,
951
+ &fvec_L2sqr_ny_nearest_D8<SIMDLevel::AVX512>);
952
+ }
953
+
954
+ template <>
955
+ size_t fvec_L2sqr_ny_nearest_y_transposed<SIMDLevel::AVX512>(
956
+ float* distances_tmp_buffer,
957
+ const float* x,
958
+ const float* y,
959
+ const float* y_sqlen,
960
+ size_t d,
961
+ size_t d_offset,
962
+ size_t ny) {
963
+ return fvec_L2sqr_ny_nearest_y_transposed<SIMDLevel::NONE>(
964
+ distances_tmp_buffer, x, y, y_sqlen, d, d_offset, ny);
965
+ }
966
+
967
// TODO: The function below is currently unused in this codebase; the AVX2
// implementation is used instead. Check the AVX2 counterpart before relying
// on it.
969
/// Finds the vector closest to the DIM-dimensional query `x` among `ny`
/// candidates stored in a transposed (component-major) layout: component j of
/// candidate i is read from y[j * d_offset + i]. y_sqlen[i] holds the squared
/// norm of candidate i, so only y^2 - 2*(x, y) needs to be computed per
/// candidate (the constant x^2 term does not affect the argmin).
template <size_t DIM>
size_t fvec_L2sqr_ny_nearest_y_transposed_D(
        float* /* distances_tmp_buffer */,
        const float* x,
        const float* y,
        const float* y_sqlen,
        const size_t d_offset,
        size_t ny) {
    // This implementation does not use distances_tmp_buffer.

    // Current index being processed
    size_t i = 0;

    // Min distance and the index of the closest vector so far
    float current_min_distance = HUGE_VALF;
    size_t current_min_index = 0;

    // Process 16 vectors per loop
    const size_t ny16 = ny / 16;

    if (ny16 > 0) {
        // Track min distance and the closest vector independently
        // for each of 16 AVX-512 components.
        __m512 min_distances = _mm512_set1_ps(HUGE_VALF);
        __m512i min_indices = _mm512_set1_epi32(0);

        // Per-lane candidate indices for the current iteration: i + lane.
        __m512i current_indices = _mm512_setr_epi32(
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        const __m512i indices_increment = _mm512_set1_epi32(16);

        // m[j] = (2 * x[j], ..., 2 * x[j]) — the factor of 2 folds the
        // "2 * (x, y)" term of the expanded distance into the dot product.
        __m512 m[DIM];
        for (size_t j = 0; j < DIM; j++) {
            m[j] = _mm512_set1_ps(x[j]);
            m[j] = _mm512_add_ps(m[j], m[j]);
        }

        for (; i < ny16 * 16; i += 16) {
            // Compute dot products (already scaled by 2 via m[])
            const __m512 v0 = _mm512_loadu_ps(y + 0 * d_offset);
            __m512 dp = _mm512_mul_ps(m[0], v0);
            for (size_t j = 1; j < DIM; j++) {
                const __m512 vj = _mm512_loadu_ps(y + j * d_offset);
                dp = _mm512_fmadd_ps(m[j], vj, dp);
            }

            // Compute y^2 - (2 * x, y), which is sufficient for looking for the
            // lowest distance.
            // x^2 is the constant that can be avoided.
            const __m512 distances =
                    _mm512_sub_ps(_mm512_loadu_ps(y_sqlen), dp);

            // Compare the new distances to the min distances.
            // Note the inverted operand order vs. the D4/D8 kernels: the mask
            // is set where the current min is ALREADY smaller, so the blends
            // below keep min_* where the mask is set.
            __mmask16 comparison =
                    _mm512_cmp_ps_mask(min_distances, distances, _CMP_LT_OS);

            // Update min distances and indices with closest vectors if needed
            min_distances =
                    _mm512_mask_blend_ps(comparison, distances, min_distances);
            min_indices = _mm512_castps_si512(_mm512_mask_blend_ps(
                    comparison,
                    _mm512_castsi512_ps(current_indices),
                    _mm512_castsi512_ps(min_indices)));

            // Update current indices values. Basically, +16 to each of the 16
            // AVX-512 components.
            current_indices =
                    _mm512_add_epi32(current_indices, indices_increment);

            // Scroll y and y_sqlen forward.
            y += 16;
            y_sqlen += 16;
        }

        // Dump values and find the minimum distance / minimum index
        float min_distances_scalar[16];
        uint32_t min_indices_scalar[16];
        _mm512_storeu_ps(min_distances_scalar, min_distances);
        _mm512_storeu_si512((__m512i*)(min_indices_scalar), min_indices);

        for (size_t j = 0; j < 16; j++) {
            if (current_min_distance > min_distances_scalar[j]) {
                current_min_distance = min_distances_scalar[j];
                current_min_index = min_indices_scalar[j];
            }
        }
    }

    if (i < ny) {
        // Process leftovers (ny % 16 candidates) with scalar code.
        for (; i < ny; i++) {
            float dp = 0;
            for (size_t j = 0; j < DIM; j++) {
                dp += x[j] * y[j * d_offset];
            }

            // Compute y^2 - 2 * (x, y), which is sufficient for looking for the
            // lowest distance.
            const float distance = y_sqlen[0] - 2 * dp;

            if (current_min_distance > distance) {
                current_min_distance = distance;
                current_min_index = i;
            }

            y += 1;
            y_sqlen += 1;
        }
    }

    return current_min_index;
}
1081
+
1082
/// AVX-512 specialization of fvec_madd_and_argmin.
/// No AVX-512-specific code path is provided: it delegates directly to the
/// SSE implementation (presumably adequate here — confirm against the AVX2
/// counterpart if an AVX-512 path is ever expected).
template <>
int fvec_madd_and_argmin<SIMDLevel::AVX512>(
        size_t n,
        const float* a,
        float bf,
        const float* b,
        float* c) {
    return fvec_madd_and_argmin_sse(n, a, bf, b, c);
}
1091
+
1092
+ } // namespace faiss