faiss 0.2.4 → 0.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/README.md +23 -21
- data/ext/faiss/extconf.rb +11 -0
- data/ext/faiss/index.cpp +4 -4
- data/ext/faiss/index_binary.cpp +6 -6
- data/ext/faiss/product_quantizer.cpp +4 -4
- data/lib/faiss/version.rb +1 -1
- data/vendor/faiss/faiss/AutoTune.cpp +13 -0
- data/vendor/faiss/faiss/IVFlib.cpp +101 -2
- data/vendor/faiss/faiss/IVFlib.h +26 -2
- data/vendor/faiss/faiss/Index.cpp +36 -3
- data/vendor/faiss/faiss/Index.h +43 -6
- data/vendor/faiss/faiss/Index2Layer.cpp +6 -2
- data/vendor/faiss/faiss/Index2Layer.h +6 -1
- data/vendor/faiss/faiss/IndexAdditiveQuantizer.cpp +219 -16
- data/vendor/faiss/faiss/IndexAdditiveQuantizer.h +63 -5
- data/vendor/faiss/faiss/IndexAdditiveQuantizerFastScan.cpp +299 -0
- data/vendor/faiss/faiss/IndexAdditiveQuantizerFastScan.h +199 -0
- data/vendor/faiss/faiss/IndexBinary.cpp +20 -4
- data/vendor/faiss/faiss/IndexBinary.h +18 -3
- data/vendor/faiss/faiss/IndexBinaryFlat.cpp +9 -2
- data/vendor/faiss/faiss/IndexBinaryFlat.h +4 -2
- data/vendor/faiss/faiss/IndexBinaryFromFloat.cpp +4 -1
- data/vendor/faiss/faiss/IndexBinaryFromFloat.h +2 -1
- data/vendor/faiss/faiss/IndexBinaryHNSW.cpp +5 -1
- data/vendor/faiss/faiss/IndexBinaryHNSW.h +2 -1
- data/vendor/faiss/faiss/IndexBinaryHash.cpp +17 -4
- data/vendor/faiss/faiss/IndexBinaryHash.h +8 -4
- data/vendor/faiss/faiss/IndexBinaryIVF.cpp +28 -13
- data/vendor/faiss/faiss/IndexBinaryIVF.h +10 -7
- data/vendor/faiss/faiss/IndexFastScan.cpp +626 -0
- data/vendor/faiss/faiss/IndexFastScan.h +145 -0
- data/vendor/faiss/faiss/IndexFlat.cpp +34 -21
- data/vendor/faiss/faiss/IndexFlat.h +7 -4
- data/vendor/faiss/faiss/IndexFlatCodes.cpp +35 -1
- data/vendor/faiss/faiss/IndexFlatCodes.h +12 -0
- data/vendor/faiss/faiss/IndexHNSW.cpp +66 -138
- data/vendor/faiss/faiss/IndexHNSW.h +4 -2
- data/vendor/faiss/faiss/IndexIDMap.cpp +247 -0
- data/vendor/faiss/faiss/IndexIDMap.h +107 -0
- data/vendor/faiss/faiss/IndexIVF.cpp +121 -33
- data/vendor/faiss/faiss/IndexIVF.h +35 -16
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizer.cpp +84 -7
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizer.h +63 -1
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizerFastScan.cpp +590 -0
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizerFastScan.h +171 -0
- data/vendor/faiss/faiss/IndexIVFFastScan.cpp +1290 -0
- data/vendor/faiss/faiss/IndexIVFFastScan.h +213 -0
- data/vendor/faiss/faiss/IndexIVFFlat.cpp +37 -17
- data/vendor/faiss/faiss/IndexIVFFlat.h +4 -2
- data/vendor/faiss/faiss/IndexIVFPQ.cpp +234 -50
- data/vendor/faiss/faiss/IndexIVFPQ.h +5 -1
- data/vendor/faiss/faiss/IndexIVFPQFastScan.cpp +23 -852
- data/vendor/faiss/faiss/IndexIVFPQFastScan.h +7 -112
- data/vendor/faiss/faiss/IndexIVFPQR.cpp +3 -3
- data/vendor/faiss/faiss/IndexIVFPQR.h +1 -1
- data/vendor/faiss/faiss/IndexIVFSpectralHash.cpp +3 -1
- data/vendor/faiss/faiss/IndexIVFSpectralHash.h +2 -1
- data/vendor/faiss/faiss/IndexLSH.cpp +4 -2
- data/vendor/faiss/faiss/IndexLSH.h +2 -1
- data/vendor/faiss/faiss/IndexLattice.cpp +7 -1
- data/vendor/faiss/faiss/IndexLattice.h +3 -1
- data/vendor/faiss/faiss/IndexNNDescent.cpp +4 -3
- data/vendor/faiss/faiss/IndexNNDescent.h +2 -1
- data/vendor/faiss/faiss/IndexNSG.cpp +37 -3
- data/vendor/faiss/faiss/IndexNSG.h +25 -1
- data/vendor/faiss/faiss/IndexPQ.cpp +106 -69
- data/vendor/faiss/faiss/IndexPQ.h +19 -5
- data/vendor/faiss/faiss/IndexPQFastScan.cpp +15 -450
- data/vendor/faiss/faiss/IndexPQFastScan.h +15 -78
- data/vendor/faiss/faiss/IndexPreTransform.cpp +47 -8
- data/vendor/faiss/faiss/IndexPreTransform.h +15 -3
- data/vendor/faiss/faiss/IndexRefine.cpp +8 -4
- data/vendor/faiss/faiss/IndexRefine.h +4 -2
- data/vendor/faiss/faiss/IndexReplicas.cpp +4 -2
- data/vendor/faiss/faiss/IndexReplicas.h +2 -1
- data/vendor/faiss/faiss/IndexRowwiseMinMax.cpp +438 -0
- data/vendor/faiss/faiss/IndexRowwiseMinMax.h +92 -0
- data/vendor/faiss/faiss/IndexScalarQuantizer.cpp +26 -15
- data/vendor/faiss/faiss/IndexScalarQuantizer.h +6 -7
- data/vendor/faiss/faiss/IndexShards.cpp +4 -1
- data/vendor/faiss/faiss/IndexShards.h +2 -1
- data/vendor/faiss/faiss/MetaIndexes.cpp +5 -178
- data/vendor/faiss/faiss/MetaIndexes.h +3 -81
- data/vendor/faiss/faiss/VectorTransform.cpp +43 -0
- data/vendor/faiss/faiss/VectorTransform.h +22 -4
- data/vendor/faiss/faiss/clone_index.cpp +23 -1
- data/vendor/faiss/faiss/clone_index.h +3 -0
- data/vendor/faiss/faiss/cppcontrib/SaDecodeKernels.h +300 -0
- data/vendor/faiss/faiss/cppcontrib/detail/CoarseBitType.h +24 -0
- data/vendor/faiss/faiss/cppcontrib/detail/UintReader.h +195 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-avx2-inl.h +2058 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-inl.h +408 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-neon-inl.h +2147 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/MinMax-inl.h +460 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/MinMaxFP16-inl.h +465 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-avx2-inl.h +1618 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-inl.h +251 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-neon-inl.h +1452 -0
- data/vendor/faiss/faiss/gpu/GpuAutoTune.cpp +1 -0
- data/vendor/faiss/faiss/gpu/GpuCloner.cpp +0 -4
- data/vendor/faiss/faiss/gpu/GpuIndex.h +28 -4
- data/vendor/faiss/faiss/gpu/GpuIndexBinaryFlat.h +2 -1
- data/vendor/faiss/faiss/gpu/GpuIndexFlat.h +10 -8
- data/vendor/faiss/faiss/gpu/GpuIndexIVF.h +75 -14
- data/vendor/faiss/faiss/gpu/GpuIndexIVFFlat.h +19 -32
- data/vendor/faiss/faiss/gpu/GpuIndexIVFPQ.h +22 -31
- data/vendor/faiss/faiss/gpu/GpuIndexIVFScalarQuantizer.h +22 -28
- data/vendor/faiss/faiss/gpu/GpuResources.cpp +14 -0
- data/vendor/faiss/faiss/gpu/GpuResources.h +16 -3
- data/vendor/faiss/faiss/gpu/StandardGpuResources.cpp +3 -3
- data/vendor/faiss/faiss/gpu/impl/IndexUtils.h +32 -0
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexBinaryFlat.cpp +1 -0
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexFlat.cpp +311 -75
- data/vendor/faiss/faiss/gpu/test/TestUtils.cpp +10 -0
- data/vendor/faiss/faiss/gpu/test/TestUtils.h +3 -0
- data/vendor/faiss/faiss/gpu/test/demo_ivfpq_indexing_gpu.cpp +2 -2
- data/vendor/faiss/faiss/gpu/utils/DeviceUtils.h +5 -4
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.cpp +116 -47
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.h +44 -13
- data/vendor/faiss/faiss/impl/AuxIndexStructures.cpp +0 -54
- data/vendor/faiss/faiss/impl/AuxIndexStructures.h +0 -76
- data/vendor/faiss/faiss/impl/DistanceComputer.h +64 -0
- data/vendor/faiss/faiss/impl/HNSW.cpp +123 -27
- data/vendor/faiss/faiss/impl/HNSW.h +19 -16
- data/vendor/faiss/faiss/impl/IDSelector.cpp +125 -0
- data/vendor/faiss/faiss/impl/IDSelector.h +135 -0
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.cpp +6 -28
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.h +6 -1
- data/vendor/faiss/faiss/impl/LookupTableScaler.h +77 -0
- data/vendor/faiss/faiss/impl/NNDescent.cpp +1 -0
- data/vendor/faiss/faiss/impl/NSG.cpp +1 -1
- data/vendor/faiss/faiss/impl/ProductAdditiveQuantizer.cpp +383 -0
- data/vendor/faiss/faiss/impl/ProductAdditiveQuantizer.h +154 -0
- data/vendor/faiss/faiss/impl/ProductQuantizer.cpp +225 -145
- data/vendor/faiss/faiss/impl/ProductQuantizer.h +29 -10
- data/vendor/faiss/faiss/impl/Quantizer.h +43 -0
- data/vendor/faiss/faiss/impl/ResidualQuantizer.cpp +192 -36
- data/vendor/faiss/faiss/impl/ResidualQuantizer.h +40 -20
- data/vendor/faiss/faiss/impl/ResultHandler.h +96 -0
- data/vendor/faiss/faiss/impl/ScalarQuantizer.cpp +97 -173
- data/vendor/faiss/faiss/impl/ScalarQuantizer.h +18 -18
- data/vendor/faiss/faiss/impl/index_read.cpp +240 -9
- data/vendor/faiss/faiss/impl/index_write.cpp +237 -5
- data/vendor/faiss/faiss/impl/kmeans1d.cpp +6 -4
- data/vendor/faiss/faiss/impl/pq4_fast_scan.cpp +56 -16
- data/vendor/faiss/faiss/impl/pq4_fast_scan.h +25 -8
- data/vendor/faiss/faiss/impl/pq4_fast_scan_search_1.cpp +66 -25
- data/vendor/faiss/faiss/impl/pq4_fast_scan_search_qbs.cpp +75 -27
- data/vendor/faiss/faiss/index_factory.cpp +196 -7
- data/vendor/faiss/faiss/index_io.h +5 -0
- data/vendor/faiss/faiss/invlists/DirectMap.cpp +1 -0
- data/vendor/faiss/faiss/invlists/InvertedLists.cpp +4 -1
- data/vendor/faiss/faiss/invlists/OnDiskInvertedLists.cpp +2 -1
- data/vendor/faiss/faiss/python/python_callbacks.cpp +27 -0
- data/vendor/faiss/faiss/python/python_callbacks.h +15 -0
- data/vendor/faiss/faiss/utils/Heap.h +31 -15
- data/vendor/faiss/faiss/utils/distances.cpp +380 -56
- data/vendor/faiss/faiss/utils/distances.h +113 -15
- data/vendor/faiss/faiss/utils/distances_simd.cpp +726 -6
- data/vendor/faiss/faiss/utils/extra_distances.cpp +12 -7
- data/vendor/faiss/faiss/utils/extra_distances.h +3 -1
- data/vendor/faiss/faiss/utils/fp16-fp16c.h +21 -0
- data/vendor/faiss/faiss/utils/fp16-inl.h +101 -0
- data/vendor/faiss/faiss/utils/fp16.h +11 -0
- data/vendor/faiss/faiss/utils/hamming-inl.h +54 -0
- data/vendor/faiss/faiss/utils/hamming.cpp +0 -48
- data/vendor/faiss/faiss/utils/ordered_key_value.h +10 -0
- data/vendor/faiss/faiss/utils/quantize_lut.cpp +62 -0
- data/vendor/faiss/faiss/utils/quantize_lut.h +20 -0
- data/vendor/faiss/faiss/utils/random.cpp +53 -0
- data/vendor/faiss/faiss/utils/random.h +5 -0
- data/vendor/faiss/faiss/utils/simdlib_avx2.h +4 -0
- data/vendor/faiss/faiss/utils/simdlib_emulated.h +6 -1
- data/vendor/faiss/faiss/utils/simdlib_neon.h +7 -2
- metadata +37 -3
@@ -0,0 +1,460 @@
|
|
1
|
+
#pragma once
|
2
|
+
|
3
|
+
#include <cstddef>
|
4
|
+
#include <cstdint>
|
5
|
+
|
6
|
+
namespace faiss {
|
7
|
+
namespace cppcontrib {
|
8
|
+
|
9
|
+
// Decoder adapter that undoes a per-vector min/max (rowwise) normalization
// on top of an inner decoder SubIndexT.
//
// Code layout: the first 2 * sizeof(float) bytes of every code hold
// [scaler, minv] as raw floats; the remaining bytes are the SubIndexT code.
// Decoding computes: output = scaler * SubIndexT::decoded(payload) + minv.
template <typename SubIndexT>
struct IndexMinMaxDecoder {
    static constexpr intptr_t dim = SubIndexT::dim;

    // Number of leading bytes occupied by the [scaler, minv] float pair.
    static constexpr intptr_t kScalingBytes = 2 * sizeof(float);

    // Process 1 sample.
    // Performs outputStore = scaler * decoded(code) + minv
    static void store(
            const float* const __restrict pqCoarseCentroids,
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code,
            float* const __restrict outputStore) {
        // Reinterpret the code header as the [scaler, minv] float pair.
        const float* const __restrict header =
                reinterpret_cast<const float*>(code);
        const float scaler = header[0];
        const float minv = header[1];

        // Decode the payload first, then denormalize in place.
        SubIndexT::store(
                pqCoarseCentroids,
                pqFineCentroids,
                code + kScalingBytes,
                outputStore);
        for (intptr_t i = 0; i < dim; i++) {
            outputStore[i] = outputStore[i] * scaler + minv;
        }
    }

    // Process 1 sample.
    // Performs outputStore = scaler * decoded(code) + minv
    static void store(
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code,
            float* const __restrict outputStore) {
        const float* const __restrict header =
                reinterpret_cast<const float*>(code);
        const float scaler = header[0];
        const float minv = header[1];

        SubIndexT::store(pqFineCentroids, code + kScalingBytes, outputStore);
        for (intptr_t i = 0; i < dim; i++) {
            outputStore[i] = outputStore[i] * scaler + minv;
        }
    }

    // Process 1 sample.
    // Performs
    //  * outputAccum += weight * scaler * decoded(code)
    //  * minvAccum += weight * minv
    static void accum(
            const float* const __restrict pqCoarseCentroids,
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code,
            const float weight,
            float* const __restrict outputAccum,
            float& minvAccum) {
        // Fold the caller's weight into both scaler and minv up front so
        // the inner accumulator needs only a single multiplier.
        const float* const __restrict header =
                reinterpret_cast<const float*>(code);
        const float wScaler = header[0] * weight;
        const float wMinv = header[1] * weight;

        SubIndexT::accum(
                pqCoarseCentroids,
                pqFineCentroids,
                code + kScalingBytes,
                wScaler,
                outputAccum);

        minvAccum += wMinv;
    }

    // Process 1 sample.
    // Performs
    //  * outputAccum += weight * scaler * decoded(code)
    //  * minvAccum += weight * minv
    static void accum(
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code,
            const float weight,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header =
                reinterpret_cast<const float*>(code);
        const float wScaler = header[0] * weight;
        const float wMinv = header[1] * weight;

        SubIndexT::accum(
                pqFineCentroids, code + kScalingBytes, wScaler, outputAccum);

        minvAccum += wMinv;
    }

    // Process 2 samples.
    // Each code uses its own coarse pq centroids table and fine pq centroids
    // table.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1
    static void accum(
            const float* const __restrict pqCoarseCentroids0,
            const float* const __restrict pqFineCentroids0,
            const uint8_t* const __restrict code0,
            const float weight0,
            const float* const __restrict pqCoarseCentroids1,
            const float* const __restrict pqFineCentroids1,
            const uint8_t* const __restrict code1,
            const float weight1,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        SubIndexT::accum(
                pqCoarseCentroids0,
                pqFineCentroids0,
                code0 + kScalingBytes,
                wScaler0,
                pqCoarseCentroids1,
                pqFineCentroids1,
                code1 + kScalingBytes,
                wScaler1,
                outputAccum);

        minvAccum += wMinv0 + wMinv1;
    }

    // Process 2 samples.
    // Coarse pq centroids table and fine pq centroids table are shared among
    // codes.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1
    static void accum(
            const float* const __restrict pqCoarseCentroids,
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code0,
            const float weight0,
            const uint8_t* const __restrict code1,
            const float weight1,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        SubIndexT::accum(
                pqCoarseCentroids,
                pqFineCentroids,
                code0 + kScalingBytes,
                wScaler0,
                code1 + kScalingBytes,
                wScaler1,
                outputAccum);

        minvAccum += wMinv0 + wMinv1;
    }

    // Process 2 samples.
    // Each code uses its own fine pq centroids table.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1
    static void accum(
            const float* const __restrict pqFineCentroids0,
            const uint8_t* const __restrict code0,
            const float weight0,
            const float* const __restrict pqFineCentroids1,
            const uint8_t* const __restrict code1,
            const float weight1,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        SubIndexT::accum(
                pqFineCentroids0,
                code0 + kScalingBytes,
                wScaler0,
                pqFineCentroids1,
                code1 + kScalingBytes,
                wScaler1,
                outputAccum);

        minvAccum += wMinv0 + wMinv1;
    }

    // Process 2 samples.
    // Fine pq centroids table is shared among codes.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1
    static void accum(
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code0,
            const float weight0,
            const uint8_t* const __restrict code1,
            const float weight1,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        SubIndexT::accum(
                pqFineCentroids,
                code0 + kScalingBytes,
                wScaler0,
                code1 + kScalingBytes,
                wScaler1,
                outputAccum);

        minvAccum += wMinv0 + wMinv1;
    }

    // Process 3 samples.
    // Each code uses its own coarse pq centroids table and fine pq centroids
    // table.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //                 + weight2 * scaler2 * decoded(code2)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1 + weight2 * minv2
    static void accum(
            const float* const __restrict pqCoarseCentroids0,
            const float* const __restrict pqFineCentroids0,
            const uint8_t* const __restrict code0,
            const float weight0,
            const float* const __restrict pqCoarseCentroids1,
            const float* const __restrict pqFineCentroids1,
            const uint8_t* const __restrict code1,
            const float weight1,
            const float* const __restrict pqCoarseCentroids2,
            const float* const __restrict pqFineCentroids2,
            const uint8_t* const __restrict code2,
            const float weight2,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        const float* const __restrict header2 =
                reinterpret_cast<const float*>(code2);
        const float wScaler2 = header2[0] * weight2;
        const float wMinv2 = header2[1] * weight2;

        SubIndexT::accum(
                pqCoarseCentroids0,
                pqFineCentroids0,
                code0 + kScalingBytes,
                wScaler0,
                pqCoarseCentroids1,
                pqFineCentroids1,
                code1 + kScalingBytes,
                wScaler1,
                pqCoarseCentroids2,
                pqFineCentroids2,
                code2 + kScalingBytes,
                wScaler2,
                outputAccum);

        minvAccum += wMinv0 + wMinv1 + wMinv2;
    }

    // Process 3 samples.
    // Coarse pq centroids table and fine pq centroids table are shared among
    // codes.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //                 + weight2 * scaler2 * decoded(code2)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1 + weight2 * minv2
    static void accum(
            const float* const __restrict pqCoarseCentroids,
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code0,
            const float weight0,
            const uint8_t* const __restrict code1,
            const float weight1,
            const uint8_t* const __restrict code2,
            const float weight2,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        const float* const __restrict header2 =
                reinterpret_cast<const float*>(code2);
        const float wScaler2 = header2[0] * weight2;
        const float wMinv2 = header2[1] * weight2;

        SubIndexT::accum(
                pqCoarseCentroids,
                pqFineCentroids,
                code0 + kScalingBytes,
                wScaler0,
                code1 + kScalingBytes,
                wScaler1,
                code2 + kScalingBytes,
                wScaler2,
                outputAccum);

        minvAccum += wMinv0 + wMinv1 + wMinv2;
    }

    // Process 3 samples.
    // Each code uses its own fine pq centroids table.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //                 + weight2 * scaler2 * decoded(code2)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1 + weight2 * minv2
    static void accum(
            const float* const __restrict pqFineCentroids0,
            const uint8_t* const __restrict code0,
            const float weight0,
            const float* const __restrict pqFineCentroids1,
            const uint8_t* const __restrict code1,
            const float weight1,
            const float* const __restrict pqFineCentroids2,
            const uint8_t* const __restrict code2,
            const float weight2,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        const float* const __restrict header2 =
                reinterpret_cast<const float*>(code2);
        const float wScaler2 = header2[0] * weight2;
        const float wMinv2 = header2[1] * weight2;

        SubIndexT::accum(
                pqFineCentroids0,
                code0 + kScalingBytes,
                wScaler0,
                pqFineCentroids1,
                code1 + kScalingBytes,
                wScaler1,
                pqFineCentroids2,
                code2 + kScalingBytes,
                wScaler2,
                outputAccum);

        minvAccum += wMinv0 + wMinv1 + wMinv2;
    }

    // Process 3 samples.
    // Fine pq centroids table is shared among codes.
    //
    // Performs
    //  * outputAccum += weight0 * scaler0 * decoded(code0)
    //                 + weight1 * scaler1 * decoded(code1)
    //                 + weight2 * scaler2 * decoded(code2)
    //  * minvAccum += weight0 * minv0 + weight1 * minv1 + weight2 * minv2
    static void accum(
            const float* const __restrict pqFineCentroids,
            const uint8_t* const __restrict code0,
            const float weight0,
            const uint8_t* const __restrict code1,
            const float weight1,
            const uint8_t* const __restrict code2,
            const float weight2,
            float* const __restrict outputAccum,
            float& minvAccum) {
        const float* const __restrict header0 =
                reinterpret_cast<const float*>(code0);
        const float wScaler0 = header0[0] * weight0;
        const float wMinv0 = header0[1] * weight0;

        const float* const __restrict header1 =
                reinterpret_cast<const float*>(code1);
        const float wScaler1 = header1[0] * weight1;
        const float wMinv1 = header1[1] * weight1;

        const float* const __restrict header2 =
                reinterpret_cast<const float*>(code2);
        const float wScaler2 = header2[0] * weight2;
        const float wMinv2 = header2[1] * weight2;

        SubIndexT::accum(
                pqFineCentroids,
                code0 + kScalingBytes,
                wScaler0,
                code1 + kScalingBytes,
                wScaler1,
                code2 + kScalingBytes,
                wScaler2,
                outputAccum);

        minvAccum += wMinv0 + wMinv1 + wMinv2;
    }
};
|
458
|
+
|
459
|
+
} // namespace cppcontrib
|
460
|
+
} // namespace faiss
|