faiss 0.2.3 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/LICENSE.txt +1 -1
- data/lib/faiss/version.rb +1 -1
- data/vendor/faiss/faiss/Clustering.cpp +32 -0
- data/vendor/faiss/faiss/Clustering.h +14 -0
- data/vendor/faiss/faiss/Index.h +1 -1
- data/vendor/faiss/faiss/Index2Layer.cpp +19 -92
- data/vendor/faiss/faiss/Index2Layer.h +2 -16
- data/vendor/faiss/faiss/IndexAdditiveQuantizer.cpp +407 -0
- data/vendor/faiss/faiss/{IndexResidual.h → IndexAdditiveQuantizer.h} +101 -58
- data/vendor/faiss/faiss/IndexFlat.cpp +22 -52
- data/vendor/faiss/faiss/IndexFlat.h +9 -15
- data/vendor/faiss/faiss/IndexFlatCodes.cpp +67 -0
- data/vendor/faiss/faiss/IndexFlatCodes.h +47 -0
- data/vendor/faiss/faiss/IndexIVF.cpp +79 -7
- data/vendor/faiss/faiss/IndexIVF.h +25 -7
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizer.cpp +316 -0
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizer.h +121 -0
- data/vendor/faiss/faiss/IndexIVFFlat.cpp +9 -12
- data/vendor/faiss/faiss/IndexIVFPQ.cpp +5 -4
- data/vendor/faiss/faiss/IndexIVFPQ.h +1 -1
- data/vendor/faiss/faiss/IndexIVFSpectralHash.cpp +60 -39
- data/vendor/faiss/faiss/IndexIVFSpectralHash.h +21 -6
- data/vendor/faiss/faiss/IndexLSH.cpp +4 -30
- data/vendor/faiss/faiss/IndexLSH.h +2 -15
- data/vendor/faiss/faiss/IndexNNDescent.cpp +0 -2
- data/vendor/faiss/faiss/IndexNSG.cpp +0 -2
- data/vendor/faiss/faiss/IndexPQ.cpp +2 -51
- data/vendor/faiss/faiss/IndexPQ.h +2 -17
- data/vendor/faiss/faiss/IndexRefine.cpp +28 -0
- data/vendor/faiss/faiss/IndexRefine.h +10 -0
- data/vendor/faiss/faiss/IndexScalarQuantizer.cpp +2 -28
- data/vendor/faiss/faiss/IndexScalarQuantizer.h +2 -16
- data/vendor/faiss/faiss/VectorTransform.cpp +2 -1
- data/vendor/faiss/faiss/VectorTransform.h +3 -0
- data/vendor/faiss/faiss/clone_index.cpp +3 -2
- data/vendor/faiss/faiss/gpu/GpuCloner.cpp +2 -2
- data/vendor/faiss/faiss/gpu/GpuIcmEncoder.h +60 -0
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.cpp +257 -24
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.h +69 -9
- data/vendor/faiss/faiss/impl/HNSW.cpp +10 -5
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.cpp +393 -210
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.h +100 -28
- data/vendor/faiss/faiss/impl/NSG.cpp +0 -3
- data/vendor/faiss/faiss/impl/NSG.h +1 -1
- data/vendor/faiss/faiss/impl/ResidualQuantizer.cpp +357 -47
- data/vendor/faiss/faiss/impl/ResidualQuantizer.h +65 -7
- data/vendor/faiss/faiss/impl/ScalarQuantizer.cpp +12 -19
- data/vendor/faiss/faiss/impl/index_read.cpp +102 -19
- data/vendor/faiss/faiss/impl/index_write.cpp +66 -16
- data/vendor/faiss/faiss/impl/io.cpp +1 -1
- data/vendor/faiss/faiss/impl/io_macros.h +20 -0
- data/vendor/faiss/faiss/impl/kmeans1d.cpp +301 -0
- data/vendor/faiss/faiss/impl/kmeans1d.h +48 -0
- data/vendor/faiss/faiss/index_factory.cpp +585 -414
- data/vendor/faiss/faiss/index_factory.h +3 -0
- data/vendor/faiss/faiss/utils/distances.cpp +4 -2
- data/vendor/faiss/faiss/utils/distances.h +36 -3
- data/vendor/faiss/faiss/utils/distances_simd.cpp +50 -0
- data/vendor/faiss/faiss/utils/utils.h +1 -1
- metadata +12 -5
- data/vendor/faiss/faiss/IndexResidual.cpp +0 -291
--- /dev/null
+++ b/data/vendor/faiss/faiss/IndexAdditiveQuantizer.cpp
@@ -0,0 +1,407 @@
+/**
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// quiet the noise
+// clang-format off
+
+#include <faiss/IndexAdditiveQuantizer.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+#include <faiss/impl/FaissAssert.h>
+#include <faiss/impl/ResidualQuantizer.h>
+#include <faiss/impl/ResultHandler.h>
+#include <faiss/utils/distances.h>
+#include <faiss/utils/extra_distances.h>
+#include <faiss/utils/utils.h>
+
+
+namespace faiss {
+
+/**************************************************************************************
+ * IndexAdditiveQuantizer
+ **************************************************************************************/
+
+IndexAdditiveQuantizer::IndexAdditiveQuantizer(
+        idx_t d,
+        AdditiveQuantizer* aq,
+        MetricType metric):
+        IndexFlatCodes(aq->code_size, d, metric), aq(aq)
+{
+    FAISS_THROW_IF_NOT(metric == METRIC_INNER_PRODUCT || metric == METRIC_L2);
+}
+
+
+namespace {
+
+template <class VectorDistance, class ResultHandler>
+void search_with_decompress(
+        const IndexAdditiveQuantizer& ir,
+        const float* xq,
+        VectorDistance& vd,
+        ResultHandler& res) {
+    const uint8_t* codes = ir.codes.data();
+    size_t ntotal = ir.ntotal;
+    size_t code_size = ir.code_size;
+    const AdditiveQuantizer *aq = ir.aq;
+
+    using SingleResultHandler = typename ResultHandler::SingleResultHandler;
+
+#pragma omp parallel for if(res.nq > 100)
+    for (int64_t q = 0; q < res.nq; q++) {
+        SingleResultHandler resi(res);
+        resi.begin(q);
+        std::vector<float> tmp(ir.d);
+        const float* x = xq + ir.d * q;
+        for (size_t i = 0; i < ntotal; i++) {
+            aq->decode(codes + i * code_size, tmp.data(), 1);
+            float dis = vd(x, tmp.data());
+            resi.add_result(dis, i);
+        }
+        resi.end();
+    }
+}
+
+template<bool is_IP, AdditiveQuantizer::Search_type_t st, class ResultHandler>
+void search_with_LUT(
+        const IndexAdditiveQuantizer& ir,
+        const float* xq,
+        ResultHandler& res)
+{
+    const AdditiveQuantizer & aq = *ir.aq;
+    const uint8_t* codes = ir.codes.data();
+    size_t ntotal = ir.ntotal;
+    size_t code_size = aq.code_size;
+    size_t nq = res.nq;
+    size_t d = ir.d;
+
+    using SingleResultHandler = typename ResultHandler::SingleResultHandler;
+    std::unique_ptr<float []> LUT(new float[nq * aq.total_codebook_size]);
+
+    aq.compute_LUT(nq, xq, LUT.get());
+
+#pragma omp parallel for if(nq > 100)
+    for (int64_t q = 0; q < nq; q++) {
+        SingleResultHandler resi(res);
+        resi.begin(q);
+        std::vector<float> tmp(aq.d);
+        const float *LUT_q = LUT.get() + aq.total_codebook_size * q;
+        float bias = 0;
+        if (!is_IP) { // the LUT function returns ||y||^2 - 2 * <x, y>, need to add ||x||^2
+            bias = fvec_norm_L2sqr(xq + q * d, d);
+        }
+        for (size_t i = 0; i < ntotal; i++) {
+            float dis = aq.compute_1_distance_LUT<is_IP, st>(
+                    codes + i * code_size,
+                    LUT_q
+            );
+            resi.add_result(dis + bias, i);
+        }
+        resi.end();
+    }
+
+}
+
+
+} // anonymous namespace
+
+void IndexAdditiveQuantizer::search(
+        idx_t n,
+        const float* x,
+        idx_t k,
+        float* distances,
+        idx_t* labels) const {
+    if (aq->search_type == AdditiveQuantizer::ST_decompress) {
+        if (metric_type == METRIC_L2) {
+            using VD = VectorDistance<METRIC_L2>;
+            VD vd = {size_t(d), metric_arg};
+            HeapResultHandler<VD::C> rh(n, distances, labels, k);
+            search_with_decompress(*this, x, vd, rh);
+        } else if (metric_type == METRIC_INNER_PRODUCT) {
+            using VD = VectorDistance<METRIC_INNER_PRODUCT>;
+            VD vd = {size_t(d), metric_arg};
+            HeapResultHandler<VD::C> rh(n, distances, labels, k);
+            search_with_decompress(*this, x, vd, rh);
+        }
+    } else {
+        if (metric_type == METRIC_INNER_PRODUCT) {
+            HeapResultHandler<CMin<float, idx_t> > rh(n, distances, labels, k);
+            search_with_LUT<true, AdditiveQuantizer::ST_LUT_nonorm> (*this, x, rh);
+        } else {
+            HeapResultHandler<CMax<float, idx_t> > rh(n, distances, labels, k);
+
+            if (aq->search_type == AdditiveQuantizer::ST_norm_float) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_float> (*this, x, rh);
+            } else if (aq->search_type == AdditiveQuantizer::ST_LUT_nonorm) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_float> (*this, x, rh);
+            } else if (aq->search_type == AdditiveQuantizer::ST_norm_qint8) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_qint8> (*this, x, rh);
+            } else if (aq->search_type == AdditiveQuantizer::ST_norm_qint4) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_qint4> (*this, x, rh);
+            } else if (aq->search_type == AdditiveQuantizer::ST_norm_cqint8) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_cqint8> (*this, x, rh);
+            } else if (aq->search_type == AdditiveQuantizer::ST_norm_cqint4) {
+                search_with_LUT<false, AdditiveQuantizer::ST_norm_cqint4> (*this, x, rh);
+            } else {
+                FAISS_THROW_FMT("search type %d not supported", aq->search_type);
+            }
+        }
+
+    }
+}
+
+void IndexAdditiveQuantizer::sa_encode(idx_t n, const float* x, uint8_t* bytes) const {
+    return aq->compute_codes(x, bytes, n);
+}
+
+void IndexAdditiveQuantizer::sa_decode(idx_t n, const uint8_t* bytes, float* x) const {
+    return aq->decode(bytes, x, n);
+}
+
+
+
+
+/**************************************************************************************
+ * IndexResidualQuantizer
+ **************************************************************************************/
+
+IndexResidualQuantizer::IndexResidualQuantizer(
+        int d,        ///< dimensionality of the input vectors
+        size_t M,     ///< number of subquantizers
+        size_t nbits, ///< number of bit per subvector index
+        MetricType metric,
+        Search_type_t search_type)
+        : IndexResidualQuantizer(d, std::vector<size_t>(M, nbits), metric, search_type) {
+}
+
+IndexResidualQuantizer::IndexResidualQuantizer(
+        int d,
+        const std::vector<size_t>& nbits,
+        MetricType metric,
+        Search_type_t search_type)
+        : IndexAdditiveQuantizer(d, &rq, metric), rq(d, nbits, search_type) {
+    code_size = rq.code_size;
+    is_trained = false;
+}
+
+IndexResidualQuantizer::IndexResidualQuantizer() : IndexResidualQuantizer(0, 0, 0) {}
+
+void IndexResidualQuantizer::train(idx_t n, const float* x) {
+    rq.train(n, x);
+    is_trained = true;
+}
+
+
+/**************************************************************************************
+ * IndexLocalSearchQuantizer
+ **************************************************************************************/
+
+IndexLocalSearchQuantizer::IndexLocalSearchQuantizer(
+        int d,
+        size_t M,     ///< number of subquantizers
+        size_t nbits, ///< number of bit per subvector index
+        MetricType metric,
+        Search_type_t search_type)
+        : IndexAdditiveQuantizer(d, &lsq, metric), lsq(d, M, nbits, search_type) {
+    code_size = lsq.code_size;
+    is_trained = false;
+}
+
+IndexLocalSearchQuantizer::IndexLocalSearchQuantizer() : IndexLocalSearchQuantizer(0, 0, 0) {}
+
+void IndexLocalSearchQuantizer::train(idx_t n, const float* x) {
+    lsq.train(n, x);
+    is_trained = true;
+}
+
+/**************************************************************************************
+ * AdditiveCoarseQuantizer
+ **************************************************************************************/
+
+AdditiveCoarseQuantizer::AdditiveCoarseQuantizer(
+        idx_t d,
+        AdditiveQuantizer* aq,
+        MetricType metric):
+        Index(d, metric), aq(aq)
+{}
+
+void AdditiveCoarseQuantizer::add(idx_t, const float*) {
+    FAISS_THROW_MSG("not applicable");
+}
+
+void AdditiveCoarseQuantizer::reconstruct(idx_t key, float* recons) const {
+    aq->decode_64bit(key, recons);
+}
+
+void AdditiveCoarseQuantizer::reset() {
+    FAISS_THROW_MSG("not applicable");
+}
+
+
+void AdditiveCoarseQuantizer::train(idx_t n, const float* x) {
+    if (verbose) {
+        printf("AdditiveCoarseQuantizer::train: training on %zd vectors\n", size_t(n));
+    }
+    aq->train(n, x);
+    is_trained = true;
+    ntotal = (idx_t)1 << aq->tot_bits;
+
+    if (metric_type == METRIC_L2) {
+        if (verbose) {
+            printf("AdditiveCoarseQuantizer::train: computing centroid norms for %zd centroids\n", size_t(ntotal));
+        }
+        // this is not necessary for the residualcoarsequantizer when
+        // using beam search. We'll see if the memory overhead is too high
+        centroid_norms.resize(ntotal);
+        aq->compute_centroid_norms(centroid_norms.data());
+    }
+}
+
+void AdditiveCoarseQuantizer::search(
+        idx_t n,
+        const float* x,
+        idx_t k,
+        float* distances,
+        idx_t* labels) const {
+    if (metric_type == METRIC_INNER_PRODUCT) {
+        aq->knn_centroids_inner_product(n, x, k, distances, labels);
+    } else if (metric_type == METRIC_L2) {
+        FAISS_THROW_IF_NOT(centroid_norms.size() == ntotal);
+        aq->knn_centroids_L2(
+                n, x, k, distances, labels, centroid_norms.data());
+    }
+}
+
+/**************************************************************************************
+ * ResidualCoarseQuantizer
+ **************************************************************************************/
+
+ResidualCoarseQuantizer::ResidualCoarseQuantizer(
+        int d,        ///< dimensionality of the input vectors
+        const std::vector<size_t>& nbits,
+        MetricType metric)
+        : AdditiveCoarseQuantizer(d, &rq, metric), rq(d, nbits), beam_factor(4.0) {
+    FAISS_THROW_IF_NOT(rq.tot_bits <= 63);
+    is_trained = false;
+}
+
+ResidualCoarseQuantizer::ResidualCoarseQuantizer(
+        int d,
+        size_t M,     ///< number of subquantizers
+        size_t nbits, ///< number of bit per subvector index
+        MetricType metric)
+        : ResidualCoarseQuantizer(d, std::vector<size_t>(M, nbits), metric) {}
+
+ResidualCoarseQuantizer::ResidualCoarseQuantizer(): ResidualCoarseQuantizer(0, 0, 0) {}
+
+
+
+void ResidualCoarseQuantizer::set_beam_factor(float new_beam_factor) {
+    beam_factor = new_beam_factor;
+    if (new_beam_factor > 0) {
+        FAISS_THROW_IF_NOT(new_beam_factor >= 1.0);
+        return;
+    } else if (metric_type == METRIC_L2 && ntotal != centroid_norms.size()) {
+        if (verbose) {
+            printf("AdditiveCoarseQuantizer::train: computing centroid norms for %zd centroids\n", size_t(ntotal));
+        }
+        centroid_norms.resize(ntotal);
+        aq->compute_centroid_norms(centroid_norms.data());
+    }
+}
+
+void ResidualCoarseQuantizer::search(
+        idx_t n,
+        const float* x,
+        idx_t k,
+        float* distances,
+        idx_t* labels) const {
+    if (beam_factor < 0) {
+        AdditiveCoarseQuantizer::search(n, x, k, distances, labels);
+        return;
+    }
+
+    int beam_size = int(k * beam_factor);
+    if (beam_size > ntotal) {
+        beam_size = ntotal;
+    }
+    size_t memory_per_point = rq.memory_per_point(beam_size);
+
+    /*
+
+    printf("mem per point %ld n=%d max_mem_distance=%ld mem_kb=%zd\n",
+        memory_per_point, int(n), rq.max_mem_distances, get_mem_usage_kb());
+    */
+    if (n > 1 && memory_per_point * n > rq.max_mem_distances) {
+        // then split queries to reduce temp memory
+        idx_t bs = rq.max_mem_distances / memory_per_point;
+        if (bs == 0) {
+            bs = 1; // otherwise we can't do much
+        }
+        if (verbose) {
+            printf("ResidualCoarseQuantizer::search: run %d searches in batches of size %d\n",
+                   int(n),
+                   int(bs));
+        }
+        for (idx_t i0 = 0; i0 < n; i0 += bs) {
+            idx_t i1 = std::min(n, i0 + bs);
+            search(i1 - i0, x + i0 * d, k, distances + i0 * k, labels + i0 * k);
+            InterruptCallback::check();
+        }
+        return;
+    }
+
+    std::vector<int32_t> codes(beam_size * rq.M * n);
+    std::vector<float> beam_distances(n * beam_size);
+
+    rq.refine_beam(
+            n, 1, x, beam_size, codes.data(), nullptr, beam_distances.data());
+
+#pragma omp parallel for if (n > 4000)
+    for (idx_t i = 0; i < n; i++) {
+        memcpy(distances + i * k,
+               beam_distances.data() + beam_size * i,
+               k * sizeof(distances[0]));
+
+        const int32_t* codes_i = codes.data() + beam_size * i * rq.M;
+        for (idx_t j = 0; j < k; j++) {
+            idx_t l = 0;
+            int shift = 0;
+            for (int m = 0; m < rq.M; m++) {
+                l |= (*codes_i++) << shift;
+                shift += rq.nbits[m];
+            }
+            labels[i * k + j] = l;
+        }
+    }
+}
+
+/**************************************************************************************
+ * LocalSearchCoarseQuantizer
+ **************************************************************************************/
+
+LocalSearchCoarseQuantizer::LocalSearchCoarseQuantizer(
+        int d,        ///< dimensionality of the input vectors
+        size_t M,     ///< number of subquantizers
+        size_t nbits, ///< number of bit per subvector index
+        MetricType metric)
+        : AdditiveCoarseQuantizer(d, &lsq, metric), lsq(d, M, nbits) {
+    FAISS_THROW_IF_NOT(lsq.tot_bits <= 63);
+    is_trained = false;
+}
+
+
+LocalSearchCoarseQuantizer::LocalSearchCoarseQuantizer() {
+    aq = &lsq;
+}
+
+
+
+
+} // namespace faiss
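The file above follows the usual faiss Index workflow (train, add, search) and, per the header, the index "can also be used as a codec" through sa_encode/sa_decode. The following sketch shows how the IndexResidualQuantizer introduced here might be exercised; the dimensions, random data, and main() scaffolding are illustrative assumptions, not part of the diff.

```cpp
// Hypothetical usage sketch for the IndexResidualQuantizer added in this release.
// Assumes the vendored faiss headers are on the include path and faiss is linked.
#include <faiss/IndexAdditiveQuantizer.h>

#include <cstdio>
#include <random>
#include <vector>

int main() {
    int d = 64;       // vector dimensionality (illustrative)
    size_t M = 8;     // number of residual quantization steps
    size_t nbits = 8; // bits per step
    int64_t nb = 10000, nq = 5, k = 4;

    std::mt19937 rng(123);
    std::uniform_real_distribution<float> u(0, 1);
    std::vector<float> xb(nb * d), xq(nq * d);
    for (auto& v : xb) v = u(rng);
    for (auto& v : xq) v = u(rng);

    // Residual-quantizer index; codes are stored flat via the IndexFlatCodes base.
    faiss::IndexResidualQuantizer index(d, M, nbits);
    index.train(nb, xb.data()); // trains the underlying ResidualQuantizer
    index.add(nb, xb.data());

    std::vector<float> D(nq * k);
    std::vector<int64_t> I(nq * k);
    index.search(nq, xq.data(), k, D.data(), I.data());

    // The standalone codec interface declared in the new header.
    std::vector<uint8_t> codes(nq * index.sa_code_size());
    index.sa_encode(nq, xq.data(), codes.data());
    std::vector<float> decoded(nq * d);
    index.sa_decode(nq, codes.data(), decoded.data());

    printf("nearest neighbor of query 0: %lld\n", (long long)I[0]);
    return 0;
}
```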
--- a/data/vendor/faiss/faiss/IndexResidual.h
+++ b/data/vendor/faiss/faiss/IndexAdditiveQuantizer.h
@@ -5,74 +5,117 @@
  * LICENSE file in the root directory of this source tree.
  */
 
-#ifndef
-#define
+#ifndef FAISS_INDEX_ADDITIVE_QUANTIZER_H
+#define FAISS_INDEX_ADDITIVE_QUANTIZER_H
 
-#include <
+#include <faiss/impl/AdditiveQuantizer.h>
 
+#include <cstdint>
 #include <vector>
 
-#include <faiss/
+#include <faiss/IndexFlatCodes.h>
+#include <faiss/impl/LocalSearchQuantizer.h>
 #include <faiss/impl/ResidualQuantizer.h>
 #include <faiss/impl/platform_macros.h>
 
 namespace faiss {
 
+/// Abstract class for additive quantizers. The search functions are in common.
+struct IndexAdditiveQuantizer : IndexFlatCodes {
+    // the quantizer, this points to the relevant field in the inheriting
+    // classes
+    AdditiveQuantizer* aq;
+    using Search_type_t = AdditiveQuantizer::Search_type_t;
+
+    explicit IndexAdditiveQuantizer(
+            idx_t d = 0,
+            AdditiveQuantizer* aq = nullptr,
+            MetricType metric = METRIC_L2);
+
+    void search(
+            idx_t n,
+            const float* x,
+            idx_t k,
+            float* distances,
+            idx_t* labels) const override;
+
+    /* The standalone codec interface */
+    void sa_encode(idx_t n, const float* x, uint8_t* bytes) const override;
+
+    void sa_decode(idx_t n, const uint8_t* bytes, float* x) const override;
+};
+
 /** Index based on a residual quantizer. Stored vectors are
  * approximated by residual quantization codes.
  * Can also be used as a codec
  */
-struct
+struct IndexResidualQuantizer : IndexAdditiveQuantizer {
     /// The residual quantizer used to encode the vectors
     ResidualQuantizer rq;
 
-    enum Search_type_t {
-        ST_decompress, ///< decompress database vector
-        ST_LUT_nonorm, ///< use a LUT, don't include norms (OK for IP or
-                       ///< normalized vectors)
-        ST_norm_float, ///< use a LUT, and store float32 norm with the vectors
-        ST_norm_qint8, ///< use a LUT, and store 8bit-quantized norm
-    };
-    Search_type_t search_type;
-
-    /// min/max for quantization of norms
-    float norm_min, norm_max;
-
-    /// size of residual quantizer codes + norms
-    size_t code_size;
-
-    /// Codes. Size ntotal * rq.code_size
-    std::vector<uint8_t> codes;
-
     /** Constructor.
      *
      * @param d dimensionality of the input vectors
      * @param M number of subquantizers
      * @param nbits number of bit per subvector index
      */
-
+    IndexResidualQuantizer(
            int d,        ///< dimensionality of the input vectors
            size_t M,     ///< number of subquantizers
            size_t nbits, ///< number of bit per subvector index
            MetricType metric = METRIC_L2,
-           Search_type_t search_type = ST_decompress);
+           Search_type_t search_type = AdditiveQuantizer::ST_decompress);
 
-
+    IndexResidualQuantizer(
            int d,
            const std::vector<size_t>& nbits,
            MetricType metric = METRIC_L2,
-           Search_type_t search_type = ST_decompress);
+           Search_type_t search_type = AdditiveQuantizer::ST_decompress);
+
+    IndexResidualQuantizer();
 
-
+    void train(idx_t n, const float* x) override;
+};
 
-
-
+struct IndexLocalSearchQuantizer : IndexAdditiveQuantizer {
+    LocalSearchQuantizer lsq;
+
+    /** Constructor.
+     *
+     * @param d dimensionality of the input vectors
+     * @param M number of subquantizers
+     * @param nbits number of bit per subvector index
+     */
+    IndexLocalSearchQuantizer(
+            int d,        ///< dimensionality of the input vectors
+            size_t M,     ///< number of subquantizers
+            size_t nbits, ///< number of bit per subvector index
+            MetricType metric = METRIC_L2,
+            Search_type_t search_type = AdditiveQuantizer::ST_decompress);
+
+    IndexLocalSearchQuantizer();
 
     void train(idx_t n, const float* x) override;
+};
+
+/** A "virtual" index where the elements are the residual quantizer centroids.
+ *
+ * Intended for use as a coarse quantizer in an IndexIVF.
+ */
+struct AdditiveCoarseQuantizer : Index {
+    AdditiveQuantizer* aq;
+
+    explicit AdditiveCoarseQuantizer(
+            idx_t d = 0,
+            AdditiveQuantizer* aq = nullptr,
+            MetricType metric = METRIC_L2);
+
+    /// norms of centroids, useful for knn-search
+    std::vector<float> centroid_norms;
 
+    /// N/A
     void add(idx_t n, const float* x) override;
 
-    /// not implemented
     void search(
             idx_t n,
             const float* x,
@@ -80,23 +123,17 @@ struct IndexResidual : Index {
             float* distances,
             idx_t* labels) const override;
 
-    void
-
-    /* The standalone codec interface */
-    size_t sa_code_size() const override;
-
-    void sa_encode(idx_t n, const float* x, uint8_t* bytes) const override;
-
-    void sa_decode(idx_t n, const uint8_t* bytes, float* x) const override;
+    void reconstruct(idx_t key, float* recons) const override;
+    void train(idx_t n, const float* x) override;
 
-
+    /// N/A
+    void reset() override;
 };
 
-/**
- *
- *
-
-struct ResidualCoarseQuantizer : Index {
+/** The ResidualCoarseQuantizer is a bit specialized compared to the
+ * default AdditiveCoarseQuantizer because it can use a beam search
+ * at search time (slow but may be useful for very large vocabularies) */
+struct ResidualCoarseQuantizer : AdditiveCoarseQuantizer {
     /// The residual quantizer used to encode the vectors
     ResidualQuantizer rq;
 
@@ -104,9 +141,6 @@ struct ResidualCoarseQuantizer : Index {
     /// if negative, use exact search-to-centroid
     float beam_factor;
 
-    /// norms of centroids, useful for knn-search
-    std::vector<float> centroid_norms;
-
     /// computes centroid norms if required
     void set_beam_factor(float new_beam_factor);
 
@@ -127,13 +161,6 @@ struct ResidualCoarseQuantizer : Index {
             const std::vector<size_t>& nbits,
             MetricType metric = METRIC_L2);
 
-    ResidualCoarseQuantizer();
-
-    void train(idx_t n, const float* x) override;
-
-    /// N/A
-    void add(idx_t n, const float* x) override;
-
     void search(
             idx_t n,
             const float* x,
@@ -141,10 +168,26 @@ struct ResidualCoarseQuantizer : Index {
             float* distances,
             idx_t* labels) const override;
 
-
+    ResidualCoarseQuantizer();
+};
 
-
-
+struct LocalSearchCoarseQuantizer : AdditiveCoarseQuantizer {
+    /// The residual quantizer used to encode the vectors
+    LocalSearchQuantizer lsq;
+
+    /** Constructor.
+     *
+     * @param d dimensionality of the input vectors
+     * @param M number of subquantizers
+     * @param nbits number of bit per subvector index
+     */
+    LocalSearchCoarseQuantizer(
+            int d,        ///< dimensionality of the input vectors
+            size_t M,     ///< number of subquantizers
+            size_t nbits, ///< number of bit per subvector index
+            MetricType metric = METRIC_L2);
+
+    LocalSearchCoarseQuantizer();
 };
 
 } // namespace faiss
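The header above describes ResidualCoarseQuantizer as a "virtual" index over the residual quantizer's centroids, with a beam_factor that switches to exact search-to-centroid when negative. The sketch below illustrates that knob under assumed sizes and random training data; none of it is taken from the diff itself.

```cpp
// Hypothetical sketch: ResidualCoarseQuantizer used as a centroid-only index.
// Two steps of 6 bits each expose 2^12 = 4096 virtual centroids.
#include <faiss/IndexAdditiveQuantizer.h>

#include <random>
#include <vector>

int main() {
    int d = 32;
    faiss::ResidualCoarseQuantizer rcq(d, /*M=*/2, /*nbits=*/6); // 4096 centroids

    std::mt19937 rng(42);
    std::uniform_real_distribution<float> u(0, 1);
    std::vector<float> xt(20000 * d);
    for (auto& v : xt) v = u(rng);
    rcq.train(20000, xt.data()); // after training, ntotal == 1 << tot_bits

    int64_t nq = 3, k = 10;
    std::vector<float> xq(nq * d, 0.5f), D(nq * k);
    std::vector<int64_t> I(nq * k);

    // Default: beam search over the residual codebooks (beam_factor = 4.0).
    rcq.search(nq, xq.data(), k, D.data(), I.data());

    // A negative beam factor falls back to exact search against all centroids,
    // computing centroid norms for L2 if they are not already cached.
    rcq.set_beam_factor(-1.0f);
    rcq.search(nq, xq.data(), k, D.data(), I.data());
    return 0;
}
```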