faiss 0.1.0 → 0.1.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (226) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/README.md +103 -3
  4. data/ext/faiss/ext.cpp +99 -32
  5. data/ext/faiss/extconf.rb +12 -2
  6. data/lib/faiss/ext.bundle +0 -0
  7. data/lib/faiss/index.rb +3 -3
  8. data/lib/faiss/index_binary.rb +3 -3
  9. data/lib/faiss/kmeans.rb +1 -1
  10. data/lib/faiss/pca_matrix.rb +2 -2
  11. data/lib/faiss/product_quantizer.rb +3 -3
  12. data/lib/faiss/version.rb +1 -1
  13. data/vendor/faiss/AutoTune.cpp +719 -0
  14. data/vendor/faiss/AutoTune.h +212 -0
  15. data/vendor/faiss/Clustering.cpp +261 -0
  16. data/vendor/faiss/Clustering.h +101 -0
  17. data/vendor/faiss/IVFlib.cpp +339 -0
  18. data/vendor/faiss/IVFlib.h +132 -0
  19. data/vendor/faiss/Index.cpp +171 -0
  20. data/vendor/faiss/Index.h +261 -0
  21. data/vendor/faiss/Index2Layer.cpp +437 -0
  22. data/vendor/faiss/Index2Layer.h +85 -0
  23. data/vendor/faiss/IndexBinary.cpp +77 -0
  24. data/vendor/faiss/IndexBinary.h +163 -0
  25. data/vendor/faiss/IndexBinaryFlat.cpp +83 -0
  26. data/vendor/faiss/IndexBinaryFlat.h +54 -0
  27. data/vendor/faiss/IndexBinaryFromFloat.cpp +78 -0
  28. data/vendor/faiss/IndexBinaryFromFloat.h +52 -0
  29. data/vendor/faiss/IndexBinaryHNSW.cpp +325 -0
  30. data/vendor/faiss/IndexBinaryHNSW.h +56 -0
  31. data/vendor/faiss/IndexBinaryIVF.cpp +671 -0
  32. data/vendor/faiss/IndexBinaryIVF.h +211 -0
  33. data/vendor/faiss/IndexFlat.cpp +508 -0
  34. data/vendor/faiss/IndexFlat.h +175 -0
  35. data/vendor/faiss/IndexHNSW.cpp +1090 -0
  36. data/vendor/faiss/IndexHNSW.h +170 -0
  37. data/vendor/faiss/IndexIVF.cpp +909 -0
  38. data/vendor/faiss/IndexIVF.h +353 -0
  39. data/vendor/faiss/IndexIVFFlat.cpp +502 -0
  40. data/vendor/faiss/IndexIVFFlat.h +118 -0
  41. data/vendor/faiss/IndexIVFPQ.cpp +1207 -0
  42. data/vendor/faiss/IndexIVFPQ.h +161 -0
  43. data/vendor/faiss/IndexIVFPQR.cpp +219 -0
  44. data/vendor/faiss/IndexIVFPQR.h +65 -0
  45. data/vendor/faiss/IndexIVFSpectralHash.cpp +331 -0
  46. data/vendor/faiss/IndexIVFSpectralHash.h +75 -0
  47. data/vendor/faiss/IndexLSH.cpp +225 -0
  48. data/vendor/faiss/IndexLSH.h +87 -0
  49. data/vendor/faiss/IndexLattice.cpp +143 -0
  50. data/vendor/faiss/IndexLattice.h +68 -0
  51. data/vendor/faiss/IndexPQ.cpp +1188 -0
  52. data/vendor/faiss/IndexPQ.h +199 -0
  53. data/vendor/faiss/IndexPreTransform.cpp +288 -0
  54. data/vendor/faiss/IndexPreTransform.h +91 -0
  55. data/vendor/faiss/IndexReplicas.cpp +123 -0
  56. data/vendor/faiss/IndexReplicas.h +76 -0
  57. data/vendor/faiss/IndexScalarQuantizer.cpp +317 -0
  58. data/vendor/faiss/IndexScalarQuantizer.h +127 -0
  59. data/vendor/faiss/IndexShards.cpp +317 -0
  60. data/vendor/faiss/IndexShards.h +100 -0
  61. data/vendor/faiss/InvertedLists.cpp +623 -0
  62. data/vendor/faiss/InvertedLists.h +334 -0
  63. data/vendor/faiss/LICENSE +21 -0
  64. data/vendor/faiss/MatrixStats.cpp +252 -0
  65. data/vendor/faiss/MatrixStats.h +62 -0
  66. data/vendor/faiss/MetaIndexes.cpp +351 -0
  67. data/vendor/faiss/MetaIndexes.h +126 -0
  68. data/vendor/faiss/OnDiskInvertedLists.cpp +674 -0
  69. data/vendor/faiss/OnDiskInvertedLists.h +127 -0
  70. data/vendor/faiss/VectorTransform.cpp +1157 -0
  71. data/vendor/faiss/VectorTransform.h +322 -0
  72. data/vendor/faiss/c_api/AutoTune_c.cpp +83 -0
  73. data/vendor/faiss/c_api/AutoTune_c.h +64 -0
  74. data/vendor/faiss/c_api/Clustering_c.cpp +139 -0
  75. data/vendor/faiss/c_api/Clustering_c.h +117 -0
  76. data/vendor/faiss/c_api/IndexFlat_c.cpp +140 -0
  77. data/vendor/faiss/c_api/IndexFlat_c.h +115 -0
  78. data/vendor/faiss/c_api/IndexIVFFlat_c.cpp +64 -0
  79. data/vendor/faiss/c_api/IndexIVFFlat_c.h +58 -0
  80. data/vendor/faiss/c_api/IndexIVF_c.cpp +92 -0
  81. data/vendor/faiss/c_api/IndexIVF_c.h +135 -0
  82. data/vendor/faiss/c_api/IndexLSH_c.cpp +37 -0
  83. data/vendor/faiss/c_api/IndexLSH_c.h +40 -0
  84. data/vendor/faiss/c_api/IndexShards_c.cpp +44 -0
  85. data/vendor/faiss/c_api/IndexShards_c.h +42 -0
  86. data/vendor/faiss/c_api/Index_c.cpp +105 -0
  87. data/vendor/faiss/c_api/Index_c.h +183 -0
  88. data/vendor/faiss/c_api/MetaIndexes_c.cpp +49 -0
  89. data/vendor/faiss/c_api/MetaIndexes_c.h +49 -0
  90. data/vendor/faiss/c_api/clone_index_c.cpp +23 -0
  91. data/vendor/faiss/c_api/clone_index_c.h +32 -0
  92. data/vendor/faiss/c_api/error_c.h +42 -0
  93. data/vendor/faiss/c_api/error_impl.cpp +27 -0
  94. data/vendor/faiss/c_api/error_impl.h +16 -0
  95. data/vendor/faiss/c_api/faiss_c.h +58 -0
  96. data/vendor/faiss/c_api/gpu/GpuAutoTune_c.cpp +96 -0
  97. data/vendor/faiss/c_api/gpu/GpuAutoTune_c.h +56 -0
  98. data/vendor/faiss/c_api/gpu/GpuClonerOptions_c.cpp +52 -0
  99. data/vendor/faiss/c_api/gpu/GpuClonerOptions_c.h +68 -0
  100. data/vendor/faiss/c_api/gpu/GpuIndex_c.cpp +17 -0
  101. data/vendor/faiss/c_api/gpu/GpuIndex_c.h +30 -0
  102. data/vendor/faiss/c_api/gpu/GpuIndicesOptions_c.h +38 -0
  103. data/vendor/faiss/c_api/gpu/GpuResources_c.cpp +86 -0
  104. data/vendor/faiss/c_api/gpu/GpuResources_c.h +66 -0
  105. data/vendor/faiss/c_api/gpu/StandardGpuResources_c.cpp +54 -0
  106. data/vendor/faiss/c_api/gpu/StandardGpuResources_c.h +53 -0
  107. data/vendor/faiss/c_api/gpu/macros_impl.h +42 -0
  108. data/vendor/faiss/c_api/impl/AuxIndexStructures_c.cpp +220 -0
  109. data/vendor/faiss/c_api/impl/AuxIndexStructures_c.h +149 -0
  110. data/vendor/faiss/c_api/index_factory_c.cpp +26 -0
  111. data/vendor/faiss/c_api/index_factory_c.h +30 -0
  112. data/vendor/faiss/c_api/index_io_c.cpp +42 -0
  113. data/vendor/faiss/c_api/index_io_c.h +50 -0
  114. data/vendor/faiss/c_api/macros_impl.h +110 -0
  115. data/vendor/faiss/clone_index.cpp +147 -0
  116. data/vendor/faiss/clone_index.h +38 -0
  117. data/vendor/faiss/demos/demo_imi_flat.cpp +151 -0
  118. data/vendor/faiss/demos/demo_imi_pq.cpp +199 -0
  119. data/vendor/faiss/demos/demo_ivfpq_indexing.cpp +146 -0
  120. data/vendor/faiss/demos/demo_sift1M.cpp +252 -0
  121. data/vendor/faiss/gpu/GpuAutoTune.cpp +95 -0
  122. data/vendor/faiss/gpu/GpuAutoTune.h +27 -0
  123. data/vendor/faiss/gpu/GpuCloner.cpp +403 -0
  124. data/vendor/faiss/gpu/GpuCloner.h +82 -0
  125. data/vendor/faiss/gpu/GpuClonerOptions.cpp +28 -0
  126. data/vendor/faiss/gpu/GpuClonerOptions.h +53 -0
  127. data/vendor/faiss/gpu/GpuDistance.h +52 -0
  128. data/vendor/faiss/gpu/GpuFaissAssert.h +29 -0
  129. data/vendor/faiss/gpu/GpuIndex.h +148 -0
  130. data/vendor/faiss/gpu/GpuIndexBinaryFlat.h +89 -0
  131. data/vendor/faiss/gpu/GpuIndexFlat.h +190 -0
  132. data/vendor/faiss/gpu/GpuIndexIVF.h +89 -0
  133. data/vendor/faiss/gpu/GpuIndexIVFFlat.h +85 -0
  134. data/vendor/faiss/gpu/GpuIndexIVFPQ.h +143 -0
  135. data/vendor/faiss/gpu/GpuIndexIVFScalarQuantizer.h +100 -0
  136. data/vendor/faiss/gpu/GpuIndicesOptions.h +30 -0
  137. data/vendor/faiss/gpu/GpuResources.cpp +52 -0
  138. data/vendor/faiss/gpu/GpuResources.h +73 -0
  139. data/vendor/faiss/gpu/StandardGpuResources.cpp +295 -0
  140. data/vendor/faiss/gpu/StandardGpuResources.h +114 -0
  141. data/vendor/faiss/gpu/impl/RemapIndices.cpp +43 -0
  142. data/vendor/faiss/gpu/impl/RemapIndices.h +24 -0
  143. data/vendor/faiss/gpu/perf/IndexWrapper-inl.h +71 -0
  144. data/vendor/faiss/gpu/perf/IndexWrapper.h +39 -0
  145. data/vendor/faiss/gpu/perf/PerfClustering.cpp +115 -0
  146. data/vendor/faiss/gpu/perf/PerfIVFPQAdd.cpp +139 -0
  147. data/vendor/faiss/gpu/perf/WriteIndex.cpp +102 -0
  148. data/vendor/faiss/gpu/test/TestGpuIndexBinaryFlat.cpp +130 -0
  149. data/vendor/faiss/gpu/test/TestGpuIndexFlat.cpp +371 -0
  150. data/vendor/faiss/gpu/test/TestGpuIndexIVFFlat.cpp +550 -0
  151. data/vendor/faiss/gpu/test/TestGpuIndexIVFPQ.cpp +450 -0
  152. data/vendor/faiss/gpu/test/TestGpuMemoryException.cpp +84 -0
  153. data/vendor/faiss/gpu/test/TestUtils.cpp +315 -0
  154. data/vendor/faiss/gpu/test/TestUtils.h +93 -0
  155. data/vendor/faiss/gpu/test/demo_ivfpq_indexing_gpu.cpp +159 -0
  156. data/vendor/faiss/gpu/utils/DeviceMemory.cpp +77 -0
  157. data/vendor/faiss/gpu/utils/DeviceMemory.h +71 -0
  158. data/vendor/faiss/gpu/utils/DeviceUtils.h +185 -0
  159. data/vendor/faiss/gpu/utils/MemorySpace.cpp +89 -0
  160. data/vendor/faiss/gpu/utils/MemorySpace.h +44 -0
  161. data/vendor/faiss/gpu/utils/StackDeviceMemory.cpp +239 -0
  162. data/vendor/faiss/gpu/utils/StackDeviceMemory.h +129 -0
  163. data/vendor/faiss/gpu/utils/StaticUtils.h +83 -0
  164. data/vendor/faiss/gpu/utils/Timer.cpp +60 -0
  165. data/vendor/faiss/gpu/utils/Timer.h +52 -0
  166. data/vendor/faiss/impl/AuxIndexStructures.cpp +305 -0
  167. data/vendor/faiss/impl/AuxIndexStructures.h +246 -0
  168. data/vendor/faiss/impl/FaissAssert.h +95 -0
  169. data/vendor/faiss/impl/FaissException.cpp +66 -0
  170. data/vendor/faiss/impl/FaissException.h +71 -0
  171. data/vendor/faiss/impl/HNSW.cpp +818 -0
  172. data/vendor/faiss/impl/HNSW.h +275 -0
  173. data/vendor/faiss/impl/PolysemousTraining.cpp +953 -0
  174. data/vendor/faiss/impl/PolysemousTraining.h +158 -0
  175. data/vendor/faiss/impl/ProductQuantizer.cpp +876 -0
  176. data/vendor/faiss/impl/ProductQuantizer.h +242 -0
  177. data/vendor/faiss/impl/ScalarQuantizer.cpp +1628 -0
  178. data/vendor/faiss/impl/ScalarQuantizer.h +120 -0
  179. data/vendor/faiss/impl/ThreadedIndex-inl.h +192 -0
  180. data/vendor/faiss/impl/ThreadedIndex.h +80 -0
  181. data/vendor/faiss/impl/index_read.cpp +793 -0
  182. data/vendor/faiss/impl/index_write.cpp +558 -0
  183. data/vendor/faiss/impl/io.cpp +142 -0
  184. data/vendor/faiss/impl/io.h +98 -0
  185. data/vendor/faiss/impl/lattice_Zn.cpp +712 -0
  186. data/vendor/faiss/impl/lattice_Zn.h +199 -0
  187. data/vendor/faiss/index_factory.cpp +392 -0
  188. data/vendor/faiss/index_factory.h +25 -0
  189. data/vendor/faiss/index_io.h +75 -0
  190. data/vendor/faiss/misc/test_blas.cpp +84 -0
  191. data/vendor/faiss/tests/test_binary_flat.cpp +64 -0
  192. data/vendor/faiss/tests/test_dealloc_invlists.cpp +183 -0
  193. data/vendor/faiss/tests/test_ivfpq_codec.cpp +67 -0
  194. data/vendor/faiss/tests/test_ivfpq_indexing.cpp +98 -0
  195. data/vendor/faiss/tests/test_lowlevel_ivf.cpp +566 -0
  196. data/vendor/faiss/tests/test_merge.cpp +258 -0
  197. data/vendor/faiss/tests/test_omp_threads.cpp +14 -0
  198. data/vendor/faiss/tests/test_ondisk_ivf.cpp +220 -0
  199. data/vendor/faiss/tests/test_pairs_decoding.cpp +189 -0
  200. data/vendor/faiss/tests/test_params_override.cpp +231 -0
  201. data/vendor/faiss/tests/test_pq_encoding.cpp +98 -0
  202. data/vendor/faiss/tests/test_sliding_ivf.cpp +240 -0
  203. data/vendor/faiss/tests/test_threaded_index.cpp +253 -0
  204. data/vendor/faiss/tests/test_transfer_invlists.cpp +159 -0
  205. data/vendor/faiss/tutorial/cpp/1-Flat.cpp +98 -0
  206. data/vendor/faiss/tutorial/cpp/2-IVFFlat.cpp +81 -0
  207. data/vendor/faiss/tutorial/cpp/3-IVFPQ.cpp +93 -0
  208. data/vendor/faiss/tutorial/cpp/4-GPU.cpp +119 -0
  209. data/vendor/faiss/tutorial/cpp/5-Multiple-GPUs.cpp +99 -0
  210. data/vendor/faiss/utils/Heap.cpp +122 -0
  211. data/vendor/faiss/utils/Heap.h +495 -0
  212. data/vendor/faiss/utils/WorkerThread.cpp +126 -0
  213. data/vendor/faiss/utils/WorkerThread.h +61 -0
  214. data/vendor/faiss/utils/distances.cpp +765 -0
  215. data/vendor/faiss/utils/distances.h +243 -0
  216. data/vendor/faiss/utils/distances_simd.cpp +809 -0
  217. data/vendor/faiss/utils/extra_distances.cpp +336 -0
  218. data/vendor/faiss/utils/extra_distances.h +54 -0
  219. data/vendor/faiss/utils/hamming-inl.h +472 -0
  220. data/vendor/faiss/utils/hamming.cpp +792 -0
  221. data/vendor/faiss/utils/hamming.h +220 -0
  222. data/vendor/faiss/utils/random.cpp +192 -0
  223. data/vendor/faiss/utils/random.h +60 -0
  224. data/vendor/faiss/utils/utils.cpp +783 -0
  225. data/vendor/faiss/utils/utils.h +181 -0
  226. metadata +216 -2
@@ -0,0 +1,171 @@
1
+ /**
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ *
4
+ * This source code is licensed under the MIT license found in the
5
+ * LICENSE file in the root directory of this source tree.
6
+ */
7
+
8
+ // -*- c++ -*-
9
+
10
+ #include <faiss/Index.h>
11
+
12
+ #include <faiss/impl/AuxIndexStructures.h>
13
+ #include <faiss/impl/FaissAssert.h>
14
+ #include <faiss/utils/distances.h>
15
+
16
+ #include <cstring>
17
+
18
+
19
+ namespace faiss {
20
+
21
+ Index::~Index ()
22
+ {
23
+ }
24
+
25
+
26
// Default training hook: indexes that need no training (e.g. flat
// storage) inherit this no-op; trainable subclasses override it.
void Index::train(idx_t /*n*/, const float* /*x*/) {
    // does nothing by default
}
29
+
30
+
31
// Default implementation: range search is optional for subclasses, so
// the base class reports it as unsupported.
void Index::range_search (idx_t , const float *, float,
                          RangeSearchResult *) const
{
    FAISS_THROW_MSG ("range search not implemented");
}
36
+
37
+ void Index::assign (idx_t n, const float * x, idx_t * labels, idx_t k)
38
+ {
39
+ float * distances = new float[n * k];
40
+ ScopeDeleter<float> del(distances);
41
+ search (n, x, k, distances, labels);
42
+ }
43
+
44
// Default implementation: storing caller-chosen ids is optional, so the
// base class reports it as unsupported.
void Index::add_with_ids(
    idx_t /*n*/,
    const float* /*x*/,
    const idx_t* /*xids*/) {
    FAISS_THROW_MSG ("add_with_ids not implemented for this type of index");
}
50
+
51
// Default implementation: removal is optional, so the base class
// reports it as unsupported.
size_t Index::remove_ids(const IDSelector& /*sel*/) {
    FAISS_THROW_MSG ("remove_ids not implemented for this type of index");
    // not reached; only silences compilers that do not know the macro
    // above never returns
    return -1;
}
55
+
56
+
57
// Default implementation: decoding a stored vector is optional, so the
// base class reports it as unsupported.
void Index::reconstruct (idx_t, float * ) const {
    FAISS_THROW_MSG ("reconstruct not implemented for this type of index");
}
60
+
61
+
62
+ void Index::reconstruct_n (idx_t i0, idx_t ni, float *recons) const {
63
+ for (idx_t i = 0; i < ni; i++) {
64
+ reconstruct (i0 + i, recons + i * d);
65
+ }
66
+ }
67
+
68
+
69
// Regular k-NN search, then decode every result id back into a full
// vector. recons has shape (n, k, d); padded (-1) result slots are
// filled with NaNs instead of decoded vectors.
void Index::search_and_reconstruct (idx_t n, const float *x, idx_t k,
                                    float *distances, idx_t *labels,
                                    float *recons) const {
    search (n, x, k, distances, labels);
    for (idx_t i = 0; i < n; ++i) {
        for (idx_t j = 0; j < k; ++j) {
            idx_t ij = i * k + j;     // flat index of result (i, j)
            idx_t key = labels[ij];
            float* reconstructed = recons + ij * d;
            if (key < 0) {
                // Fill with NaNs: setting every byte to 0xFF produces an
                // all-ones bit pattern, which is a float NaN
                memset(reconstructed, -1, sizeof(*reconstructed) * d);
            } else {
                reconstruct (key, reconstructed);
            }
        }
    }
}
87
+
88
+ void Index::compute_residual (const float * x,
89
+ float * residual, idx_t key) const {
90
+ reconstruct (key, residual);
91
+ for (size_t i = 0; i < d; i++) {
92
+ residual[i] = x[i] - residual[i];
93
+ }
94
+ }
95
+
96
// Batch form of compute_residual; rows are independent, so they are
// processed in parallel with OpenMP.
void Index::compute_residual_n (idx_t n, const float* xs,
                                float* residuals,
                                const idx_t* keys) const {
#pragma omp parallel for
    for (idx_t i = 0; i < n; ++i) {
        compute_residual(&xs[i * d], &residuals[i * d], keys[i]);
    }
}
104
+
105
+
106
+
107
// Default implementation: the standalone codec interface is optional,
// so the base class reports it as unsupported.
size_t Index::sa_code_size () const
{
    FAISS_THROW_MSG ("standalone codec not implemented for this type of index");
}
111
+
112
// Default implementation: standalone encoding is optional, so the base
// class reports it as unsupported.
void Index::sa_encode (idx_t, const float *,
                       uint8_t *) const
{
    FAISS_THROW_MSG ("standalone codec not implemented for this type of index");
}
117
+
118
// Default implementation: standalone decoding is optional, so the base
// class reports it as unsupported.
void Index::sa_decode (idx_t, const uint8_t *,
                       float *) const
{
    FAISS_THROW_MSG ("standalone codec not implemented for this type of index");
}
123
+
124
+
125
+ namespace {
126
+
127
+
128
+ // storage that explicitly reconstructs vectors before computing distances
129
// storage that explicitly reconstructs vectors before computing distances
struct GenericDistanceComputer : DistanceComputer {
    size_t d;               // vector dimension, copied from the storage index
    const Index& storage;   // index holding the database vectors
    std::vector<float> buf; // scratch for up to 2 reconstructed vectors
    const float *q;         // current query (borrowed, set by set_query)

    explicit GenericDistanceComputer(const Index& storage)
        : storage(storage) {
        d = storage.d;
        buf.resize(d * 2); // two slots: symmetric_dis decodes both i and j
    }

    /// squared L2 distance between the current query and database vector i
    float operator () (idx_t i) override {
        storage.reconstruct(i, buf.data());
        return fvec_L2sqr(q, buf.data(), d);
    }

    /// squared L2 distance between two database vectors
    float symmetric_dis(idx_t i, idx_t j) override {
        storage.reconstruct(i, buf.data());
        storage.reconstruct(j, buf.data() + d);
        return fvec_L2sqr(buf.data() + d, buf.data(), d);
    }

    void set_query(const float *x) override {
        // borrowed pointer: the caller must keep x alive while querying
        q = x;
    }

};
157
+
158
+
159
+ } // namespace
160
+
161
+
162
// Generic fallback usable by any index that implements reconstruct();
// only the L2 metric is handled here. The caller owns the returned
// object (it is allocated with new).
DistanceComputer * Index::get_distance_computer() const {
    if (metric_type == METRIC_L2) {
        return new GenericDistanceComputer(*this);
    } else {
        FAISS_THROW_MSG ("get_distance_computer() not implemented");
    }
}
169
+
170
+
171
+ }
@@ -0,0 +1,261 @@
1
+ /**
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ *
4
+ * This source code is licensed under the MIT license found in the
5
+ * LICENSE file in the root directory of this source tree.
6
+ */
7
+
8
+ // -*- c++ -*-
9
+
10
+ #ifndef FAISS_INDEX_H
11
+ #define FAISS_INDEX_H
12
+
13
+
14
+ #include <cstdio>
15
+ #include <typeinfo>
16
+ #include <string>
17
+ #include <sstream>
18
+
19
+ #define FAISS_VERSION_MAJOR 1
20
+ #define FAISS_VERSION_MINOR 6
21
+ #define FAISS_VERSION_PATCH 1
22
+
23
+ /**
24
+ * @namespace faiss
25
+ *
26
+ * Throughout the library, vectors are provided as float * pointers.
27
+ * Most algorithms can be optimized when several vectors are processed
28
+ * (added/searched) together in a batch. In this case, they are passed
29
+ * in as a matrix. When n vectors of size d are provided as float * x,
30
+ * component j of vector i is
31
+ *
32
+ * x[ i * d + j ]
33
+ *
34
+ * where 0 <= i < n and 0 <= j < d. In other words, matrices are
35
+ * always compact. When specifying the size of the matrix, we call it
36
+ * an n*d matrix, which implies a row-major storage.
37
+ */
38
+
39
+
40
+ namespace faiss {
41
+
42
+
43
/// Some algorithms support both an inner product version and a L2 search version.
enum MetricType {
    METRIC_INNER_PRODUCT = 0,  ///< maximum inner product search
    METRIC_L2 = 1,             ///< squared L2 search
    METRIC_L1,                 ///< L1 (aka cityblock)
    METRIC_Linf,               ///< infinity distance
    METRIC_Lp,                 ///< L_p distance, p is given by metric_arg

    /// some additional metrics defined in scipy.spatial.distance
    /// (start at 20 to leave room for more "core" metrics above)
    METRIC_Canberra = 20,
    METRIC_BrayCurtis,
    METRIC_JensenShannon,

};
57
+
58
+
59
+ /// Forward declarations see AuxIndexStructures.h
60
+ struct IDSelector;
61
+ struct RangeSearchResult;
62
+ struct DistanceComputer;
63
+
64
/** Abstract structure for an index
 *
 * Supports adding vertices and searching them.
 *
 * Currently only asymmetric queries are supported:
 * database-to-database queries are not implemented.
 */
struct Index {
    using idx_t = int64_t;    ///< all indices are this type
    using component_t = float;
    using distance_t = float;

    int d;                 ///< vector dimension
    idx_t ntotal;          ///< total nb of indexed vectors
    bool verbose;          ///< verbosity level

    /// set if the Index does not require training, or if training is
    /// done already
    bool is_trained;

    /// type of metric this index uses for search
    MetricType metric_type;
    float metric_arg;     ///< argument of the metric type

    explicit Index (idx_t d = 0, MetricType metric = METRIC_L2):
                    d(d),
                    ntotal(0),
                    verbose(false),
                    is_trained(true),
                    metric_type (metric),
                    metric_arg(0) {}

    virtual ~Index ();


    /** Perform training on a representative set of vectors
     *
     * @param n      nb of training vectors
     * @param x      training vecors, size n * d
     */
    virtual void train(idx_t n, const float* x);

    /** Add n vectors of dimension d to the index.
     *
     * Vectors are implicitly assigned labels ntotal .. ntotal + n - 1
     * This function slices the input vectors in chuncks smaller than
     * blocksize_add and calls add_core.
     * @param x      input matrix, size n * d
     */
    virtual void add (idx_t n, const float *x) = 0;

    /** Same as add, but stores xids instead of sequential ids.
     *
     * The default implementation fails with an assertion, as it is
     * not supported by all indexes.
     *
     * @param xids if non-null, ids to store for the vectors (size n)
     */
    virtual void add_with_ids (idx_t n, const float * x, const idx_t *xids);

    /** query n vectors of dimension d to the index.
     *
     * return at most k vectors. If there are not enough results for a
     * query, the result array is padded with -1s.
     *
     * @param x           input vectors to search, size n * d
     * @param labels      output labels of the NNs, size n*k
     * @param distances   output pairwise distances, size n*k
     */
    virtual void search (idx_t n, const float *x, idx_t k,
                         float *distances, idx_t *labels) const = 0;

    /** query n vectors of dimension d to the index.
     *
     * return all vectors with distance < radius. Note that many
     * indexes do not implement the range_search (only the k-NN search
     * is mandatory).
     *
     * @param x           input vectors to search, size n * d
     * @param radius      search radius
     * @param result      result table
     */
    virtual void range_search (idx_t n, const float *x, float radius,
                               RangeSearchResult *result) const;

    /** return the indexes of the k vectors closest to the query x.
     *
     * This function is identical as search but only return labels of neighbors.
     * @param x           input vectors to search, size n * d
     * @param labels      output labels of the NNs, size n*k
     */
    void assign (idx_t n, const float * x, idx_t * labels, idx_t k = 1);

    /// removes all elements from the database.
    virtual void reset() = 0;

    /** removes IDs from the index. Not supported by all
     * indexes. Returns the number of elements removed.
     */
    virtual size_t remove_ids (const IDSelector & sel);

    /** Reconstruct a stored vector (or an approximation if lossy coding)
     *
     * this function may not be defined for some indexes
     * @param key         id of the vector to reconstruct
     * @param recons      reconstucted vector (size d)
     */
    virtual void reconstruct (idx_t key, float * recons) const;

    /** Reconstruct vectors i0 to i0 + ni - 1
     *
     * this function may not be defined for some indexes
     * @param recons      reconstucted vector (size ni * d)
     */
    virtual void reconstruct_n (idx_t i0, idx_t ni, float *recons) const;

    /** Similar to search, but also reconstructs the stored vectors (or an
     * approximation in the case of lossy coding) for the search results.
     *
     * If there are not enough results for a query, the resulting arrays
     * is padded with -1s.
     *
     * @param recons      reconstructed vectors size (n, k, d)
     **/
    virtual void search_and_reconstruct (idx_t n, const float *x, idx_t k,
                                         float *distances, idx_t *labels,
                                         float *recons) const;

    /** Computes a residual vector after indexing encoding.
     *
     * The residual vector is the difference between a vector and the
     * reconstruction that can be decoded from its representation in
     * the index. The residual can be used for multiple-stage indexing
     * methods, like IndexIVF's methods.
     *
     * @param x           input vector, size d
     * @param residual    output residual vector, size d
     * @param key         encoded index, as returned by search and assign
     */
    virtual void compute_residual (const float * x,
                                   float * residual, idx_t key) const;

    /** Computes a residual vector after indexing encoding (batch form).
     * Equivalent to calling compute_residual for each vector.
     *
     * The residual vector is the difference between a vector and the
     * reconstruction that can be decoded from its representation in
     * the index. The residual can be used for multiple-stage indexing
     * methods, like IndexIVF's methods.
     *
     * @param n           number of vectors
     * @param xs          input vectors, size (n x d)
     * @param residuals   output residual vectors, size (n x d)
     * @param keys        encoded index, as returned by search and assign
     */
    virtual void compute_residual_n (idx_t n, const float* xs,
                                     float* residuals,
                                     const idx_t* keys) const;

    /** Get a DistanceComputer (defined in AuxIndexStructures) object
     * for this kind of index.
     *
     * DistanceComputer is implemented for indexes that support random
     * access of their vectors. The caller owns the returned object.
     */
    virtual DistanceComputer * get_distance_computer() const;


    /* The standalone codec interface */

    /** size of the produced codes in bytes */
    virtual size_t sa_code_size () const;

    /** encode a set of vectors
     *
     * @param n       number of vectors
     * @param x       input vectors, size n * d
     * @param bytes   output encoded vectors, size n * sa_code_size()
     */
    virtual void sa_encode (idx_t n, const float *x,
                            uint8_t *bytes) const;

    /** encode a set of vectors
     *
     * @param n       number of vectors
     * @param bytes   input encoded vectors, size n * sa_code_size()
     * @param x       output vectors, size n * d
     */
    virtual void sa_decode (idx_t n, const uint8_t *bytes,
                            float *x) const;


};
257
+
258
+ }
259
+
260
+
261
+ #endif
@@ -0,0 +1,437 @@
1
+ /**
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ *
4
+ * This source code is licensed under the MIT license found in the
5
+ * LICENSE file in the root directory of this source tree.
6
+ */
7
+
8
+ // -*- c++ -*-
9
+
10
+ #include <faiss/Index2Layer.h>
11
+
12
+ #include <cmath>
13
+ #include <cstdio>
14
+ #include <cassert>
15
+ #include <stdint.h>
16
+
17
+ #ifdef __SSE__
18
+ #include <immintrin.h>
19
+ #endif
20
+
21
+ #include <algorithm>
22
+
23
+ #include <faiss/IndexIVFPQ.h>
24
+
25
+ #include <faiss/impl/FaissAssert.h>
26
+ #include <faiss/utils/utils.h>
27
+ #include <faiss/impl/AuxIndexStructures.h>
28
+ #include <faiss/IndexFlat.h>
29
+ #include <faiss/utils/distances.h>
30
+
31
+
32
+ /*
33
+ #include <faiss/utils/Heap.h>
34
+
35
+ #include <faiss/Clustering.h>
36
+
37
+ #include <faiss/utils/hamming.h>
38
+
39
+
40
+ */
41
+
42
+
43
+ namespace faiss {
44
+
45
+ using idx_t = Index::idx_t;
46
+
47
+ /*************************************
48
+ * Index2Layer implementation
49
+ *************************************/
50
+
51
+
52
// Builds a 2-level index: a coarse quantizer with nlist centroids
// (level 1) followed by an M x nbit product quantizer on the residuals
// (level 2). The index requires training before use.
Index2Layer::Index2Layer (Index * quantizer, size_t nlist,
                          int M, int nbit,
                          MetricType metric):
    Index (quantizer->d, metric),
    q1 (quantizer, nlist),
    pq (quantizer->d, M, nbit)
{
    is_trained = false;
    // smallest number of bytes that can hold a list id in [0, nlist);
    // nbyte only scans 0..6, so this assumes nlist < 2^48 — TODO confirm
    for (int nbyte = 0; nbyte < 7; nbyte++) {
        if ((1L << (8 * nbyte)) >= nlist) {
            code_size_1 = nbyte;
            break;
        }
    }
    code_size_2 = pq.code_size;
    // per-vector entry = [list id | PQ code]
    code_size = code_size_1 + code_size_2;
}
69
+
70
+ Index2Layer::Index2Layer ()
71
+ {
72
+ code_size = code_size_1 = code_size_2 = 0;
73
+ }
74
+
75
// Nothing to release explicitly: members clean up in their own
// destructors.
Index2Layer::~Index2Layer ()
{}
77
+
78
+ void Index2Layer::train(idx_t n, const float* x)
79
+ {
80
+ if (verbose) {
81
+ printf ("training level-1 quantizer %ld vectors in %dD\n",
82
+ n, d);
83
+ }
84
+
85
+ q1.train_q1 (n, x, verbose, metric_type);
86
+
87
+ if (verbose) {
88
+ printf("computing residuals\n");
89
+ }
90
+
91
+ const float * x_in = x;
92
+
93
+ x = fvecs_maybe_subsample (
94
+ d, (size_t*)&n, pq.cp.max_points_per_centroid * pq.ksub,
95
+ x, verbose, pq.cp.seed);
96
+
97
+ ScopeDeleter<float> del_x (x_in == x ? nullptr : x);
98
+
99
+ std::vector<idx_t> assign(n); // assignement to coarse centroids
100
+ q1.quantizer->assign (n, x, assign.data());
101
+ std::vector<float> residuals(n * d);
102
+ for (idx_t i = 0; i < n; i++) {
103
+ q1.quantizer->compute_residual (
104
+ x + i * d, residuals.data() + i * d, assign[i]);
105
+ }
106
+
107
+ if (verbose)
108
+ printf ("training %zdx%zd product quantizer on %ld vectors in %dD\n",
109
+ pq.M, pq.ksub, n, d);
110
+ pq.verbose = verbose;
111
+ pq.train (n, residuals.data());
112
+
113
+ is_trained = true;
114
+ }
115
+
116
+ void Index2Layer::add(idx_t n, const float* x)
117
+ {
118
+ idx_t bs = 32768;
119
+ if (n > bs) {
120
+ for (idx_t i0 = 0; i0 < n; i0 += bs) {
121
+ idx_t i1 = std::min(i0 + bs, n);
122
+ if (verbose) {
123
+ printf("Index2Layer::add: adding %ld:%ld / %ld\n",
124
+ i0, i1, n);
125
+ }
126
+ add (i1 - i0, x + i0 * d);
127
+ }
128
+ return;
129
+ }
130
+
131
+ std::vector<idx_t> codes1 (n);
132
+ q1.quantizer->assign (n, x, codes1.data());
133
+ std::vector<float> residuals(n * d);
134
+ for (idx_t i = 0; i < n; i++) {
135
+ q1.quantizer->compute_residual (
136
+ x + i * d, residuals.data() + i * d, codes1[i]);
137
+ }
138
+ std::vector<uint8_t> codes2 (n * code_size_2);
139
+
140
+ pq.compute_codes (residuals.data(), codes2.data(), n);
141
+
142
+ codes.resize ((ntotal + n) * code_size);
143
+ uint8_t *wp = &codes[ntotal * code_size];
144
+
145
+ {
146
+ int i = 0x11223344;
147
+ const char *ip = (char*)&i;
148
+ FAISS_THROW_IF_NOT_MSG (ip[0] == 0x44,
149
+ "works only on a little-endian CPU");
150
+ }
151
+
152
+ // copy to output table
153
+ for (idx_t i = 0; i < n; i++) {
154
+ memcpy (wp, &codes1[i], code_size_1);
155
+ wp += code_size_1;
156
+ memcpy (wp, &codes2[i * code_size_2], code_size_2);
157
+ wp += code_size_2;
158
+ }
159
+
160
+ ntotal += n;
161
+
162
+ }
163
+
164
// Searching is not supported by Index2Layer in this file (the codes are
// meant to be transferred to an IVFPQ or decoded); always throws.
void Index2Layer::search(
    idx_t /*n*/,
    const float* /*x*/,
    idx_t /*k*/,
    float* /*distances*/,
    idx_t* /*labels*/) const {
    FAISS_THROW_MSG("not implemented");
}
172
+
173
+
174
+ void Index2Layer::reconstruct_n(idx_t i0, idx_t ni, float* recons) const
175
+ {
176
+ float recons1[d];
177
+ FAISS_THROW_IF_NOT (i0 >= 0 && i0 + ni <= ntotal);
178
+ const uint8_t *rp = &codes[i0 * code_size];
179
+
180
+ for (idx_t i = 0; i < ni; i++) {
181
+ idx_t key = 0;
182
+ memcpy (&key, rp, code_size_1);
183
+ q1.quantizer->reconstruct (key, recons1);
184
+ rp += code_size_1;
185
+ pq.decode (rp, recons);
186
+ for (idx_t j = 0; j < d; j++) {
187
+ recons[j] += recons1[j];
188
+ }
189
+ rp += code_size_2;
190
+ recons += d;
191
+ }
192
+ }
193
+
194
// Copies the stored codes into an (empty) IndexIVFPQ with the same
// coarse and PQ geometry: the level-1 id selects the inverted list and
// the level-2 PQ code becomes the list entry.
void Index2Layer::transfer_to_IVFPQ (IndexIVFPQ & other) const
{
    FAISS_THROW_IF_NOT (other.nlist == q1.nlist);
    FAISS_THROW_IF_NOT (other.code_size == code_size_2);
    FAISS_THROW_IF_NOT (other.ntotal == 0);

    const uint8_t *rp = codes.data();

    for (idx_t i = 0; i < ntotal; i++) {
        idx_t key = 0;
        memcpy (&key, rp, code_size_1); // little-endian truncated list id
        rp += code_size_1;
        other.invlists->add_entry (key, i, rp);
        rp += code_size_2;
    }

    other.ntotal = ntotal;

}
213
+
214
+
215
+
216
// Single-vector case of reconstruct_n.
void Index2Layer::reconstruct(idx_t key, float* recons) const
{
    reconstruct_n (key, 1, recons);
}
220
+
221
+ void Index2Layer::reset()
222
+ {
223
+ ntotal = 0;
224
+ codes.clear ();
225
+ }
226
+
227
+
228
+ namespace {
229
+
230
+
231
// Base class for distance computers that work on the 2-level codes.
// Holds the current query and the centroid tables of both levels.
struct Distance2Level : DistanceComputer {
    size_t d;                   // vector dimension
    const Index2Layer& storage; // index holding the codes
    std::vector<float> buf;     // scratch for 2 reconstructed vectors
    const float *q;             // current query (borrowed)

    // centroid tables: level-1 (filled in by subclasses) and PQ level-2
    const float *pq_l1_tab, *pq_l2_tab;

    explicit Distance2Level(const Index2Layer& storage)
        : storage(storage) {
        d = storage.d;
        // subclasses process PQ sub-vectors 4 floats at a time (__m128)
        FAISS_ASSERT(storage.pq.dsub == 4);
        pq_l2_tab = storage.pq.centroids.data();
        buf.resize(2 * d);
    }

    /// fallback path: fully reconstruct both vectors, then compare
    float symmetric_dis(idx_t i, idx_t j) override {
        storage.reconstruct(i, buf.data());
        storage.reconstruct(j, buf.data() + d);
        return fvec_L2sqr(buf.data() + d, buf.data(), d);
    }

    void set_query(const float *x) override {
        // borrowed pointer: the caller must keep x alive while querying
        q = x;
    }
};
257
+
258
+ // well optimized for xNN+PQNN
259
// well optimized for xNN+PQNN
struct DistanceXPQ4 : Distance2Level {

    int M, k; // M: nb of PQ sub-quantizers; k is never set here — TODO confirm

    explicit DistanceXPQ4(const Index2Layer& storage)
        : Distance2Level (storage) {
        // level 1 must be a flat index so its centroids can be read
        // directly out of xb
        const IndexFlat *quantizer =
            dynamic_cast<IndexFlat*> (storage.q1.quantizer);

        FAISS_ASSERT(quantizer);
        M = storage.pq.M;
        pq_l1_tab = quantizer->xb.data();
    }

    // query-to-code distance: decodes on the fly, 4 floats per __m128,
    // accumulating squared differences without materializing the vector
    float operator () (idx_t i) override {
#ifdef __SSE__
        const uint8_t *code = storage.codes.data() + i * storage.code_size;
        long key = 0;
        memcpy (&key, code, storage.code_size_1); // little-endian list id
        code += storage.code_size_1;

        // walking pointers
        const float *qa = q;
        const __m128 *l1_t = (const __m128 *)(pq_l1_tab + d * key);
        const __m128 *pq_l2_t = (const __m128 *)pq_l2_tab;
        __m128 accu = _mm_setzero_ps();

        // NOTE(review): +, -, * on __m128 are gcc/clang vector
        // extensions; MSVC would need _mm_add_ps etc. — confirm targets
        for (int m = 0; m < M; m++) {
            __m128 qi = _mm_loadu_ps(qa);
            // reconstructed sub-vector = l1 centroid + PQ centroid
            __m128 recons = l1_t[m] + pq_l2_t[*code++];
            __m128 diff = qi - recons;
            accu += diff * diff;
            // next sub-quantizer table; stride 256 __m128s assumes
            // 256 centroids per sub-quantizer (8-bit codes) — TODO confirm
            pq_l2_t += 256;
            qa += 4;
        }

        // horizontal sum of the 4 accumulated lanes
        accu = _mm_hadd_ps (accu, accu);
        accu = _mm_hadd_ps (accu, accu);
        return _mm_cvtss_f32 (accu);
#else
        FAISS_THROW_MSG("not implemented for non-x64 platforms");
#endif
    }

};
304
+
305
+ // well optimized for 2xNN+PQNN
306
+ struct Distance2xXPQ4 : Distance2Level {
307
+
308
+ int M_2, mi_nbits;
309
+
310
+ explicit Distance2xXPQ4(const Index2Layer& storage)
311
+ : Distance2Level(storage) {
312
+ const MultiIndexQuantizer *mi =
313
+ dynamic_cast<MultiIndexQuantizer*> (storage.q1.quantizer);
314
+
315
+ FAISS_ASSERT(mi);
316
+ FAISS_ASSERT(storage.pq.M % 2 == 0);
317
+ M_2 = storage.pq.M / 2;
318
+ mi_nbits = mi->pq.nbits;
319
+ pq_l1_tab = mi->pq.centroids.data();
320
+ }
321
+
322
+ float operator () (idx_t i) override {
323
+ const uint8_t *code = storage.codes.data() + i * storage.code_size;
324
+ long key01 = 0;
325
+ memcpy (&key01, code, storage.code_size_1);
326
+ code += storage.code_size_1;
327
+ #ifdef __SSE__
328
+
329
+ // walking pointers
330
+ const float *qa = q;
331
+ const __m128 *pq_l1_t = (const __m128 *)pq_l1_tab;
332
+ const __m128 *pq_l2_t = (const __m128 *)pq_l2_tab;
333
+ __m128 accu = _mm_setzero_ps();
334
+
335
+ for (int mi_m = 0; mi_m < 2; mi_m++) {
336
+ long l1_idx = key01 & ((1L << mi_nbits) - 1);
337
+ const __m128 * pq_l1 = pq_l1_t + M_2 * l1_idx;
338
+
339
+ for (int m = 0; m < M_2; m++) {
340
+ __m128 qi = _mm_loadu_ps(qa);
341
+ __m128 recons = pq_l1[m] + pq_l2_t[*code++];
342
+ __m128 diff = qi - recons;
343
+ accu += diff * diff;
344
+ pq_l2_t += 256;
345
+ qa += 4;
346
+ }
347
+ pq_l1_t += M_2 << mi_nbits;
348
+ key01 >>= mi_nbits;
349
+ }
350
+ accu = _mm_hadd_ps (accu, accu);
351
+ accu = _mm_hadd_ps (accu, accu);
352
+ return _mm_cvtss_f32 (accu);
353
+ #else
354
+ FAISS_THROW_MSG("not implemented for non-x64 platforms");
355
+ #endif
356
+ }
357
+
358
+ };
359
+
360
+
361
+ } // namespace
362
+
363
+
364
+ DistanceComputer * Index2Layer::get_distance_computer() const {
365
+ #ifdef __SSE__
366
+ const MultiIndexQuantizer *mi =
367
+ dynamic_cast<MultiIndexQuantizer*> (q1.quantizer);
368
+
369
+ if (mi && pq.M % 2 == 0 && pq.dsub == 4) {
370
+ return new Distance2xXPQ4(*this);
371
+ }
372
+
373
+ const IndexFlat *fl =
374
+ dynamic_cast<IndexFlat*> (q1.quantizer);
375
+
376
+ if (fl && pq.dsub == 4) {
377
+ return new DistanceXPQ4(*this);
378
+ }
379
+ #endif
380
+
381
+ return Index::get_distance_computer();
382
+ }
383
+
384
+
385
+ /* The standalone codec interface */
386
+ size_t Index2Layer::sa_code_size () const
387
+ {
388
+ return code_size;
389
+ }
390
+
391
+ void Index2Layer::sa_encode (idx_t n, const float *x, uint8_t *bytes) const
392
+ {
393
+ FAISS_THROW_IF_NOT (is_trained);
394
+ std::unique_ptr<int64_t []> list_nos (new int64_t [n]);
395
+ q1.quantizer->assign (n, x, list_nos.get());
396
+ std::vector<float> residuals(n * d);
397
+ for (idx_t i = 0; i < n; i++) {
398
+ q1.quantizer->compute_residual (
399
+ x + i * d, residuals.data() + i * d, list_nos[i]);
400
+ }
401
+ pq.compute_codes (residuals.data(), bytes, n);
402
+
403
+ for (idx_t i = n - 1; i >= 0; i--) {
404
+ uint8_t * code = bytes + i * code_size;
405
+ memmove (code + code_size_1,
406
+ bytes + i * code_size_2, code_size_2);
407
+ q1.encode_listno (list_nos[i], code);
408
+ }
409
+
410
+ }
411
+
412
+ void Index2Layer::sa_decode (idx_t n, const uint8_t *bytes, float *x) const
413
+ {
414
+
415
+ #pragma omp parallel
416
+ {
417
+ std::vector<float> residual (d);
418
+
419
+ #pragma omp for
420
+ for (size_t i = 0; i < n; i++) {
421
+ const uint8_t *code = bytes + i * code_size;
422
+ int64_t list_no = q1.decode_listno (code);
423
+ float *xi = x + i * d;
424
+ pq.decode (code + code_size_1, xi);
425
+ q1.quantizer->reconstruct (list_no, residual.data());
426
+ for (size_t j = 0; j < d; j++) {
427
+ xi[j] += residual[j];
428
+ }
429
+ }
430
+ }
431
+
432
+ }
433
+
434
+
435
+
436
+
437
+ } // namespace faiss