faiss 0.1.0 → 0.1.1

Files changed (226)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/README.md +103 -3
  4. data/ext/faiss/ext.cpp +99 -32
  5. data/ext/faiss/extconf.rb +12 -2
  6. data/lib/faiss/ext.bundle +0 -0
  7. data/lib/faiss/index.rb +3 -3
  8. data/lib/faiss/index_binary.rb +3 -3
  9. data/lib/faiss/kmeans.rb +1 -1
  10. data/lib/faiss/pca_matrix.rb +2 -2
  11. data/lib/faiss/product_quantizer.rb +3 -3
  12. data/lib/faiss/version.rb +1 -1
  13. data/vendor/faiss/AutoTune.cpp +719 -0
  14. data/vendor/faiss/AutoTune.h +212 -0
  15. data/vendor/faiss/Clustering.cpp +261 -0
  16. data/vendor/faiss/Clustering.h +101 -0
  17. data/vendor/faiss/IVFlib.cpp +339 -0
  18. data/vendor/faiss/IVFlib.h +132 -0
  19. data/vendor/faiss/Index.cpp +171 -0
  20. data/vendor/faiss/Index.h +261 -0
  21. data/vendor/faiss/Index2Layer.cpp +437 -0
  22. data/vendor/faiss/Index2Layer.h +85 -0
  23. data/vendor/faiss/IndexBinary.cpp +77 -0
  24. data/vendor/faiss/IndexBinary.h +163 -0
  25. data/vendor/faiss/IndexBinaryFlat.cpp +83 -0
  26. data/vendor/faiss/IndexBinaryFlat.h +54 -0
  27. data/vendor/faiss/IndexBinaryFromFloat.cpp +78 -0
  28. data/vendor/faiss/IndexBinaryFromFloat.h +52 -0
  29. data/vendor/faiss/IndexBinaryHNSW.cpp +325 -0
  30. data/vendor/faiss/IndexBinaryHNSW.h +56 -0
  31. data/vendor/faiss/IndexBinaryIVF.cpp +671 -0
  32. data/vendor/faiss/IndexBinaryIVF.h +211 -0
  33. data/vendor/faiss/IndexFlat.cpp +508 -0
  34. data/vendor/faiss/IndexFlat.h +175 -0
  35. data/vendor/faiss/IndexHNSW.cpp +1090 -0
  36. data/vendor/faiss/IndexHNSW.h +170 -0
  37. data/vendor/faiss/IndexIVF.cpp +909 -0
  38. data/vendor/faiss/IndexIVF.h +353 -0
  39. data/vendor/faiss/IndexIVFFlat.cpp +502 -0
  40. data/vendor/faiss/IndexIVFFlat.h +118 -0
  41. data/vendor/faiss/IndexIVFPQ.cpp +1207 -0
  42. data/vendor/faiss/IndexIVFPQ.h +161 -0
  43. data/vendor/faiss/IndexIVFPQR.cpp +219 -0
  44. data/vendor/faiss/IndexIVFPQR.h +65 -0
  45. data/vendor/faiss/IndexIVFSpectralHash.cpp +331 -0
  46. data/vendor/faiss/IndexIVFSpectralHash.h +75 -0
  47. data/vendor/faiss/IndexLSH.cpp +225 -0
  48. data/vendor/faiss/IndexLSH.h +87 -0
  49. data/vendor/faiss/IndexLattice.cpp +143 -0
  50. data/vendor/faiss/IndexLattice.h +68 -0
  51. data/vendor/faiss/IndexPQ.cpp +1188 -0
  52. data/vendor/faiss/IndexPQ.h +199 -0
  53. data/vendor/faiss/IndexPreTransform.cpp +288 -0
  54. data/vendor/faiss/IndexPreTransform.h +91 -0
  55. data/vendor/faiss/IndexReplicas.cpp +123 -0
  56. data/vendor/faiss/IndexReplicas.h +76 -0
  57. data/vendor/faiss/IndexScalarQuantizer.cpp +317 -0
  58. data/vendor/faiss/IndexScalarQuantizer.h +127 -0
  59. data/vendor/faiss/IndexShards.cpp +317 -0
  60. data/vendor/faiss/IndexShards.h +100 -0
  61. data/vendor/faiss/InvertedLists.cpp +623 -0
  62. data/vendor/faiss/InvertedLists.h +334 -0
  63. data/vendor/faiss/LICENSE +21 -0
  64. data/vendor/faiss/MatrixStats.cpp +252 -0
  65. data/vendor/faiss/MatrixStats.h +62 -0
  66. data/vendor/faiss/MetaIndexes.cpp +351 -0
  67. data/vendor/faiss/MetaIndexes.h +126 -0
  68. data/vendor/faiss/OnDiskInvertedLists.cpp +674 -0
  69. data/vendor/faiss/OnDiskInvertedLists.h +127 -0
  70. data/vendor/faiss/VectorTransform.cpp +1157 -0
  71. data/vendor/faiss/VectorTransform.h +322 -0
  72. data/vendor/faiss/c_api/AutoTune_c.cpp +83 -0
  73. data/vendor/faiss/c_api/AutoTune_c.h +64 -0
  74. data/vendor/faiss/c_api/Clustering_c.cpp +139 -0
  75. data/vendor/faiss/c_api/Clustering_c.h +117 -0
  76. data/vendor/faiss/c_api/IndexFlat_c.cpp +140 -0
  77. data/vendor/faiss/c_api/IndexFlat_c.h +115 -0
  78. data/vendor/faiss/c_api/IndexIVFFlat_c.cpp +64 -0
  79. data/vendor/faiss/c_api/IndexIVFFlat_c.h +58 -0
  80. data/vendor/faiss/c_api/IndexIVF_c.cpp +92 -0
  81. data/vendor/faiss/c_api/IndexIVF_c.h +135 -0
  82. data/vendor/faiss/c_api/IndexLSH_c.cpp +37 -0
  83. data/vendor/faiss/c_api/IndexLSH_c.h +40 -0
  84. data/vendor/faiss/c_api/IndexShards_c.cpp +44 -0
  85. data/vendor/faiss/c_api/IndexShards_c.h +42 -0
  86. data/vendor/faiss/c_api/Index_c.cpp +105 -0
  87. data/vendor/faiss/c_api/Index_c.h +183 -0
  88. data/vendor/faiss/c_api/MetaIndexes_c.cpp +49 -0
  89. data/vendor/faiss/c_api/MetaIndexes_c.h +49 -0
  90. data/vendor/faiss/c_api/clone_index_c.cpp +23 -0
  91. data/vendor/faiss/c_api/clone_index_c.h +32 -0
  92. data/vendor/faiss/c_api/error_c.h +42 -0
  93. data/vendor/faiss/c_api/error_impl.cpp +27 -0
  94. data/vendor/faiss/c_api/error_impl.h +16 -0
  95. data/vendor/faiss/c_api/faiss_c.h +58 -0
  96. data/vendor/faiss/c_api/gpu/GpuAutoTune_c.cpp +96 -0
  97. data/vendor/faiss/c_api/gpu/GpuAutoTune_c.h +56 -0
  98. data/vendor/faiss/c_api/gpu/GpuClonerOptions_c.cpp +52 -0
  99. data/vendor/faiss/c_api/gpu/GpuClonerOptions_c.h +68 -0
  100. data/vendor/faiss/c_api/gpu/GpuIndex_c.cpp +17 -0
  101. data/vendor/faiss/c_api/gpu/GpuIndex_c.h +30 -0
  102. data/vendor/faiss/c_api/gpu/GpuIndicesOptions_c.h +38 -0
  103. data/vendor/faiss/c_api/gpu/GpuResources_c.cpp +86 -0
  104. data/vendor/faiss/c_api/gpu/GpuResources_c.h +66 -0
  105. data/vendor/faiss/c_api/gpu/StandardGpuResources_c.cpp +54 -0
  106. data/vendor/faiss/c_api/gpu/StandardGpuResources_c.h +53 -0
  107. data/vendor/faiss/c_api/gpu/macros_impl.h +42 -0
  108. data/vendor/faiss/c_api/impl/AuxIndexStructures_c.cpp +220 -0
  109. data/vendor/faiss/c_api/impl/AuxIndexStructures_c.h +149 -0
  110. data/vendor/faiss/c_api/index_factory_c.cpp +26 -0
  111. data/vendor/faiss/c_api/index_factory_c.h +30 -0
  112. data/vendor/faiss/c_api/index_io_c.cpp +42 -0
  113. data/vendor/faiss/c_api/index_io_c.h +50 -0
  114. data/vendor/faiss/c_api/macros_impl.h +110 -0
  115. data/vendor/faiss/clone_index.cpp +147 -0
  116. data/vendor/faiss/clone_index.h +38 -0
  117. data/vendor/faiss/demos/demo_imi_flat.cpp +151 -0
  118. data/vendor/faiss/demos/demo_imi_pq.cpp +199 -0
  119. data/vendor/faiss/demos/demo_ivfpq_indexing.cpp +146 -0
  120. data/vendor/faiss/demos/demo_sift1M.cpp +252 -0
  121. data/vendor/faiss/gpu/GpuAutoTune.cpp +95 -0
  122. data/vendor/faiss/gpu/GpuAutoTune.h +27 -0
  123. data/vendor/faiss/gpu/GpuCloner.cpp +403 -0
  124. data/vendor/faiss/gpu/GpuCloner.h +82 -0
  125. data/vendor/faiss/gpu/GpuClonerOptions.cpp +28 -0
  126. data/vendor/faiss/gpu/GpuClonerOptions.h +53 -0
  127. data/vendor/faiss/gpu/GpuDistance.h +52 -0
  128. data/vendor/faiss/gpu/GpuFaissAssert.h +29 -0
  129. data/vendor/faiss/gpu/GpuIndex.h +148 -0
  130. data/vendor/faiss/gpu/GpuIndexBinaryFlat.h +89 -0
  131. data/vendor/faiss/gpu/GpuIndexFlat.h +190 -0
  132. data/vendor/faiss/gpu/GpuIndexIVF.h +89 -0
  133. data/vendor/faiss/gpu/GpuIndexIVFFlat.h +85 -0
  134. data/vendor/faiss/gpu/GpuIndexIVFPQ.h +143 -0
  135. data/vendor/faiss/gpu/GpuIndexIVFScalarQuantizer.h +100 -0
  136. data/vendor/faiss/gpu/GpuIndicesOptions.h +30 -0
  137. data/vendor/faiss/gpu/GpuResources.cpp +52 -0
  138. data/vendor/faiss/gpu/GpuResources.h +73 -0
  139. data/vendor/faiss/gpu/StandardGpuResources.cpp +295 -0
  140. data/vendor/faiss/gpu/StandardGpuResources.h +114 -0
  141. data/vendor/faiss/gpu/impl/RemapIndices.cpp +43 -0
  142. data/vendor/faiss/gpu/impl/RemapIndices.h +24 -0
  143. data/vendor/faiss/gpu/perf/IndexWrapper-inl.h +71 -0
  144. data/vendor/faiss/gpu/perf/IndexWrapper.h +39 -0
  145. data/vendor/faiss/gpu/perf/PerfClustering.cpp +115 -0
  146. data/vendor/faiss/gpu/perf/PerfIVFPQAdd.cpp +139 -0
  147. data/vendor/faiss/gpu/perf/WriteIndex.cpp +102 -0
  148. data/vendor/faiss/gpu/test/TestGpuIndexBinaryFlat.cpp +130 -0
  149. data/vendor/faiss/gpu/test/TestGpuIndexFlat.cpp +371 -0
  150. data/vendor/faiss/gpu/test/TestGpuIndexIVFFlat.cpp +550 -0
  151. data/vendor/faiss/gpu/test/TestGpuIndexIVFPQ.cpp +450 -0
  152. data/vendor/faiss/gpu/test/TestGpuMemoryException.cpp +84 -0
  153. data/vendor/faiss/gpu/test/TestUtils.cpp +315 -0
  154. data/vendor/faiss/gpu/test/TestUtils.h +93 -0
  155. data/vendor/faiss/gpu/test/demo_ivfpq_indexing_gpu.cpp +159 -0
  156. data/vendor/faiss/gpu/utils/DeviceMemory.cpp +77 -0
  157. data/vendor/faiss/gpu/utils/DeviceMemory.h +71 -0
  158. data/vendor/faiss/gpu/utils/DeviceUtils.h +185 -0
  159. data/vendor/faiss/gpu/utils/MemorySpace.cpp +89 -0
  160. data/vendor/faiss/gpu/utils/MemorySpace.h +44 -0
  161. data/vendor/faiss/gpu/utils/StackDeviceMemory.cpp +239 -0
  162. data/vendor/faiss/gpu/utils/StackDeviceMemory.h +129 -0
  163. data/vendor/faiss/gpu/utils/StaticUtils.h +83 -0
  164. data/vendor/faiss/gpu/utils/Timer.cpp +60 -0
  165. data/vendor/faiss/gpu/utils/Timer.h +52 -0
  166. data/vendor/faiss/impl/AuxIndexStructures.cpp +305 -0
  167. data/vendor/faiss/impl/AuxIndexStructures.h +246 -0
  168. data/vendor/faiss/impl/FaissAssert.h +95 -0
  169. data/vendor/faiss/impl/FaissException.cpp +66 -0
  170. data/vendor/faiss/impl/FaissException.h +71 -0
  171. data/vendor/faiss/impl/HNSW.cpp +818 -0
  172. data/vendor/faiss/impl/HNSW.h +275 -0
  173. data/vendor/faiss/impl/PolysemousTraining.cpp +953 -0
  174. data/vendor/faiss/impl/PolysemousTraining.h +158 -0
  175. data/vendor/faiss/impl/ProductQuantizer.cpp +876 -0
  176. data/vendor/faiss/impl/ProductQuantizer.h +242 -0
  177. data/vendor/faiss/impl/ScalarQuantizer.cpp +1628 -0
  178. data/vendor/faiss/impl/ScalarQuantizer.h +120 -0
  179. data/vendor/faiss/impl/ThreadedIndex-inl.h +192 -0
  180. data/vendor/faiss/impl/ThreadedIndex.h +80 -0
  181. data/vendor/faiss/impl/index_read.cpp +793 -0
  182. data/vendor/faiss/impl/index_write.cpp +558 -0
  183. data/vendor/faiss/impl/io.cpp +142 -0
  184. data/vendor/faiss/impl/io.h +98 -0
  185. data/vendor/faiss/impl/lattice_Zn.cpp +712 -0
  186. data/vendor/faiss/impl/lattice_Zn.h +199 -0
  187. data/vendor/faiss/index_factory.cpp +392 -0
  188. data/vendor/faiss/index_factory.h +25 -0
  189. data/vendor/faiss/index_io.h +75 -0
  190. data/vendor/faiss/misc/test_blas.cpp +84 -0
  191. data/vendor/faiss/tests/test_binary_flat.cpp +64 -0
  192. data/vendor/faiss/tests/test_dealloc_invlists.cpp +183 -0
  193. data/vendor/faiss/tests/test_ivfpq_codec.cpp +67 -0
  194. data/vendor/faiss/tests/test_ivfpq_indexing.cpp +98 -0
  195. data/vendor/faiss/tests/test_lowlevel_ivf.cpp +566 -0
  196. data/vendor/faiss/tests/test_merge.cpp +258 -0
  197. data/vendor/faiss/tests/test_omp_threads.cpp +14 -0
  198. data/vendor/faiss/tests/test_ondisk_ivf.cpp +220 -0
  199. data/vendor/faiss/tests/test_pairs_decoding.cpp +189 -0
  200. data/vendor/faiss/tests/test_params_override.cpp +231 -0
  201. data/vendor/faiss/tests/test_pq_encoding.cpp +98 -0
  202. data/vendor/faiss/tests/test_sliding_ivf.cpp +240 -0
  203. data/vendor/faiss/tests/test_threaded_index.cpp +253 -0
  204. data/vendor/faiss/tests/test_transfer_invlists.cpp +159 -0
  205. data/vendor/faiss/tutorial/cpp/1-Flat.cpp +98 -0
  206. data/vendor/faiss/tutorial/cpp/2-IVFFlat.cpp +81 -0
  207. data/vendor/faiss/tutorial/cpp/3-IVFPQ.cpp +93 -0
  208. data/vendor/faiss/tutorial/cpp/4-GPU.cpp +119 -0
  209. data/vendor/faiss/tutorial/cpp/5-Multiple-GPUs.cpp +99 -0
  210. data/vendor/faiss/utils/Heap.cpp +122 -0
  211. data/vendor/faiss/utils/Heap.h +495 -0
  212. data/vendor/faiss/utils/WorkerThread.cpp +126 -0
  213. data/vendor/faiss/utils/WorkerThread.h +61 -0
  214. data/vendor/faiss/utils/distances.cpp +765 -0
  215. data/vendor/faiss/utils/distances.h +243 -0
  216. data/vendor/faiss/utils/distances_simd.cpp +809 -0
  217. data/vendor/faiss/utils/extra_distances.cpp +336 -0
  218. data/vendor/faiss/utils/extra_distances.h +54 -0
  219. data/vendor/faiss/utils/hamming-inl.h +472 -0
  220. data/vendor/faiss/utils/hamming.cpp +792 -0
  221. data/vendor/faiss/utils/hamming.h +220 -0
  222. data/vendor/faiss/utils/random.cpp +192 -0
  223. data/vendor/faiss/utils/random.h +60 -0
  224. data/vendor/faiss/utils/utils.cpp +783 -0
  225. data/vendor/faiss/utils/utils.h +181 -0
  226. metadata +216 -2
data/vendor/faiss/OnDiskInvertedLists.cpp (new file)
@@ -0,0 +1,674 @@
+ /**
+  * Copyright (c) Facebook, Inc. and its affiliates.
+  *
+  * This source code is licensed under the MIT license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ // -*- c++ -*-
+
+ #include <faiss/OnDiskInvertedLists.h>
+
+ #include <pthread.h>
+
+ #include <unordered_set>
+
+ #include <sys/mman.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+
+ #include <faiss/impl/FaissAssert.h>
+ #include <faiss/utils/utils.h>
+
+
+ namespace faiss {
+
+
+ /**********************************************
+  * LockLevels
+  **********************************************/
+
+
+ struct LockLevels {
+     /* There n times lock1(n), one lock2 and one lock3
+      * Invariants:
+      * a single thread can hold one lock1(n) for some n
+      * a single thread can hold lock2, if it holds lock1(n) for some n
+      * a single thread can hold lock3, if it holds lock1(n) for some n
+      *   AND lock2 AND no other thread holds lock1(m) for m != n
+      */
+     pthread_mutex_t mutex1;
+     pthread_cond_t level1_cv;
+     pthread_cond_t level2_cv;
+     pthread_cond_t level3_cv;
+
+     std::unordered_set<int> level1_holders; // which level1 locks are held
+     int n_level2; // nb threads that wait on level2
+     bool level3_in_use; // a threads waits on level3
+     bool level2_in_use;
+
+     LockLevels() {
+         pthread_mutex_init(&mutex1, nullptr);
+         pthread_cond_init(&level1_cv, nullptr);
+         pthread_cond_init(&level2_cv, nullptr);
+         pthread_cond_init(&level3_cv, nullptr);
+         n_level2 = 0;
+         level2_in_use = false;
+         level3_in_use = false;
+     }
+
+     ~LockLevels() {
+         pthread_cond_destroy(&level1_cv);
+         pthread_cond_destroy(&level2_cv);
+         pthread_cond_destroy(&level3_cv);
+         pthread_mutex_destroy(&mutex1);
+     }
+
+     void lock_1(int no) {
+         pthread_mutex_lock(&mutex1);
+         while (level3_in_use || level1_holders.count(no) > 0) {
+             pthread_cond_wait(&level1_cv, &mutex1);
+         }
+         level1_holders.insert(no);
+         pthread_mutex_unlock(&mutex1);
+     }
+
+     void unlock_1(int no) {
+         pthread_mutex_lock(&mutex1);
+         assert(level1_holders.count(no) == 1);
+         level1_holders.erase(no);
+         if (level3_in_use) { // a writer is waiting
+             pthread_cond_signal(&level3_cv);
+         } else {
+             pthread_cond_broadcast(&level1_cv);
+         }
+         pthread_mutex_unlock(&mutex1);
+     }
+
+     void lock_2() {
+         pthread_mutex_lock(&mutex1);
+         n_level2 ++;
+         if (level3_in_use) { // tell waiting level3 that we are blocked
+             pthread_cond_signal(&level3_cv);
+         }
+         while (level2_in_use) {
+             pthread_cond_wait(&level2_cv, &mutex1);
+         }
+         level2_in_use = true;
+         pthread_mutex_unlock(&mutex1);
+     }
+
+     void unlock_2() {
+         pthread_mutex_lock(&mutex1);
+         level2_in_use = false;
+         n_level2 --;
+         pthread_cond_signal(&level2_cv);
+         pthread_mutex_unlock(&mutex1);
+     }
+
+     void lock_3() {
+         pthread_mutex_lock(&mutex1);
+         level3_in_use = true;
+         // wait until there are no level1 holders anymore except the
+         // ones that are waiting on level2 (we are holding lock2)
+         while (level1_holders.size() > n_level2) {
+             pthread_cond_wait(&level3_cv, &mutex1);
+         }
+         // don't release the lock!
+     }
+
+     void unlock_3() {
+         level3_in_use = false;
+         // wake up all level1_holders
+         pthread_cond_broadcast(&level1_cv);
+         pthread_mutex_unlock(&mutex1);
+     }
+
+     void print () {
+         pthread_mutex_lock(&mutex1);
+         printf("State: level3_in_use=%d n_level2=%d level1_holders: [", level3_in_use, n_level2);
+         for (int k : level1_holders) {
+             printf("%d ", k);
+         }
+         printf("]\n");
+         pthread_mutex_unlock(&mutex1);
+     }
+
+ };
+
+ /**********************************************
+  * OngoingPrefetch
+  **********************************************/
+
+ struct OnDiskInvertedLists::OngoingPrefetch {
+
+     struct Thread {
+         pthread_t pth;
+         OngoingPrefetch *pf;
+
+         bool one_list () {
+             idx_t list_no = pf->get_next_list();
+             if(list_no == -1) return false;
+             const OnDiskInvertedLists *od = pf->od;
+             od->locks->lock_1 (list_no);
+             size_t n = od->list_size (list_no);
+             const Index::idx_t *idx = od->get_ids (list_no);
+             const uint8_t *codes = od->get_codes (list_no);
+             int cs = 0;
+             for (size_t i = 0; i < n;i++) {
+                 cs += idx[i];
+             }
+             const idx_t *codes8 = (const idx_t*)codes;
+             idx_t n8 = n * od->code_size / 8;
+
+             for (size_t i = 0; i < n8;i++) {
+                 cs += codes8[i];
+             }
+             od->locks->unlock_1(list_no);
+
+             global_cs += cs & 1;
+             return true;
+         }
+
+     };
+
+     std::vector<Thread> threads;
+
+     pthread_mutex_t list_ids_mutex;
+     std::vector<idx_t> list_ids;
+     int cur_list;
+
+     // mutex for the list of tasks
+     pthread_mutex_t mutex;
+
+     // pretext to avoid code below to be optimized out
+     static int global_cs;
+
+     const OnDiskInvertedLists *od;
+
+     explicit OngoingPrefetch (const OnDiskInvertedLists *od): od (od)
+     {
+         pthread_mutex_init (&mutex, nullptr);
+         pthread_mutex_init (&list_ids_mutex, nullptr);
+         cur_list = 0;
+     }
+
+     static void* prefetch_list (void * arg) {
+         Thread *th = static_cast<Thread*>(arg);
+
+         while (th->one_list()) ;
+
+         return nullptr;
+     }
+
+     idx_t get_next_list () {
+         idx_t list_no = -1;
+         pthread_mutex_lock (&list_ids_mutex);
+         if (cur_list >= 0 && cur_list < list_ids.size()) {
+             list_no = list_ids[cur_list++];
+         }
+         pthread_mutex_unlock (&list_ids_mutex);
+         return list_no;
+     }
+
+     void prefetch_lists (const idx_t *list_nos, int n) {
+         pthread_mutex_lock (&mutex);
+         pthread_mutex_lock (&list_ids_mutex);
+         list_ids.clear ();
+         pthread_mutex_unlock (&list_ids_mutex);
+         for (auto &th: threads) {
+             pthread_join (th.pth, nullptr);
+         }
+
+         threads.resize (0);
+         cur_list = 0;
+         int nt = std::min (n, od->prefetch_nthread);
+
+         if (nt > 0) {
+             // prepare tasks
+             for (int i = 0; i < n; i++) {
+                 idx_t list_no = list_nos[i];
+                 if (list_no >= 0 && od->list_size(list_no) > 0) {
+                     list_ids.push_back (list_no);
+                 }
+             }
+             // prepare threads
+             threads.resize (nt);
+             for (Thread &th: threads) {
+                 th.pf = this;
+                 pthread_create (&th.pth, nullptr, prefetch_list, &th);
+             }
+         }
+         pthread_mutex_unlock (&mutex);
+     }
+
+     ~OngoingPrefetch () {
+         pthread_mutex_lock (&mutex);
+         for (auto &th: threads) {
+             pthread_join (th.pth, nullptr);
+         }
+         pthread_mutex_unlock (&mutex);
+         pthread_mutex_destroy (&mutex);
+         pthread_mutex_destroy (&list_ids_mutex);
+     }
+
+ };
+
+ int OnDiskInvertedLists::OngoingPrefetch::global_cs = 0;
+
+
+ void OnDiskInvertedLists::prefetch_lists (const idx_t *list_nos, int n) const
+ {
+     pf->prefetch_lists (list_nos, n);
+ }
+
+
+
+ /**********************************************
+  * OnDiskInvertedLists: mmapping
+  **********************************************/
+
+
+ void OnDiskInvertedLists::do_mmap ()
+ {
+     const char *rw_flags = read_only ? "r" : "r+";
+     int prot = read_only ? PROT_READ : PROT_WRITE | PROT_READ;
+     FILE *f = fopen (filename.c_str(), rw_flags);
+     FAISS_THROW_IF_NOT_FMT (f, "could not open %s in mode %s: %s",
+             filename.c_str(), rw_flags, strerror(errno));
+
+     uint8_t * ptro = (uint8_t*)mmap (nullptr, totsize,
+             prot, MAP_SHARED, fileno (f), 0);
+
+     FAISS_THROW_IF_NOT_FMT (ptro != MAP_FAILED,
+             "could not mmap %s: %s",
+             filename.c_str(),
+             strerror(errno));
+     ptr = ptro;
+     fclose (f);
+
+ }
+
+ void OnDiskInvertedLists::update_totsize (size_t new_size)
+ {
+
+     // unmap file
+     if (ptr != nullptr) {
+         int err = munmap (ptr, totsize);
+         FAISS_THROW_IF_NOT_FMT (err == 0, "munmap error: %s",
+                 strerror(errno));
+     }
+     if (totsize == 0) {
+         // must create file before truncating it
+         FILE *f = fopen (filename.c_str(), "w");
+         FAISS_THROW_IF_NOT_FMT (f, "could not open %s in mode W: %s",
+                 filename.c_str(), strerror(errno));
+         fclose (f);
+     }
+
+     if (new_size > totsize) {
+         if (!slots.empty() &&
+                 slots.back().offset + slots.back().capacity == totsize) {
+             slots.back().capacity += new_size - totsize;
+         } else {
+             slots.push_back (Slot(totsize, new_size - totsize));
+         }
+     } else {
+         assert(!"not implemented");
+     }
+
+     totsize = new_size;
+
+     // create file
+     printf ("resizing %s to %ld bytes\n", filename.c_str(), totsize);
+
+     int err = truncate (filename.c_str(), totsize);
+
+     FAISS_THROW_IF_NOT_FMT (err == 0, "truncate %s to %ld: %s",
+             filename.c_str(), totsize,
+             strerror(errno));
+     do_mmap ();
+ }
+
+
+
+
+
+
+
+ /**********************************************
+  * OnDiskInvertedLists
+  **********************************************/
+
+ #define INVALID_OFFSET (size_t)(-1)
+
+ OnDiskInvertedLists::List::List ():
+     size (0), capacity (0), offset (INVALID_OFFSET)
+ {}
+
+ OnDiskInvertedLists::Slot::Slot (size_t offset, size_t capacity):
+     offset (offset), capacity (capacity)
+ {}
+
+ OnDiskInvertedLists::Slot::Slot ():
+     offset (0), capacity (0)
+ {}
+
+
+
+ OnDiskInvertedLists::OnDiskInvertedLists (
+         size_t nlist, size_t code_size,
+         const char *filename):
+     InvertedLists (nlist, code_size),
+     filename (filename),
+     totsize (0),
+     ptr (nullptr),
+     read_only (false),
+     locks (new LockLevels ()),
+     pf (new OngoingPrefetch (this)),
+     prefetch_nthread (32)
+ {
+     lists.resize (nlist);
+
+     // slots starts empty
+ }
+
+ OnDiskInvertedLists::OnDiskInvertedLists ():
+     OnDiskInvertedLists (0, 0, "")
+ {
+ }
+
+ OnDiskInvertedLists::~OnDiskInvertedLists ()
+ {
+     delete pf;
+
+     // unmap all lists
+     if (ptr != nullptr) {
+         int err = munmap (ptr, totsize);
+         if (err != 0) {
+             fprintf(stderr, "mumap error: %s",
+                     strerror(errno));
+         }
+     }
+     delete locks;
+ }
+
+
+
+
+ size_t OnDiskInvertedLists::list_size(size_t list_no) const
+ {
+     return lists[list_no].size;
+ }
+
+
+ const uint8_t * OnDiskInvertedLists::get_codes (size_t list_no) const
+ {
+     if (lists[list_no].offset == INVALID_OFFSET) {
+         return nullptr;
+     }
+
+     return ptr + lists[list_no].offset;
+ }
+
+ const Index::idx_t * OnDiskInvertedLists::get_ids (size_t list_no) const
+ {
+     if (lists[list_no].offset == INVALID_OFFSET) {
+         return nullptr;
+     }
+
+     return (const idx_t*)(ptr + lists[list_no].offset +
+             code_size * lists[list_no].capacity);
+ }
+
+
+ void OnDiskInvertedLists::update_entries (
+         size_t list_no, size_t offset, size_t n_entry,
+         const idx_t *ids_in, const uint8_t *codes_in)
+ {
+     FAISS_THROW_IF_NOT (!read_only);
+     if (n_entry == 0) return;
+     const List & l = lists[list_no];
+     assert (n_entry + offset <= l.size);
+     idx_t *ids = const_cast<idx_t*>(get_ids (list_no));
+     memcpy (ids + offset, ids_in, sizeof(ids_in[0]) * n_entry);
+     uint8_t *codes = const_cast<uint8_t*>(get_codes (list_no));
+     memcpy (codes + offset * code_size, codes_in, code_size * n_entry);
+ }
+
+ size_t OnDiskInvertedLists::add_entries (
+         size_t list_no, size_t n_entry,
+         const idx_t* ids, const uint8_t *code)
+ {
+     FAISS_THROW_IF_NOT (!read_only);
+     locks->lock_1 (list_no);
+     size_t o = list_size (list_no);
+     resize_locked (list_no, n_entry + o);
+     update_entries (list_no, o, n_entry, ids, code);
+     locks->unlock_1 (list_no);
+     return o;
+ }
+
+ void OnDiskInvertedLists::resize (size_t list_no, size_t new_size)
+ {
+     FAISS_THROW_IF_NOT (!read_only);
+     locks->lock_1 (list_no);
+     resize_locked (list_no, new_size);
+     locks->unlock_1 (list_no);
+ }
+
+
+
+ void OnDiskInvertedLists::resize_locked (size_t list_no, size_t new_size)
+ {
+     List & l = lists[list_no];
+
+     if (new_size <= l.capacity &&
+             new_size > l.capacity / 2) {
+         l.size = new_size;
+         return;
+     }
+
+     // otherwise we release the current slot, and find a new one
+
+     locks->lock_2 ();
+     free_slot (l.offset, l.capacity);
+
+     List new_l;
+
+     if (new_size == 0) {
+         new_l = List();
+     } else {
+         new_l.size = new_size;
+         new_l.capacity = 1;
+         while (new_l.capacity < new_size) {
+             new_l.capacity *= 2;
+         }
+         new_l.offset = allocate_slot (
+                 new_l.capacity * (sizeof(idx_t) + code_size));
+     }
+
+     // copy common data
+     if (l.offset != new_l.offset) {
+         size_t n = std::min (new_size, l.size);
+         if (n > 0) {
+             memcpy (ptr + new_l.offset, get_codes(list_no), n * code_size);
+             memcpy (ptr + new_l.offset + new_l.capacity * code_size,
+                     get_ids (list_no), n * sizeof(idx_t));
+         }
+     }
+
+     lists[list_no] = new_l;
+     locks->unlock_2 ();
+ }
+
+ size_t OnDiskInvertedLists::allocate_slot (size_t capacity) {
+     // should hold lock2
+
+     auto it = slots.begin();
+     while (it != slots.end() && it->capacity < capacity) {
+         it++;
+     }
+
+     if (it == slots.end()) {
+         // not enough capacity
+         size_t new_size = totsize == 0 ? 32 : totsize * 2;
+         while (new_size - totsize < capacity)
+             new_size *= 2;
+         locks->lock_3 ();
+         update_totsize(new_size);
+         locks->unlock_3 ();
+         it = slots.begin();
+         while (it != slots.end() && it->capacity < capacity) {
+             it++;
+         }
+         assert (it != slots.end());
+     }
+
+     size_t o = it->offset;
+     if (it->capacity == capacity) {
+         slots.erase (it);
+     } else {
+         // take from beginning of slot
+         it->capacity -= capacity;
+         it->offset += capacity;
+     }
+
+     return o;
+ }
+
+
+
+ void OnDiskInvertedLists::free_slot (size_t offset, size_t capacity) {
+
+     // should hold lock2
+     if (capacity == 0) return;
+
+     auto it = slots.begin();
+     while (it != slots.end() && it->offset <= offset) {
+         it++;
+     }
+
+     size_t inf = 1UL << 60;
+
+     size_t end_prev = inf;
+     if (it != slots.begin()) {
+         auto prev = it;
+         prev--;
+         end_prev = prev->offset + prev->capacity;
+     }
+
+     size_t begin_next = 1L << 60;
+     if (it != slots.end()) {
+         begin_next = it->offset;
+     }
+
+     assert (end_prev == inf || offset >= end_prev);
+     assert (offset + capacity <= begin_next);
+
+     if (offset == end_prev) {
+         auto prev = it;
+         prev--;
+         if (offset + capacity == begin_next) {
+             prev->capacity += capacity + it->capacity;
+             slots.erase (it);
+         } else {
+             prev->capacity += capacity;
+         }
+     } else {
+         if (offset + capacity == begin_next) {
+             it->offset -= capacity;
+             it->capacity += capacity;
+         } else {
+             slots.insert (it, Slot (offset, capacity));
+         }
+     }
+
+     // TODO shrink global storage if needed
+ }
+
+
+ /*****************************************
+  * Compact form
+  *****************************************/
+
+ size_t OnDiskInvertedLists::merge_from (const InvertedLists **ils, int n_il,
+         bool verbose)
+ {
+     FAISS_THROW_IF_NOT_MSG (totsize == 0, "works only on an empty InvertedLists");
+
+     std::vector<size_t> sizes (nlist);
+     for (int i = 0; i < n_il; i++) {
+         const InvertedLists *il = ils[i];
+         FAISS_THROW_IF_NOT (il->nlist == nlist && il->code_size == code_size);
+
+         for (size_t j = 0; j < nlist; j++) {
+             sizes [j] += il->list_size(j);
+         }
+     }
+
+     size_t cums = 0;
+     size_t ntotal = 0;
+     for (size_t j = 0; j < nlist; j++) {
+         ntotal += sizes[j];
+         lists[j].size = 0;
+         lists[j].capacity = sizes[j];
+         lists[j].offset = cums;
+         cums += lists[j].capacity * (sizeof(idx_t) + code_size);
+     }
+
+     update_totsize (cums);
+
+
+     size_t nmerged = 0;
+     double t0 = getmillisecs(), last_t = t0;
+
+ #pragma omp parallel for
+     for (size_t j = 0; j < nlist; j++) {
+         List & l = lists[j];
+         for (int i = 0; i < n_il; i++) {
+             const InvertedLists *il = ils[i];
+             size_t n_entry = il->list_size(j);
+             l.size += n_entry;
+             update_entries (j, l.size - n_entry, n_entry,
+                     ScopedIds(il, j).get(),
+                     ScopedCodes(il, j).get());
+         }
+         assert (l.size == l.capacity);
+         if (verbose) {
+ #pragma omp critical
+             {
+                 nmerged++;
+                 double t1 = getmillisecs();
+                 if (t1 - last_t > 500) {
+                     printf("merged %ld lists in %.3f s\r",
+                             nmerged, (t1 - t0) / 1000.0);
+                     fflush(stdout);
+                     last_t = t1;
+                 }
+             }
+         }
+     }
+     if(verbose) {
+         printf("\n");
+     }
+
+     return ntotal;
+ }
+
+
+ void OnDiskInvertedLists::crop_invlists(size_t l0, size_t l1)
+ {
+     FAISS_THROW_IF_NOT(0 <= l0 && l0 <= l1 && l1 <= nlist);
+
+     std::vector<List> new_lists (l1 - l0);
+     memcpy (new_lists.data(), &lists[l0], (l1 - l0) * sizeof(List));
+
+     lists.swap(new_lists);
+
+     nlist = l1 - l0;
+ }
+
+
+
+
+ } // namespace faiss
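
For orientation, here is a minimal sketch (not part of the gem) of how the class added in the diff above can be driven directly. The file path and the toy sizes are made up; in practice the lists are usually filled through an IndexIVF or via merge_from rather than by hand. Every call used here (the constructor, add_entries, list_size, get_ids) appears in the source shown above; linking against the vendored faiss library is assumed.

#include <faiss/OnDiskInvertedLists.h>

#include <cstdio>
#include <vector>

int main() {
    size_t nlist = 4;     // number of inverted lists (toy value)
    size_t code_size = 8; // bytes per encoded vector (toy value)

    // The backing file is created, truncated, and mmapped lazily by
    // update_totsize() the first time a slot has to be allocated.
    faiss::OnDiskInvertedLists il(nlist, code_size, "/tmp/demo_ondisk.ivl");

    // Append two entries to list 0: one id plus code_size code bytes per entry.
    std::vector<faiss::Index::idx_t> ids = {42, 43};
    std::vector<uint8_t> codes(ids.size() * code_size, 0);
    il.add_entries(0, ids.size(), ids.data(), codes.data());

    // Read them back through the same accessors the prefetch threads use.
    std::printf("list 0: size=%zu first_id=%lld\n",
                il.list_size(0), (long long)il.get_ids(0)[0]);
    return 0;
}

The add_entries call goes through the locking scheme documented in LockLevels: lock_1 guards the individual list, lock_2 guards the slot allocator, and lock_3 is taken only when the mmapped file has to grow.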