faiss 0.2.6 → 0.2.7
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/ext/faiss/extconf.rb +1 -1
- data/lib/faiss/version.rb +1 -1
- data/lib/faiss.rb +2 -2
- data/vendor/faiss/faiss/AutoTune.cpp +15 -4
- data/vendor/faiss/faiss/AutoTune.h +0 -1
- data/vendor/faiss/faiss/Clustering.cpp +1 -5
- data/vendor/faiss/faiss/Clustering.h +0 -2
- data/vendor/faiss/faiss/IVFlib.h +0 -2
- data/vendor/faiss/faiss/Index.h +1 -2
- data/vendor/faiss/faiss/IndexAdditiveQuantizer.cpp +17 -3
- data/vendor/faiss/faiss/IndexAdditiveQuantizer.h +10 -1
- data/vendor/faiss/faiss/IndexBinary.h +0 -1
- data/vendor/faiss/faiss/IndexBinaryFlat.cpp +2 -1
- data/vendor/faiss/faiss/IndexBinaryFlat.h +4 -0
- data/vendor/faiss/faiss/IndexBinaryHash.cpp +1 -3
- data/vendor/faiss/faiss/IndexBinaryIVF.cpp +273 -48
- data/vendor/faiss/faiss/IndexBinaryIVF.h +18 -11
- data/vendor/faiss/faiss/IndexFastScan.cpp +13 -10
- data/vendor/faiss/faiss/IndexFastScan.h +5 -1
- data/vendor/faiss/faiss/IndexFlat.cpp +16 -3
- data/vendor/faiss/faiss/IndexFlat.h +1 -1
- data/vendor/faiss/faiss/IndexFlatCodes.cpp +5 -0
- data/vendor/faiss/faiss/IndexFlatCodes.h +7 -2
- data/vendor/faiss/faiss/IndexHNSW.cpp +3 -6
- data/vendor/faiss/faiss/IndexHNSW.h +0 -1
- data/vendor/faiss/faiss/IndexIDMap.cpp +4 -4
- data/vendor/faiss/faiss/IndexIDMap.h +0 -2
- data/vendor/faiss/faiss/IndexIVF.cpp +155 -129
- data/vendor/faiss/faiss/IndexIVF.h +121 -61
- data/vendor/faiss/faiss/IndexIVFAdditiveQuantizer.cpp +2 -2
- data/vendor/faiss/faiss/IndexIVFFastScan.cpp +12 -11
- data/vendor/faiss/faiss/IndexIVFFastScan.h +6 -1
- data/vendor/faiss/faiss/IndexIVFPQ.cpp +221 -165
- data/vendor/faiss/faiss/IndexIVFPQ.h +1 -0
- data/vendor/faiss/faiss/IndexIVFPQFastScan.cpp +6 -1
- data/vendor/faiss/faiss/IndexIVFSpectralHash.cpp +0 -2
- data/vendor/faiss/faiss/IndexNNDescent.cpp +1 -2
- data/vendor/faiss/faiss/IndexNNDescent.h +0 -1
- data/vendor/faiss/faiss/IndexNSG.cpp +1 -2
- data/vendor/faiss/faiss/IndexPQ.cpp +7 -9
- data/vendor/faiss/faiss/IndexRefine.cpp +1 -1
- data/vendor/faiss/faiss/IndexReplicas.cpp +3 -4
- data/vendor/faiss/faiss/IndexReplicas.h +0 -1
- data/vendor/faiss/faiss/IndexRowwiseMinMax.cpp +8 -1
- data/vendor/faiss/faiss/IndexRowwiseMinMax.h +7 -0
- data/vendor/faiss/faiss/IndexShards.cpp +26 -109
- data/vendor/faiss/faiss/IndexShards.h +2 -3
- data/vendor/faiss/faiss/IndexShardsIVF.cpp +246 -0
- data/vendor/faiss/faiss/IndexShardsIVF.h +42 -0
- data/vendor/faiss/faiss/MetaIndexes.cpp +86 -0
- data/vendor/faiss/faiss/MetaIndexes.h +29 -0
- data/vendor/faiss/faiss/MetricType.h +14 -0
- data/vendor/faiss/faiss/VectorTransform.cpp +8 -10
- data/vendor/faiss/faiss/VectorTransform.h +1 -3
- data/vendor/faiss/faiss/clone_index.cpp +232 -18
- data/vendor/faiss/faiss/cppcontrib/SaDecodeKernels.h +25 -3
- data/vendor/faiss/faiss/cppcontrib/detail/CoarseBitType.h +7 -0
- data/vendor/faiss/faiss/cppcontrib/detail/UintReader.h +78 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-avx2-inl.h +20 -6
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-inl.h +7 -1
- data/vendor/faiss/faiss/cppcontrib/sa_decode/Level2-neon-inl.h +21 -7
- data/vendor/faiss/faiss/cppcontrib/sa_decode/MinMax-inl.h +7 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/MinMaxFP16-inl.h +7 -0
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-avx2-inl.h +10 -3
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-inl.h +7 -1
- data/vendor/faiss/faiss/cppcontrib/sa_decode/PQ-neon-inl.h +11 -3
- data/vendor/faiss/faiss/gpu/GpuAutoTune.cpp +25 -2
- data/vendor/faiss/faiss/gpu/GpuCloner.cpp +76 -29
- data/vendor/faiss/faiss/gpu/GpuCloner.h +2 -2
- data/vendor/faiss/faiss/gpu/GpuClonerOptions.h +14 -13
- data/vendor/faiss/faiss/gpu/GpuDistance.h +18 -6
- data/vendor/faiss/faiss/gpu/GpuIndex.h +23 -21
- data/vendor/faiss/faiss/gpu/GpuIndexBinaryFlat.h +10 -10
- data/vendor/faiss/faiss/gpu/GpuIndexFlat.h +11 -12
- data/vendor/faiss/faiss/gpu/GpuIndexIVF.h +29 -50
- data/vendor/faiss/faiss/gpu/GpuIndexIVFFlat.h +3 -3
- data/vendor/faiss/faiss/gpu/GpuIndexIVFPQ.h +8 -8
- data/vendor/faiss/faiss/gpu/GpuIndexIVFScalarQuantizer.h +4 -4
- data/vendor/faiss/faiss/gpu/impl/IndexUtils.h +2 -5
- data/vendor/faiss/faiss/gpu/impl/RemapIndices.cpp +9 -7
- data/vendor/faiss/faiss/gpu/impl/RemapIndices.h +4 -4
- data/vendor/faiss/faiss/gpu/perf/IndexWrapper-inl.h +2 -2
- data/vendor/faiss/faiss/gpu/perf/IndexWrapper.h +1 -1
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexBinaryFlat.cpp +55 -6
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexFlat.cpp +20 -6
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexIVFFlat.cpp +95 -25
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexIVFPQ.cpp +67 -16
- data/vendor/faiss/faiss/gpu/test/TestGpuIndexIVFScalarQuantizer.cpp +4 -4
- data/vendor/faiss/faiss/gpu/test/TestUtils.cpp +7 -7
- data/vendor/faiss/faiss/gpu/test/TestUtils.h +4 -4
- data/vendor/faiss/faiss/gpu/test/demo_ivfpq_indexing_gpu.cpp +1 -1
- data/vendor/faiss/faiss/gpu/utils/DeviceUtils.h +6 -0
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.cpp +0 -7
- data/vendor/faiss/faiss/impl/AdditiveQuantizer.h +9 -9
- data/vendor/faiss/faiss/impl/AuxIndexStructures.cpp +1 -1
- data/vendor/faiss/faiss/impl/AuxIndexStructures.h +2 -7
- data/vendor/faiss/faiss/impl/CodePacker.cpp +67 -0
- data/vendor/faiss/faiss/impl/CodePacker.h +71 -0
- data/vendor/faiss/faiss/impl/DistanceComputer.h +0 -2
- data/vendor/faiss/faiss/impl/HNSW.cpp +3 -7
- data/vendor/faiss/faiss/impl/HNSW.h +6 -9
- data/vendor/faiss/faiss/impl/IDSelector.cpp +1 -1
- data/vendor/faiss/faiss/impl/IDSelector.h +39 -1
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.cpp +62 -51
- data/vendor/faiss/faiss/impl/LocalSearchQuantizer.h +11 -12
- data/vendor/faiss/faiss/impl/NNDescent.cpp +3 -9
- data/vendor/faiss/faiss/impl/NNDescent.h +10 -10
- data/vendor/faiss/faiss/impl/NSG.cpp +1 -6
- data/vendor/faiss/faiss/impl/NSG.h +4 -7
- data/vendor/faiss/faiss/impl/PolysemousTraining.cpp +1 -15
- data/vendor/faiss/faiss/impl/PolysemousTraining.h +11 -10
- data/vendor/faiss/faiss/impl/ProductAdditiveQuantizer.cpp +0 -7
- data/vendor/faiss/faiss/impl/ProductQuantizer.cpp +25 -12
- data/vendor/faiss/faiss/impl/ProductQuantizer.h +2 -4
- data/vendor/faiss/faiss/impl/Quantizer.h +6 -3
- data/vendor/faiss/faiss/impl/ResidualQuantizer.cpp +796 -174
- data/vendor/faiss/faiss/impl/ResidualQuantizer.h +16 -8
- data/vendor/faiss/faiss/impl/ScalarQuantizer.cpp +3 -5
- data/vendor/faiss/faiss/impl/ScalarQuantizer.h +4 -4
- data/vendor/faiss/faiss/impl/ThreadedIndex-inl.h +3 -3
- data/vendor/faiss/faiss/impl/ThreadedIndex.h +4 -4
- data/vendor/faiss/faiss/impl/code_distance/code_distance-avx2.h +291 -0
- data/vendor/faiss/faiss/impl/code_distance/code_distance-generic.h +74 -0
- data/vendor/faiss/faiss/impl/code_distance/code_distance.h +123 -0
- data/vendor/faiss/faiss/impl/code_distance/code_distance_avx512.h +102 -0
- data/vendor/faiss/faiss/impl/index_read.cpp +13 -10
- data/vendor/faiss/faiss/impl/index_write.cpp +3 -4
- data/vendor/faiss/faiss/impl/kmeans1d.cpp +0 -1
- data/vendor/faiss/faiss/impl/kmeans1d.h +3 -3
- data/vendor/faiss/faiss/impl/lattice_Zn.cpp +1 -1
- data/vendor/faiss/faiss/impl/platform_macros.h +61 -0
- data/vendor/faiss/faiss/impl/pq4_fast_scan.cpp +48 -4
- data/vendor/faiss/faiss/impl/pq4_fast_scan.h +18 -4
- data/vendor/faiss/faiss/impl/pq4_fast_scan_search_qbs.cpp +2 -2
- data/vendor/faiss/faiss/index_factory.cpp +8 -10
- data/vendor/faiss/faiss/invlists/BlockInvertedLists.cpp +29 -12
- data/vendor/faiss/faiss/invlists/BlockInvertedLists.h +8 -2
- data/vendor/faiss/faiss/invlists/DirectMap.cpp +1 -1
- data/vendor/faiss/faiss/invlists/DirectMap.h +2 -4
- data/vendor/faiss/faiss/invlists/InvertedLists.cpp +118 -18
- data/vendor/faiss/faiss/invlists/InvertedLists.h +44 -4
- data/vendor/faiss/faiss/invlists/OnDiskInvertedLists.cpp +3 -3
- data/vendor/faiss/faiss/invlists/OnDiskInvertedLists.h +1 -1
- data/vendor/faiss/faiss/python/python_callbacks.cpp +1 -1
- data/vendor/faiss/faiss/python/python_callbacks.h +1 -1
- data/vendor/faiss/faiss/utils/AlignedTable.h +3 -1
- data/vendor/faiss/faiss/utils/Heap.cpp +139 -3
- data/vendor/faiss/faiss/utils/Heap.h +35 -1
- data/vendor/faiss/faiss/utils/approx_topk/approx_topk.h +84 -0
- data/vendor/faiss/faiss/utils/approx_topk/avx2-inl.h +196 -0
- data/vendor/faiss/faiss/utils/approx_topk/generic.h +138 -0
- data/vendor/faiss/faiss/utils/approx_topk/mode.h +34 -0
- data/vendor/faiss/faiss/utils/approx_topk_hamming/approx_topk_hamming.h +367 -0
- data/vendor/faiss/faiss/utils/distances.cpp +61 -7
- data/vendor/faiss/faiss/utils/distances.h +11 -0
- data/vendor/faiss/faiss/utils/distances_fused/avx512.cpp +346 -0
- data/vendor/faiss/faiss/utils/distances_fused/avx512.h +36 -0
- data/vendor/faiss/faiss/utils/distances_fused/distances_fused.cpp +42 -0
- data/vendor/faiss/faiss/utils/distances_fused/distances_fused.h +40 -0
- data/vendor/faiss/faiss/utils/distances_fused/simdlib_based.cpp +352 -0
- data/vendor/faiss/faiss/utils/distances_fused/simdlib_based.h +32 -0
- data/vendor/faiss/faiss/utils/distances_simd.cpp +515 -327
- data/vendor/faiss/faiss/utils/extra_distances-inl.h +17 -1
- data/vendor/faiss/faiss/utils/extra_distances.cpp +37 -8
- data/vendor/faiss/faiss/utils/extra_distances.h +2 -1
- data/vendor/faiss/faiss/utils/fp16-fp16c.h +7 -0
- data/vendor/faiss/faiss/utils/fp16-inl.h +7 -0
- data/vendor/faiss/faiss/utils/fp16.h +7 -0
- data/vendor/faiss/faiss/utils/hamming-inl.h +0 -456
- data/vendor/faiss/faiss/utils/hamming.cpp +104 -120
- data/vendor/faiss/faiss/utils/hamming.h +21 -10
- data/vendor/faiss/faiss/utils/hamming_distance/avx2-inl.h +535 -0
- data/vendor/faiss/faiss/utils/hamming_distance/common.h +48 -0
- data/vendor/faiss/faiss/utils/hamming_distance/generic-inl.h +519 -0
- data/vendor/faiss/faiss/utils/hamming_distance/hamdis-inl.h +26 -0
- data/vendor/faiss/faiss/utils/hamming_distance/neon-inl.h +614 -0
- data/vendor/faiss/faiss/utils/partitioning.cpp +21 -25
- data/vendor/faiss/faiss/utils/simdlib_avx2.h +344 -3
- data/vendor/faiss/faiss/utils/simdlib_emulated.h +390 -0
- data/vendor/faiss/faiss/utils/simdlib_neon.h +655 -130
- data/vendor/faiss/faiss/utils/sorting.cpp +692 -0
- data/vendor/faiss/faiss/utils/sorting.h +71 -0
- data/vendor/faiss/faiss/utils/transpose/transpose-avx2-inl.h +165 -0
- data/vendor/faiss/faiss/utils/utils.cpp +4 -176
- data/vendor/faiss/faiss/utils/utils.h +2 -9
- metadata +29 -3
- data/vendor/faiss/faiss/gpu/GpuClonerOptions.cpp +0 -26
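The bulk of this release is a refresh of the vendored Faiss sources. The largest single addition is an ARM NEON port of the Hamming distance kernels, the new file data/vendor/faiss/faiss/utils/hamming_distance/neon-inl.h (+614 lines), reproduced below. Lines beginning with "// NOTE:" are editorial annotations, not part of the vendored file.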
@@ -0,0 +1,614 @@
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifndef HAMMING_NEON_INL_H
#define HAMMING_NEON_INL_H

// a specialized version of hamming is needed here, because both
// gcc, clang and msvc seem to generate suboptimal code sometimes.

#ifdef __aarch64__

#include <arm_neon.h>

#include <cassert>
#include <cstddef>
#include <cstdint>

#include <faiss/impl/platform_macros.h>

#include <faiss/utils/hamming_distance/common.h>

namespace faiss {

/* Elementary Hamming distance computation: unoptimized */
template <size_t nbits, typename T>
inline T hamming(const uint8_t* bs1, const uint8_t* bs2) {
    const size_t nbytes = nbits / 8;
    size_t i;
    T h = 0;
    for (i = 0; i < nbytes; i++) {
        h += (T)hamdis_tab_ham_bytes[bs1[i] ^ bs2[i]];
    }
    return h;
}

/* Hamming distances for multiples of 64 bits */
template <size_t nbits>
inline hamdis_t hamming(const uint64_t* pa, const uint64_t* pb) {
    constexpr size_t nwords256 = nbits / 256;
    constexpr size_t nwords128 = (nbits - nwords256 * 256) / 128;
    constexpr size_t nwords64 =
            (nbits - nwords256 * 256 - nwords128 * 128) / 64;

    hamdis_t h = 0;
    if (nwords256 > 0) {
        for (size_t i = 0; i < nwords256; i++) {
            h += hamming<256>(pa, pb);
            pa += 4;
            pb += 4;
        }
    }

    if (nwords128 > 0) {
        h += hamming<128>(pa, pb);
        pa += 2;
        pb += 2;
    }

    if (nwords64 > 0) {
        h += hamming<64>(pa, pb);
    }

    return h;
}
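// NOTE: the decomposition above is compile-time arithmetic on the bit count.
// For example, nbits = 448 gives nwords256 = 1, nwords128 = 1, nwords64 = 1:
// one 256-bit block (four uint64 words), one 128-bit block (two words), and
// one trailing 64-bit word, i.e. 448 = 256 + 128 + 64.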
/* specialized (optimized) functions */
template <>
inline hamdis_t hamming<64>(const uint64_t* pa, const uint64_t* pb) {
    return popcount64(pa[0] ^ pb[0]);
}

template <>
inline hamdis_t hamming<128>(const uint64_t* pa, const uint64_t* pb) {
    const uint8_t* pa8 = reinterpret_cast<const uint8_t*>(pa);
    const uint8_t* pb8 = reinterpret_cast<const uint8_t*>(pb);
    uint8x16_t or0 = veorq_u8(vld1q_u8(pa8), vld1q_u8(pb8));
    uint8x16_t c0 = vcntq_u8(or0);
    auto dis = vaddvq_u8(c0);
    return dis;
}

template <>
inline hamdis_t hamming<256>(const uint64_t* pa, const uint64_t* pb) {
    const uint8_t* pa8 = reinterpret_cast<const uint8_t*>(pa);
    const uint8_t* pb8 = reinterpret_cast<const uint8_t*>(pb);
    uint8x16_t or0 = veorq_u8(vld1q_u8(pa8), vld1q_u8(pb8));
    uint8x16_t or1 = veorq_u8(vld1q_u8(pa8 + 16), vld1q_u8(pb8 + 16));
    uint8x16_t c0 = vcntq_u8(or0);
    uint8x16_t c1 = vcntq_u8(or1);
    uint8x16_t ca = vpaddq_u8(c0, c1);
    auto dis = vaddvq_u8(ca);
    return dis;
}
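// NOTE: this is the standard NEON popcount idiom: veorq_u8 XORs 16 bytes at
// a time, vcntq_u8 takes a per-byte popcount, and vaddvq_u8 reduces the
// vector to one scalar; the 256-bit variant first combines the two count
// vectors with a pairwise add (vpaddq_u8). vaddvq_u8 returns a uint8_t, so
// the 256-bit reduction can reach 256 and wrap when every bit differs; the
// 128-bit variant's maximum of 128 always fits.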
/* Hamming distances for multiple of 64 bits */
inline hamdis_t hamming(const uint64_t* pa, const uint64_t* pb, size_t nwords) {
    const size_t nwords256 = nwords / 256;
    const size_t nwords128 = (nwords - nwords256 * 256) / 128;
    const size_t nwords64 = (nwords - nwords256 * 256 - nwords128 * 128) / 64;

    hamdis_t h = 0;
    if (nwords256 > 0) {
        for (size_t i = 0; i < nwords256; i++) {
            h += hamming<256>(pa, pb);
            pa += 4;
            pb += 4;
        }
    }

    if (nwords128 > 0) {
        h += hamming<128>(pa, pb);
        pa += 2;
        pb += 2;
    }

    if (nwords64 > 0) {
        h += hamming<64>(pa, pb);
    }

    return h;
}
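// NOTE: this runtime variant mirrors the templated one above, but nwords
// counts 64-bit words rather than bits, so with the 256/128/64 divisors the
// blocked paths only engage for codes of at least 64 words (4096 bits).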
/******************************************************************
 * The HammingComputer series of classes compares a single code of
 * size 4 to 32 to incoming codes. They are intended for use as a
 * template class where it would be inefficient to switch on the code
 * size in the inner loop. Hopefully the compiler will inline the
 * hamming() functions and put the a0, a1, ... in registers.
 ******************************************************************/

struct HammingComputer4 {
    uint32_t a0;

    HammingComputer4() {}

    HammingComputer4(const uint8_t* a, int code_size) {
        set(a, code_size);
    }

    void set(const uint8_t* a, int code_size) {
        assert(code_size == 4);
        a0 = *(uint32_t*)a;
    }

    inline int hamming(const uint8_t* b) const {
        return popcount64(*(uint32_t*)b ^ a0);
    }

    inline static constexpr int get_code_size() {
        return 4;
    }
};

struct HammingComputer8 {
    uint64_t a0;

    HammingComputer8() {}

    HammingComputer8(const uint8_t* a, int code_size) {
        set(a, code_size);
    }

    void set(const uint8_t* a, int code_size) {
        assert(code_size == 8);
        a0 = *(uint64_t*)a;
    }

    inline int hamming(const uint8_t* b) const {
        return popcount64(*(uint64_t*)b ^ a0);
    }

    inline static constexpr int get_code_size() {
        return 8;
    }
};

struct HammingComputer16 {
    uint8x16_t a0;

    HammingComputer16() {}

    HammingComputer16(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        assert(code_size == 16);
        a0 = vld1q_u8(a8);
    }

    inline int hamming(const uint8_t* b8) const {
        uint8x16_t b0 = vld1q_u8(b8);

        uint8x16_t or0 = veorq_u8(a0, b0);
        uint8x16_t c0 = vcntq_u8(or0);
        auto dis = vaddvq_u8(c0);
        return dis;
    }

    inline static constexpr int get_code_size() {
        return 16;
    }
};

// when applied to an array, 1/2 of the 64-bit accesses are unaligned.
// This incurs a penalty of ~10% wrt. fully aligned accesses.
struct HammingComputer20 {
    uint8x16_t a0;
    uint32_t a2;

    HammingComputer20() {}

    HammingComputer20(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        assert(code_size == 20);

        a0 = vld1q_u8(a8);

        const uint32_t* a = (uint32_t*)a8;
        a2 = a[4];
    }

    inline int hamming(const uint8_t* b8) const {
        uint8x16_t b0 = vld1q_u8(b8);

        uint8x16_t or0 = veorq_u8(a0, b0);
        uint8x16_t c0 = vcntq_u8(or0);
        auto dis = vaddvq_u8(c0);

        const uint32_t* b = (uint32_t*)b8;
        return dis + popcount64(b[4] ^ a2);
    }

    inline static constexpr int get_code_size() {
        return 20;
    }
};

struct HammingComputer32 {
    uint8x16_t a0;
    uint8x16_t a1;

    HammingComputer32() {}

    HammingComputer32(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        assert(code_size == 32);
        a0 = vld1q_u8(a8);
        a1 = vld1q_u8(a8 + 16);
    }

    inline int hamming(const uint8_t* b8) const {
        const uint64_t* b = (uint64_t*)b8;
        uint8x16_t b0 = vld1q_u8(b8);
        uint8x16_t b1 = vld1q_u8(b8 + 16);

        uint8x16_t or0 = veorq_u8(a0, b0);
        uint8x16_t or1 = veorq_u8(a1, b1);
        uint8x16_t c0 = vcntq_u8(or0);
        uint8x16_t c1 = vcntq_u8(or1);
        uint8x16_t ca = vpaddq_u8(c0, c1);
        auto dis = vaddvq_u8(ca);
        return dis;
    }

    inline static constexpr int get_code_size() {
        return 32;
    }
};

struct HammingComputer64 {
    HammingComputer32 hc0, hc1;

    HammingComputer64() {}

    HammingComputer64(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        assert(code_size == 64);
        hc0.set(a8, 32);
        hc1.set(a8 + 32, 32);
    }

    inline int hamming(const uint8_t* b8) const {
        return hc0.hamming(b8) + hc1.hamming(b8 + 32);
    }

    inline static constexpr int get_code_size() {
        return 64;
    }
};

struct HammingComputerDefault {
    const uint8_t* a8;
    int quotient8;
    int remainder8;

    HammingComputerDefault() {}

    HammingComputerDefault(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        this->a8 = a8;
        quotient8 = code_size / 8;
        remainder8 = code_size % 8;
    }

    int hamming(const uint8_t* b8) const {
        int accu = 0;

        const uint64_t* a64 = reinterpret_cast<const uint64_t*>(a8);
        const uint64_t* b64 = reinterpret_cast<const uint64_t*>(b8);
        int i = 0, len = quotient8;

        int len256 = (quotient8 / 4) * 4;
        for (; i < len256; i += 4) {
            accu += ::faiss::hamming<256>(a64 + i, b64 + i);
            len -= 4;
        }

        switch (len & 7) {
            default:
                while (len > 7) {
                    len -= 8;
                    accu += popcount64(a64[i] ^ b64[i]);
                    i++;
                    case 7:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 6:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 5:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 4:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 3:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 2:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                    case 1:
                        accu += popcount64(a64[i] ^ b64[i]);
                        i++;
                }
        }
        if (remainder8) {
            const uint8_t* a = a8 + 8 * quotient8;
            const uint8_t* b = b8 + 8 * quotient8;
            switch (remainder8) {
                case 7:
                    accu += hamdis_tab_ham_bytes[a[6] ^ b[6]];
                case 6:
                    accu += hamdis_tab_ham_bytes[a[5] ^ b[5]];
                case 5:
                    accu += hamdis_tab_ham_bytes[a[4] ^ b[4]];
                case 4:
                    accu += hamdis_tab_ham_bytes[a[3] ^ b[3]];
                case 3:
                    accu += hamdis_tab_ham_bytes[a[2] ^ b[2]];
                case 2:
                    accu += hamdis_tab_ham_bytes[a[1] ^ b[1]];
                case 1:
                    accu += hamdis_tab_ham_bytes[a[0] ^ b[0]];
                default:
                    break;
            }
        }

        return accu;
    }

    inline int get_code_size() const {
        return quotient8 * 8 + remainder8;
    }
};
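// NOTE: HammingComputerDefault above handles arbitrary code sizes in three
// stages: 256-bit NEON blocks while at least four whole 64-bit words remain,
// a Duff's-device switch that unrolls the leftover whole words, and a
// byte-table lookup (hamdis_tab_ham_bytes) for the final 1-7 bytes.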
// more inefficient than HammingComputerDefault (obsolete)
struct HammingComputerM8 {
    const uint64_t* a;
    int n;

    HammingComputerM8() {}

    HammingComputerM8(const uint8_t* a8, int code_size) {
        set(a8, code_size);
    }

    void set(const uint8_t* a8, int code_size) {
        assert(code_size % 8 == 0);
        a = (uint64_t*)a8;
        n = code_size / 8;
    }

    int hamming(const uint8_t* b8) const {
        const uint64_t* b = (uint64_t*)b8;
        int n4 = (n / 4) * 4;
        int accu = 0;

        int i = 0;
        for (; i < n4; i += 4) {
            accu += ::faiss::hamming<256>(a + i, b + i);
        }
        for (; i < n; i++) {
            accu += popcount64(a[i] ^ b[i]);
        }
        return accu;
    }

    inline int get_code_size() const {
        return n * 8;
    }
};

// more inefficient than HammingComputerDefault (obsolete)
struct HammingComputerM4 {
    const uint32_t* a;
    int n;

    HammingComputerM4() {}

    HammingComputerM4(const uint8_t* a4, int code_size) {
        set(a4, code_size);
    }

    void set(const uint8_t* a4, int code_size) {
        assert(code_size % 4 == 0);
        a = (uint32_t*)a4;
        n = code_size / 4;
    }

    int hamming(const uint8_t* b8) const {
        const uint32_t* b = (uint32_t*)b8;

        int n8 = (n / 8) * 8;
        int accu = 0;

        int i = 0;
        for (; i < n8; i += 8) {
            accu += ::faiss::hamming<256>(
                    (const uint64_t*)(a + i), (const uint64_t*)(b + i));
        }
        for (; i < n; i++) {
            accu += popcount64(a[i] ^ b[i]);
        }
        return accu;
    }

    inline int get_code_size() const {
        return n * 4;
    }
};

/***************************************************************************
 * Equivalence with a template class when code size is known at compile time
 **************************************************************************/

// default template
template <int CODE_SIZE>
struct HammingComputer : HammingComputerDefault {
    HammingComputer(const uint8_t* a, int code_size)
            : HammingComputerDefault(a, code_size) {}
};

#define SPECIALIZED_HC(CODE_SIZE)                                    \
    template <>                                                      \
    struct HammingComputer<CODE_SIZE> : HammingComputer##CODE_SIZE { \
        HammingComputer(const uint8_t* a)                            \
                : HammingComputer##CODE_SIZE(a, CODE_SIZE) {}        \
    }

SPECIALIZED_HC(4);
SPECIALIZED_HC(8);
SPECIALIZED_HC(16);
SPECIALIZED_HC(20);
SPECIALIZED_HC(32);
SPECIALIZED_HC(64);

#undef SPECIALIZED_HC
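// NOTE: HammingComputer<CODE_SIZE> is the compile-time entry point: the
// specializations above map code sizes 4/8/16/20/32/64 to the hand-written
// computers, and every other size falls back to HammingComputerDefault.
// A standalone usage sketch follows the end of the file.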
/***************************************************************************
 * generalized Hamming = number of bytes that are different between
 * two codes.
 ***************************************************************************/

inline int generalized_hamming_64(uint64_t a) {
    a |= a >> 1;
    a |= a >> 2;
    a |= a >> 4;
    a &= 0x0101010101010101UL;
    return popcount64(a);
}
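// NOTE: generalized_hamming_64 counts the nonzero bytes of a (callers pass
// the XOR of two codes). The three shifted ORs fold all eight bits of each
// byte into its lowest bit, the mask keeps only those low bits, and
// popcount64 counts them. E.g. a byte 0x40 becomes 0x7f after the folds,
// then 0x01 after the mask, contributing 1.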
struct GenHammingComputer8 {
    uint8x8_t a0;

    GenHammingComputer8(const uint8_t* a8, int code_size) {
        assert(code_size == 8);
        a0 = vld1_u8(a8);
    }

    inline int hamming(const uint8_t* b8) const {
        uint8x8_t b0 = vld1_u8(b8);
        uint8x8_t reg = vceq_u8(a0, b0);
        uint8x8_t c0 = vcnt_u8(reg);
        return 8 - vaddv_u8(c0) / 8;
    }

    inline static constexpr int get_code_size() {
        return 8;
    }
};

struct GenHammingComputer16 {
    uint8x16_t a0;

    GenHammingComputer16(const uint8_t* a8, int code_size) {
        assert(code_size == 16);
        a0 = vld1q_u8(a8);
    }

    inline int hamming(const uint8_t* b8) const {
        uint8x16_t b0 = vld1q_u8(b8);
        uint8x16_t reg = vceqq_u8(a0, b0);
        uint8x16_t c0 = vcntq_u8(reg);
        return 16 - vaddvq_u8(c0) / 8;
    }

    inline static constexpr int get_code_size() {
        return 16;
    }
};

struct GenHammingComputer32 {
    GenHammingComputer16 a0, a1;

    GenHammingComputer32(const uint8_t* a8, int code_size)
            : a0(a8, 16), a1(a8 + 16, 16) {
        assert(code_size == 32);
    }

    inline int hamming(const uint8_t* b8) const {
        return a0.hamming(b8) + a1.hamming(b8 + 16);
    }

    inline static constexpr int get_code_size() {
        return 32;
    }
};

struct GenHammingComputerM8 {
    const uint64_t* a;
    int n;

    GenHammingComputerM8(const uint8_t* a8, int code_size) {
        assert(code_size % 8 == 0);
        a = (uint64_t*)a8;
        n = code_size / 8;
    }

    int hamming(const uint8_t* b8) const {
        const uint64_t* b = (uint64_t*)b8;
        int accu = 0;

        int n2 = (n / 2) * 2;
        int i = 0;
        for (; i < n2; i += 2) {
            uint8x16_t a0 = vld1q_u8((const uint8_t*)(a + i));
            uint8x16_t b0 = vld1q_u8((const uint8_t*)(b + i));
            uint8x16_t reg = vceqq_u8(a0, b0);
            uint8x16_t c0 = vcntq_u8(reg);
            auto dis = 16 - vaddvq_u8(c0) / 8;
            accu += dis;
        }

        for (; i < n; i++) {
            uint8x8_t a0 = vld1_u8((const uint8_t*)(a + i));
            uint8x8_t b0 = vld1_u8((const uint8_t*)(b + i));
            uint8x8_t reg = vceq_u8(a0, b0);
            uint8x8_t c0 = vcnt_u8(reg);
            auto dis = 8 - vaddv_u8(c0) / 8;
            accu += dis;
        }

        return accu;
    }

    inline int get_code_size() {
        return n * 8;
    }
};

} // namespace faiss

#endif

#endif
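For orientation, here is a minimal standalone sketch of how the HammingComputer template is typically driven; it is not taken from this release. It assumes the computers are reachable via faiss/utils/hamming.h (which this version routes through the new hamming_distance/hamdis-inl.h dispatcher) and that the file is compiled alongside the vendored faiss sources.

#include <faiss/utils/hamming.h>

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Scan a database of 32-byte (256-bit) binary codes against one query code
// and report the smallest Hamming distance found.
int main() {
    const size_t n = 1000;
    const size_t code_size = 32;

    std::vector<uint8_t> codes(n * code_size, 0x01); // placeholder database
    std::vector<uint8_t> query(code_size, 0x03);     // placeholder query

    // Fixed code size: the specialized computer loads the query into
    // registers once and reuses it for every database code.
    faiss::HammingComputer<32> hc(query.data());

    int best = 1 << 30;
    for (size_t j = 0; j < n; j++) {
        int d = hc.hamming(codes.data() + j * code_size);
        if (d < best) {
            best = d;
        }
    }
    printf("min distance: %d\n", best); // 1 differing bit per byte -> 32
    return 0;
}

On AArch64 the inner call resolves to the NEON HammingComputer32 above; on x86 or other targets the dispatcher selects the AVX2 or generic implementation with the same interface.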