minimap2 0.2.24.3 → 0.2.24.6

Files changed (101)
  1. checksums.yaml +4 -4
  2. data/ext/minimap2/lib/simde/CONTRIBUTING.md +114 -0
  3. data/ext/minimap2/lib/simde/COPYING +20 -0
  4. data/ext/minimap2/lib/simde/README.md +333 -0
  5. data/ext/minimap2/lib/simde/amalgamate.py +58 -0
  6. data/ext/minimap2/lib/simde/meson.build +33 -0
  7. data/ext/minimap2/lib/simde/netlify.toml +20 -0
  8. data/ext/minimap2/lib/simde/simde/arm/neon/float32x2.h +140 -0
  9. data/ext/minimap2/lib/simde/simde/arm/neon/float32x4.h +137 -0
  10. data/ext/minimap2/lib/simde/simde/arm/neon/float64x1.h +142 -0
  11. data/ext/minimap2/lib/simde/simde/arm/neon/float64x2.h +145 -0
  12. data/ext/minimap2/lib/simde/simde/arm/neon/int16x4.h +140 -0
  13. data/ext/minimap2/lib/simde/simde/arm/neon/int16x8.h +145 -0
  14. data/ext/minimap2/lib/simde/simde/arm/neon/int32x2.h +140 -0
  15. data/ext/minimap2/lib/simde/simde/arm/neon/int32x4.h +143 -0
  16. data/ext/minimap2/lib/simde/simde/arm/neon/int64x1.h +137 -0
  17. data/ext/minimap2/lib/simde/simde/arm/neon/int64x2.h +141 -0
  18. data/ext/minimap2/lib/simde/simde/arm/neon/int8x16.h +147 -0
  19. data/ext/minimap2/lib/simde/simde/arm/neon/int8x8.h +141 -0
  20. data/ext/minimap2/lib/simde/simde/arm/neon/uint16x4.h +134 -0
  21. data/ext/minimap2/lib/simde/simde/arm/neon/uint16x8.h +138 -0
  22. data/ext/minimap2/lib/simde/simde/arm/neon/uint32x2.h +134 -0
  23. data/ext/minimap2/lib/simde/simde/arm/neon/uint32x4.h +137 -0
  24. data/ext/minimap2/lib/simde/simde/arm/neon/uint64x1.h +131 -0
  25. data/ext/minimap2/lib/simde/simde/arm/neon/uint64x2.h +135 -0
  26. data/ext/minimap2/lib/simde/simde/arm/neon/uint8x16.h +141 -0
  27. data/ext/minimap2/lib/simde/simde/arm/neon/uint8x8.h +135 -0
  28. data/ext/minimap2/lib/simde/simde/arm/neon.h +97 -0
  29. data/ext/minimap2/lib/simde/simde/check.h +267 -0
  30. data/ext/minimap2/lib/simde/simde/debug-trap.h +83 -0
  31. data/ext/minimap2/lib/simde/simde/hedley.h +1899 -0
  32. data/ext/minimap2/lib/simde/simde/simde-arch.h +445 -0
  33. data/ext/minimap2/lib/simde/simde/simde-common.h +697 -0
  34. data/ext/minimap2/lib/simde/simde/x86/avx.h +5385 -0
  35. data/ext/minimap2/lib/simde/simde/x86/avx2.h +2402 -0
  36. data/ext/minimap2/lib/simde/simde/x86/avx512bw.h +391 -0
  37. data/ext/minimap2/lib/simde/simde/x86/avx512f.h +3389 -0
  38. data/ext/minimap2/lib/simde/simde/x86/avx512vl.h +112 -0
  39. data/ext/minimap2/lib/simde/simde/x86/fma.h +659 -0
  40. data/ext/minimap2/lib/simde/simde/x86/mmx.h +2210 -0
  41. data/ext/minimap2/lib/simde/simde/x86/sse.h +3696 -0
  42. data/ext/minimap2/lib/simde/simde/x86/sse2.h +5991 -0
  43. data/ext/minimap2/lib/simde/simde/x86/sse3.h +343 -0
  44. data/ext/minimap2/lib/simde/simde/x86/sse4.1.h +1783 -0
  45. data/ext/minimap2/lib/simde/simde/x86/sse4.2.h +105 -0
  46. data/ext/minimap2/lib/simde/simde/x86/ssse3.h +1053 -0
  47. data/ext/minimap2/lib/simde/simde/x86/svml.h +543 -0
  48. data/ext/minimap2/lib/simde/test/CMakeLists.txt +166 -0
  49. data/ext/minimap2/lib/simde/test/arm/meson.build +4 -0
  50. data/ext/minimap2/lib/simde/test/arm/neon/meson.build +23 -0
  51. data/ext/minimap2/lib/simde/test/arm/neon/skel.c +871 -0
  52. data/ext/minimap2/lib/simde/test/arm/neon/test-neon-internal.h +134 -0
  53. data/ext/minimap2/lib/simde/test/arm/neon/test-neon.c +39 -0
  54. data/ext/minimap2/lib/simde/test/arm/neon/test-neon.h +10 -0
  55. data/ext/minimap2/lib/simde/test/arm/neon/vadd.c +1260 -0
  56. data/ext/minimap2/lib/simde/test/arm/neon/vdup_n.c +873 -0
  57. data/ext/minimap2/lib/simde/test/arm/neon/vmul.c +1084 -0
  58. data/ext/minimap2/lib/simde/test/arm/neon/vsub.c +1260 -0
  59. data/ext/minimap2/lib/simde/test/arm/test-arm-internal.h +18 -0
  60. data/ext/minimap2/lib/simde/test/arm/test-arm.c +20 -0
  61. data/ext/minimap2/lib/simde/test/arm/test-arm.h +8 -0
  62. data/ext/minimap2/lib/simde/test/cmake/AddCompilerFlags.cmake +171 -0
  63. data/ext/minimap2/lib/simde/test/cmake/ExtraWarningFlags.cmake +68 -0
  64. data/ext/minimap2/lib/simde/test/meson.build +64 -0
  65. data/ext/minimap2/lib/simde/test/munit/COPYING +21 -0
  66. data/ext/minimap2/lib/simde/test/munit/Makefile +55 -0
  67. data/ext/minimap2/lib/simde/test/munit/README.md +54 -0
  68. data/ext/minimap2/lib/simde/test/munit/example.c +351 -0
  69. data/ext/minimap2/lib/simde/test/munit/meson.build +37 -0
  70. data/ext/minimap2/lib/simde/test/munit/munit.c +2055 -0
  71. data/ext/minimap2/lib/simde/test/munit/munit.h +535 -0
  72. data/ext/minimap2/lib/simde/test/run-tests.c +20 -0
  73. data/ext/minimap2/lib/simde/test/run-tests.h +260 -0
  74. data/ext/minimap2/lib/simde/test/x86/avx.c +13752 -0
  75. data/ext/minimap2/lib/simde/test/x86/avx2.c +9977 -0
  76. data/ext/minimap2/lib/simde/test/x86/avx512bw.c +2664 -0
  77. data/ext/minimap2/lib/simde/test/x86/avx512f.c +10416 -0
  78. data/ext/minimap2/lib/simde/test/x86/avx512vl.c +210 -0
  79. data/ext/minimap2/lib/simde/test/x86/fma.c +2557 -0
  80. data/ext/minimap2/lib/simde/test/x86/meson.build +33 -0
  81. data/ext/minimap2/lib/simde/test/x86/mmx.c +2878 -0
  82. data/ext/minimap2/lib/simde/test/x86/skel.c +2984 -0
  83. data/ext/minimap2/lib/simde/test/x86/sse.c +5121 -0
  84. data/ext/minimap2/lib/simde/test/x86/sse2.c +9860 -0
  85. data/ext/minimap2/lib/simde/test/x86/sse3.c +486 -0
  86. data/ext/minimap2/lib/simde/test/x86/sse4.1.c +3446 -0
  87. data/ext/minimap2/lib/simde/test/x86/sse4.2.c +101 -0
  88. data/ext/minimap2/lib/simde/test/x86/ssse3.c +2084 -0
  89. data/ext/minimap2/lib/simde/test/x86/svml.c +1545 -0
  90. data/ext/minimap2/lib/simde/test/x86/test-avx.h +16 -0
  91. data/ext/minimap2/lib/simde/test/x86/test-avx512.h +25 -0
  92. data/ext/minimap2/lib/simde/test/x86/test-mmx.h +13 -0
  93. data/ext/minimap2/lib/simde/test/x86/test-sse.h +13 -0
  94. data/ext/minimap2/lib/simde/test/x86/test-sse2.h +13 -0
  95. data/ext/minimap2/lib/simde/test/x86/test-x86-internal.h +196 -0
  96. data/ext/minimap2/lib/simde/test/x86/test-x86.c +48 -0
  97. data/ext/minimap2/lib/simde/test/x86/test-x86.h +8 -0
  98. data/lib/minimap2/aligner.rb +2 -2
  99. data/lib/minimap2/ffi/constants.rb +3 -0
  100. data/lib/minimap2/version.rb +1 -1
  101. metadata +99 -3
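Most of the additions are the vendored SIMDe headers (the simde/x86/*.h files above), which provide portable C implementations of Intel SIMD intrinsics, presumably so the gem's native extension can build minimap2 on platforms without SSE/AVX. As a rough sketch of how such headers are typically consumed (illustrative only, not code from this gem; it assumes the bundled simde/ directory is on the include path):

/* Illustrative only: existing SSE2 code keeps its _mm_* calls, and on a
 * machine without SSE2 the SIMDE_ENABLE_NATIVE_ALIASES macro maps them to
 * the portable simde_* implementations in the headers listed above. */
#define SIMDE_ENABLE_NATIVE_ALIASES
#include "simde/x86/sse2.h"

#include <stdint.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8((char) 200);
  __m128i b = _mm_set1_epi8((char) 100);
  __m128i r = _mm_adds_epu8(a, b);        /* unsigned saturating add */

  uint8_t out[16];
  _mm_storeu_si128((__m128i *) out, r);
  printf("%u\n", out[0]);                 /* 255 on every architecture */
  return 0;
}

The new avx512bw.h shown below follows the same pattern for the AVX-512BW intrinsics.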
--- /dev/null
+++ b/data/ext/minimap2/lib/simde/simde/x86/avx512bw.h
@@ -0,0 +1,391 @@
+ /* Permission is hereby granted, free of charge, to any person
+  * obtaining a copy of this software and associated documentation
+  * files (the "Software"), to deal in the Software without
+  * restriction, including without limitation the rights to use, copy,
+  * modify, merge, publish, distribute, sublicense, and/or sell copies
+  * of the Software, and to permit persons to whom the Software is
+  * furnished to do so, subject to the following conditions:
+  *
+  * The above copyright notice and this permission notice shall be
+  * included in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
+  *
+  * Copyright:
+  *   2020 Evan Nemerson <evan@nemerson.com>
+  */
+
+ #if !defined(SIMDE__AVX512BW_H)
+ #  if !defined(SIMDE__AVX512BW_H)
+ #    define SIMDE__AVX512BW_H
+ #  endif
+ #  include "avx512f.h"
+
+ HEDLEY_DIAGNOSTIC_PUSH
+ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
+
+ #  if defined(SIMDE_AVX512BW_NATIVE)
+ #    undef SIMDE_AVX512BW_NATIVE
+ #  endif
+ #  if defined(SIMDE_ARCH_X86_AVX512BW) && !defined(SIMDE_AVX512BW_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
+ #    define SIMDE_AVX512BW_NATIVE
+ #  elif defined(SIMDE_ARCH_ARM_NEON) && !defined(SIMDE_AVX512BW_NO_NEON) && !defined(SIMDE_NO_NEON)
+ #    define SIMDE_AVX512BW_NEON
+ #  elif defined(SIMDE_ARCH_POWER_ALTIVEC)
+ #    define SIMDE_AVX512BW_POWER_ALTIVEC
+ #  endif
+
+ #  if defined(SIMDE_AVX512BW_NATIVE)
+ #    include <immintrin.h>
+ #  endif
+
+ #  if defined(SIMDE_AVX512BW_POWER_ALTIVEC)
+ #    include <altivec.h>
+ #  endif
+
+ #if !defined(SIMDE_AVX512BW_NATIVE) && defined(SIMDE_ENABLE_NATIVE_ALIASES)
+ #define SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES
+ #endif
+
+ SIMDE__BEGIN_DECLS
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_add_epi8 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_add_epi8(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+   r_.i8 = a_.i8 + b_.i8;
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
+     r_.m256i[i] = simde_mm256_add_epi8(a_.m256i[i], b_.m256i[i]);
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_add_epi8(a, b) simde_mm512_add_epi8(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_add_epi16 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_add_epi16(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
+   r_.i16 = a_.i16 + b_.i16;
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
+     r_.m256i[i] = simde_mm256_add_epi16(a_.m256i[i], b_.m256i[i]);
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_add_epi16(a, b) simde_mm512_add_epi16(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_adds_epi8 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_adds_epi8(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if !defined(HEDLEY_INTEL_VERSION)
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
+     r_.m256i[i] = simde_mm256_adds_epi8(a_.m256i[i], b_.m256i[i]);
+   }
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
+     const int16_t tmp =
+       HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) +
+       HEDLEY_STATIC_CAST(int16_t, b_.i8[i]);
+     r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN) : INT8_MAX));
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_adds_epi8(a, b) simde_mm512_adds_epi8(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_adds_epi16 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_adds_epi16(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if !defined(HEDLEY_INTEL_VERSION)
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
+     r_.m256i[i] = simde_mm256_adds_epi16(a_.m256i[i], b_.m256i[i]);
+   }
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
+     const int32_t tmp =
+       HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) +
+       HEDLEY_STATIC_CAST(int32_t, b_.i16[i]);
+     r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX));
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_adds_epi16(a, b) simde_mm512_adds_epi16(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_adds_epu8 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_adds_epu8(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if !defined(HEDLEY_INTEL_VERSION)
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
+     r_.m128i[i] = simde_mm_adds_epu8(a_.m128i[i], b_.m128i[i]);
+   }
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
+     r_.u8[i] = ((UINT8_MAX - a_.u8[i]) > b_.u8[i]) ? (a_.u8[i] + b_.u8[i]) : UINT8_MAX;
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_adds_epu8(a, b) simde_mm512_adds_epu8(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_adds_epu16 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_adds_epu16(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if !defined(HEDLEY_INTEL_VERSION)
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
+     r_.m256i[i] = simde_mm256_adds_epu16(a_.m256i[i], b_.m256i[i]);
+   }
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
+     r_.u16[i] = ((UINT16_MAX - a_.u16[i]) > b_.u16[i]) ? (a_.u16[i] + b_.u16[i]) : UINT16_MAX;
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_adds_epu16(a, b) simde_mm512_adds_epu16(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_shuffle_epi8 (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_shuffle_epi8(a, b);
+ #else
+   simde__m512i_private
+     r_,
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if defined(SIMDE_ARCH_X86_AVX2)
+   r_.m256i[0] = simde_mm256_shuffle_epi8(a_.m256i[0], b_.m256i[0]);
+   r_.m256i[1] = simde_mm256_shuffle_epi8(a_.m256i[1], b_.m256i[1]);
+ #elif defined(SIMDE_ARCH_X86_SSSE3)
+   r_.m128i[0] = simde_mm_shuffle_epi8(a_.m128i[0], b_.m128i[0]);
+   r_.m128i[1] = simde_mm_shuffle_epi8(a_.m128i[1], b_.m128i[1]);
+   r_.m128i[2] = simde_mm_shuffle_epi8(a_.m128i[2], b_.m128i[2]);
+   r_.m128i[3] = simde_mm_shuffle_epi8(a_.m128i[3], b_.m128i[3]);
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
+     r_.u8[i] = (b_.u8[i] & 0x80) ? 0 : a_.u8[(b_.u8[i] & 0x0f) + (i & 0x30)];
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_shuffle_epi8(a, b) simde_mm512_shuffle_epi8(a, b)
+ #endif
+
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__mmask64
+ simde_mm512_cmpeq_epi8_mask (simde__m512i a, simde__m512i b) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_cmpeq_epi8_mask(a, b);
+ #else
+   simde__m512i_private
+     a_ = simde__m512i_to_private(a),
+     b_ = simde__m512i_to_private(b);
+
+ #if defined(SIMDE_ARCH_X86_AVX2)
+   simde__mmask64 r_;
+
+   // The second cast is absolutely necessary otherwise if the sign bit is set it will be sign extended to 64 bits
+   r_ = (uint32_t) simde_mm256_movemask_epi8(simde_mm256_cmpeq_epi8(a_.m256i[1], b_.m256i[1]));
+   r_ = (r_ << 32) | (uint32_t) simde_mm256_movemask_epi8(simde_mm256_cmpeq_epi8(a_.m256i[0], b_.m256i[0]));
+ #elif defined(SIMDE_ARCH_X86_SSE2)
+   simde__mmask64 r_;
+
+   r_ = simde_mm_movemask_epi8(simde_mm_cmpeq_epi8(a_.m128i[3], b_.m128i[3]));
+   r_ = (r_ << 16) | simde_mm_movemask_epi8(simde_mm_cmpeq_epi8(a_.m128i[2], b_.m128i[2]));
+   r_ = (r_ << 16) | simde_mm_movemask_epi8(simde_mm_cmpeq_epi8(a_.m128i[1], b_.m128i[1]));
+   r_ = (r_ << 16) | simde_mm_movemask_epi8(simde_mm_cmpeq_epi8(a_.m128i[0], b_.m128i[0]));
+ #else
+   simde__mmask64 r_ = 0;
+
+   for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
+     r_ |= (a_.u8[i] == b_.u8[i]) ? (1ULL << i) : 0;
+   }
+ #endif
+
+   return r_;
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_cmpeq_epi8_mask(a, b) simde_mm512_cmpeq_epi8_mask(a, b)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m256i
+ simde_mm512_cvtepi16_epi8 (simde__m512i a) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_cvtepi16_epi8(a);
+ #else
+   simde__m256i_private r_;
+   simde__m512i_private a_ = simde__m512i_to_private(a);
+
+ #if defined(SIMDE__CONVERT_VECTOR)
+   SIMDE__CONVERT_VECTOR(r_.i8, a_.i16);
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
+     r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, a_.i16[i]);
+   }
+ #endif
+
+   return simde__m256i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_cvtepi16_epi8(a) simde_mm512_cvtepi16_epi8(a)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m512i
+ simde_mm512_cvtepi8_epi16 (simde__m256i a) {
+ #if defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_cvtepi8_epi16(a);
+ #else
+   simde__m512i_private r_;
+   simde__m256i_private a_ = simde__m256i_to_private(a);
+
+ #if defined(SIMDE__CONVERT_VECTOR)
+   SIMDE__CONVERT_VECTOR(r_.i16, a_.i8);
+ #else
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
+     r_.i16[i] = a_.i8[i];
+   }
+ #endif
+
+   return simde__m512i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512BW_ENABLE_NATIVE_ALIASES)
+ #  define _mm512_cvtepi8_epi16(a) simde_mm512_cvtepi8_epi16(a)
+ #endif
+
+ SIMDE__FUNCTION_ATTRIBUTES
+ simde__m256i
+ simde_mm512_cvtsepi16_epi8 (simde__m512i a) {
+ #if defined(SIMDE_AVX512VL_NATIVE) && defined(SIMDE_AVX512BW_NATIVE)
+   return _mm512_cvtsepi16_epi8(a);
+ #else
+   simde__m256i_private r_;
+   simde__m512i_private a_ = simde__m512i_to_private(a);
+
+   SIMDE__VECTORIZE
+   for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
+     r_.i8[i] =
+       (a_.i16[i] < INT8_MIN)
+         ? (INT8_MIN)
+         : ((a_.i16[i] > INT8_MAX)
+             ? (INT8_MAX)
+             : HEDLEY_STATIC_CAST(int8_t, a_.i16[i]));
+   }
+
+   return simde__m256i_from_private(r_);
+ #endif
+ }
+ #if defined(SIMDE_AVX512VL_ENABLE_NATIVE_ALIASES)
+ #define _mm512_cvtsepi16_epi8(a) simde_mm512_cvtsepi16_epi8(a)
+ #endif
+
+ SIMDE__END_DECLS
+
+ HEDLEY_DIAGNOSTIC_POP
+
+ #endif /* !defined(SIMDE__AVX512BW_H) */
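For reference, a minimal sketch of calling the portable API defined in this header directly; only the simde_* function, the simde__m512i vector type, and the simde__mmask64 mask type come from the code above, everything else in the example is illustrative and assumes the bundled simde/ directory is on the include path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "simde/x86/avx512bw.h"

int main(void) {
  uint8_t a[64], b[64];
  memset(a, 'A', sizeof(a));
  memset(b, 'A', sizeof(b));
  b[10] = 'C';                              /* one mismatching byte */

  /* simde__m512i is 64 bytes wide, so a plain memcpy is enough to fill the
   * vectors without relying on load helpers that are not part of this diff. */
  simde__m512i va, vb;
  memcpy(&va, a, sizeof(va));
  memcpy(&vb, b, sizeof(vb));

  /* One mask bit per byte position that compares equal
   * (see simde_mm512_cmpeq_epi8_mask above). */
  simde__mmask64 eq = simde_mm512_cmpeq_epi8_mask(va, vb);
  printf("byte 10 equal? %s\n", ((eq >> 10) & 1) ? "yes" : "no");  /* prints "no" */
  return 0;
}

The same call compiles whether or not AVX-512BW is available: with native support it forwards to _mm512_cmpeq_epi8_mask, otherwise it falls back to the AVX2/SSE2 movemask path or the scalar loop shown in the header.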