minimap2 0.2.25.0 → 0.2.25.1
- checksums.yaml +4 -4
- data/README.md +2 -3
- data/ext/minimap2/Makefile +6 -2
- data/ext/minimap2/NEWS.md +38 -0
- data/ext/minimap2/README.md +9 -3
- data/ext/minimap2/align.c +5 -3
- data/ext/minimap2/cookbook.md +2 -2
- data/ext/minimap2/format.c +7 -4
- data/ext/minimap2/kalloc.c +20 -1
- data/ext/minimap2/kalloc.h +13 -2
- data/ext/minimap2/ksw2.h +1 -0
- data/ext/minimap2/ksw2_extd2_sse.c +1 -1
- data/ext/minimap2/ksw2_exts2_sse.c +79 -40
- data/ext/minimap2/ksw2_extz2_sse.c +1 -1
- data/ext/minimap2/lchain.c +15 -16
- data/ext/minimap2/lib/simde/CONTRIBUTING.md +114 -0
- data/ext/minimap2/lib/simde/COPYING +20 -0
- data/ext/minimap2/lib/simde/README.md +333 -0
- data/ext/minimap2/lib/simde/amalgamate.py +58 -0
- data/ext/minimap2/lib/simde/meson.build +33 -0
- data/ext/minimap2/lib/simde/netlify.toml +20 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/float32x2.h +140 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/float32x4.h +137 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/float64x1.h +142 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/float64x2.h +145 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int16x4.h +140 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int16x8.h +145 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int32x2.h +140 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int32x4.h +143 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int64x1.h +137 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int64x2.h +141 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int8x16.h +147 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/int8x8.h +141 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint16x4.h +134 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint16x8.h +138 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint32x2.h +134 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint32x4.h +137 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint64x1.h +131 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint64x2.h +135 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint8x16.h +141 -0
- data/ext/minimap2/lib/simde/simde/arm/neon/uint8x8.h +135 -0
- data/ext/minimap2/lib/simde/simde/arm/neon.h +97 -0
- data/ext/minimap2/lib/simde/simde/check.h +267 -0
- data/ext/minimap2/lib/simde/simde/debug-trap.h +83 -0
- data/ext/minimap2/lib/simde/simde/hedley.h +1899 -0
- data/ext/minimap2/lib/simde/simde/simde-arch.h +445 -0
- data/ext/minimap2/lib/simde/simde/simde-common.h +697 -0
- data/ext/minimap2/lib/simde/simde/x86/avx.h +5385 -0
- data/ext/minimap2/lib/simde/simde/x86/avx2.h +2402 -0
- data/ext/minimap2/lib/simde/simde/x86/avx512bw.h +391 -0
- data/ext/minimap2/lib/simde/simde/x86/avx512f.h +3389 -0
- data/ext/minimap2/lib/simde/simde/x86/avx512vl.h +112 -0
- data/ext/minimap2/lib/simde/simde/x86/fma.h +659 -0
- data/ext/minimap2/lib/simde/simde/x86/mmx.h +2210 -0
- data/ext/minimap2/lib/simde/simde/x86/sse.h +3696 -0
- data/ext/minimap2/lib/simde/simde/x86/sse2.h +5991 -0
- data/ext/minimap2/lib/simde/simde/x86/sse3.h +343 -0
- data/ext/minimap2/lib/simde/simde/x86/sse4.1.h +1783 -0
- data/ext/minimap2/lib/simde/simde/x86/sse4.2.h +105 -0
- data/ext/minimap2/lib/simde/simde/x86/ssse3.h +1053 -0
- data/ext/minimap2/lib/simde/simde/x86/svml.h +543 -0
- data/ext/minimap2/lib/simde/test/CMakeLists.txt +166 -0
- data/ext/minimap2/lib/simde/test/arm/meson.build +4 -0
- data/ext/minimap2/lib/simde/test/arm/neon/meson.build +23 -0
- data/ext/minimap2/lib/simde/test/arm/neon/skel.c +871 -0
- data/ext/minimap2/lib/simde/test/arm/neon/test-neon-internal.h +134 -0
- data/ext/minimap2/lib/simde/test/arm/neon/test-neon.c +39 -0
- data/ext/minimap2/lib/simde/test/arm/neon/test-neon.h +10 -0
- data/ext/minimap2/lib/simde/test/arm/neon/vadd.c +1260 -0
- data/ext/minimap2/lib/simde/test/arm/neon/vdup_n.c +873 -0
- data/ext/minimap2/lib/simde/test/arm/neon/vmul.c +1084 -0
- data/ext/minimap2/lib/simde/test/arm/neon/vsub.c +1260 -0
- data/ext/minimap2/lib/simde/test/arm/test-arm-internal.h +18 -0
- data/ext/minimap2/lib/simde/test/arm/test-arm.c +20 -0
- data/ext/minimap2/lib/simde/test/arm/test-arm.h +8 -0
- data/ext/minimap2/lib/simde/test/cmake/AddCompilerFlags.cmake +171 -0
- data/ext/minimap2/lib/simde/test/cmake/ExtraWarningFlags.cmake +68 -0
- data/ext/minimap2/lib/simde/test/meson.build +64 -0
- data/ext/minimap2/lib/simde/test/munit/COPYING +21 -0
- data/ext/minimap2/lib/simde/test/munit/Makefile +55 -0
- data/ext/minimap2/lib/simde/test/munit/README.md +54 -0
- data/ext/minimap2/lib/simde/test/munit/example.c +351 -0
- data/ext/minimap2/lib/simde/test/munit/meson.build +37 -0
- data/ext/minimap2/lib/simde/test/munit/munit.c +2055 -0
- data/ext/minimap2/lib/simde/test/munit/munit.h +535 -0
- data/ext/minimap2/lib/simde/test/run-tests.c +20 -0
- data/ext/minimap2/lib/simde/test/run-tests.h +260 -0
- data/ext/minimap2/lib/simde/test/x86/avx.c +13752 -0
- data/ext/minimap2/lib/simde/test/x86/avx2.c +9977 -0
- data/ext/minimap2/lib/simde/test/x86/avx512bw.c +2664 -0
- data/ext/minimap2/lib/simde/test/x86/avx512f.c +10416 -0
- data/ext/minimap2/lib/simde/test/x86/avx512vl.c +210 -0
- data/ext/minimap2/lib/simde/test/x86/fma.c +2557 -0
- data/ext/minimap2/lib/simde/test/x86/meson.build +33 -0
- data/ext/minimap2/lib/simde/test/x86/mmx.c +2878 -0
- data/ext/minimap2/lib/simde/test/x86/skel.c +2984 -0
- data/ext/minimap2/lib/simde/test/x86/sse.c +5121 -0
- data/ext/minimap2/lib/simde/test/x86/sse2.c +9860 -0
- data/ext/minimap2/lib/simde/test/x86/sse3.c +486 -0
- data/ext/minimap2/lib/simde/test/x86/sse4.1.c +3446 -0
- data/ext/minimap2/lib/simde/test/x86/sse4.2.c +101 -0
- data/ext/minimap2/lib/simde/test/x86/ssse3.c +2084 -0
- data/ext/minimap2/lib/simde/test/x86/svml.c +1545 -0
- data/ext/minimap2/lib/simde/test/x86/test-avx.h +16 -0
- data/ext/minimap2/lib/simde/test/x86/test-avx512.h +25 -0
- data/ext/minimap2/lib/simde/test/x86/test-mmx.h +13 -0
- data/ext/minimap2/lib/simde/test/x86/test-sse.h +13 -0
- data/ext/minimap2/lib/simde/test/x86/test-sse2.h +13 -0
- data/ext/minimap2/lib/simde/test/x86/test-x86-internal.h +196 -0
- data/ext/minimap2/lib/simde/test/x86/test-x86.c +48 -0
- data/ext/minimap2/lib/simde/test/x86/test-x86.h +8 -0
- data/ext/minimap2/main.c +13 -6
- data/ext/minimap2/map.c +0 -5
- data/ext/minimap2/minimap.h +40 -31
- data/ext/minimap2/minimap2.1 +19 -5
- data/ext/minimap2/misc/paftools.js +545 -24
- data/ext/minimap2/options.c +1 -1
- data/ext/minimap2/pyproject.toml +2 -0
- data/ext/minimap2/python/mappy.pyx +3 -1
- data/ext/minimap2/seed.c +1 -1
- data/ext/minimap2/setup.py +32 -22
- data/lib/minimap2/version.rb +1 -1
- metadata +100 -3
data/ext/minimap2/lib/simde/simde/arm/neon/int8x16.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(SIMDE__INSIDE_NEON_H)
+# error Do not include simde/arm/neon/int8x16.h directly; use simde/arm/neon.h.
+#endif
+
+#if !defined(SIMDE__NEON_INT8X16_H)
+#define SIMDE__NEON_INT8X16_H
+
+typedef union {
+#if defined(SIMDE_VECTOR_SUBSCRIPT)
+  int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
+#else
+  int8_t i8[16];
+#endif
+
+#if defined(SIMDE_NEON_NATIVE)
+  int8x16_t n;
+#endif
+
+#if defined(SIMDE_NEON_MMX)
+  __m64 mmx[2];
+#endif
+#if defined(SIMDE_NEON_SSE2)
+  __m128i sse;
+#endif
+} simde_int8x16_t;
+
+#if defined(SIMDE_NEON_NATIVE)
+HEDLEY_STATIC_ASSERT(sizeof(int8x16_t) == sizeof(simde_int8x16_t), "int8x16_t size doesn't match simde_int8x16_t size");
+#endif
+HEDLEY_STATIC_ASSERT(16 == sizeof(simde_int8x16_t), "simde_int8x16_t size incorrect");
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vaddq_s8(simde_int8x16_t a, simde_int8x16_t b) {
+  simde_int8x16_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vaddq_s8(a.n, b.n);
+#elif defined(SIMDE_SSE2_NATIVE)
+  r.sse = _mm_add_epi8(a.sse, b.sse);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] + b.i8[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vld1q_s8 (int8_t const ptr[8]) {
+  simde_int8x16_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vld1q_s8(ptr);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = ptr[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_x_vloadq_s8 (int8_t l0, int8_t l1, int8_t l2, int8_t l3,
+                   int8_t l4, int8_t l5, int8_t l6, int8_t l7,
+                   int8_t l8, int8_t l9, int8_t l10, int8_t l11,
+                   int8_t l12, int8_t l13, int8_t l14, int8_t l15) {
+  int8_t v[] = { l0, l1, l2, l3, l4, l5, l6, l7,
+                 l8, l9, l10, l11, l12, l13, l14, l15};
+  return simde_vld1q_s8(v);
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vdupq_n_s8 (int8_t value) {
+  simde_int8x16_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vdupq_n_s8(value);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = value;
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vmulq_s8(simde_int8x16_t a, simde_int8x16_t b) {
+  simde_int8x16_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vmulq_s8(a.n, b.n);
+#elif defined(SIMDE_SSE2_NATIVE)
+  r.sse = _mm_mul_epi8(a.sse, b.sse);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] * b.i8[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x16_t
+simde_vsubq_s8(simde_int8x16_t a, simde_int8x16_t b) {
+  simde_int8x16_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vsubq_s8(a.n, b.n);
+#elif defined(SIMDE_SSE2_NATIVE)
+  r.sse = _mm_sub_epi8(a.sse, b.sse);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] - b.i8[i];
+  }
+#endif
+  return r;
+}
+
+#endif
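The hunk above shows the pattern used throughout the vendored SIMDe headers: each NEON type is wrapped in a union (`simde_int8x16_t`), and each intrinsic dispatches at compile time to the native NEON call, an SSE2 equivalent, or a plain scalar loop. The following is a minimal usage sketch, not part of the diff; it assumes the program is compiled with an include path pointing at the bundled `lib/simde/simde` directory so that `simde/arm/neon.h` resolves, and the `main` function and variable names are purely illustrative.

/* Illustrative sketch (not from the diff): exercising the SIMDe NEON
 * polyfill added under data/ext/minimap2/lib/simde.
 * Assumes an include path such as -Idata/ext/minimap2/lib/simde/simde. */
#include <stdio.h>
#include <stdint.h>
#include "simde/arm/neon.h"

int main(void) {
  int8_t av[16], bv[16];
  for (int i = 0; i < 16; i++) { av[i] = (int8_t)i; bv[i] = (int8_t)(2 * i); }

  /* These calls compile on any target: they become vaddq_s8/vsubq_s8 on
   * NEON, _mm_add_epi8/_mm_sub_epi8 on SSE2, or the scalar loop otherwise. */
  simde_int8x16_t a    = simde_vld1q_s8(av);
  simde_int8x16_t b    = simde_vld1q_s8(bv);
  simde_int8x16_t sum  = simde_vaddq_s8(a, b);
  simde_int8x16_t diff = simde_vsubq_s8(b, a);

  printf("sum[3]=%d diff[3]=%d\n", sum.i8[3], diff.i8[3]); /* prints 9 and 3 */
  return 0;
}

Because the union exposes the `.i8` member in every configuration, callers can always inspect lanes directly, regardless of which backend the `#if` chain selected.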
data/ext/minimap2/lib/simde/simde/arm/neon/int8x8.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(SIMDE__INSIDE_NEON_H)
+# error Do not include simde/arm/neon/int8x8.h directly; use simde/arm/neon.h.
+#endif
+
+#if !defined(SIMDE__NEON_INT8X8_H)
+#define SIMDE__NEON_INT8X8_H
+
+typedef union {
+#if defined(SIMDE_VECTOR_SUBSCRIPT)
+  int8_t i8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
+#else
+  int8_t i8[8];
+#endif
+
+#if defined(SIMDE_NEON_NATIVE)
+  int8x8_t n;
+#endif
+
+#if defined(SIMDE_NEON_MMX)
+  __m64 mmx;
+#endif
+} simde_int8x8_t;
+
+#if defined(SIMDE_NEON_NATIVE)
+HEDLEY_STATIC_ASSERT(sizeof(int8x8_t) == sizeof(simde_int8x8_t), "int8x8_t size doesn't match simde_int8x8_t size");
+#endif
+HEDLEY_STATIC_ASSERT(8 == sizeof(simde_int8x8_t), "simde_int8x8_t size incorrect");
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vadd_s8(simde_int8x8_t a, simde_int8x8_t b) {
+  simde_int8x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vadd_s8(a.n, b.n);
+#elif defined(SIMDE_MMX_NATIVE)
+  r.mmx = _mm_add_pi8(a.mmx, b.mmx);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] + b.i8[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vld1_s8 (int8_t const ptr[8]) {
+  simde_int8x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vld1_s8(ptr);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = ptr[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_x_vload_s8 (int8_t l0, int8_t l1, int8_t l2, int8_t l3,
+                  int8_t l4, int8_t l5, int8_t l6, int8_t l7) {
+  int8_t v[] = { l0, l1, l2, l3, l4, l5, l6, l7 };
+  return simde_vld1_s8(v);
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vdup_n_s8 (int8_t value) {
+  simde_int8x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vdup_n_s8(value);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = value;
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vmul_s8(simde_int8x8_t a, simde_int8x8_t b) {
+  simde_int8x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vmul_s8(a.n, b.n);
+#elif defined(SIMDE_MMX_NATIVE)
+  r.mmx = _mm_mul_pi8(a.mmx, b.mmx);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] * b.i8[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_int8x8_t
+simde_vsub_s8(simde_int8x8_t a, simde_int8x8_t b) {
+  simde_int8x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vsub_s8(a.n, b.n);
+#elif defined(SIMDE_MMX_NATIVE)
+  r.mmx = _mm_sub_pi8(a.mmx, b.mmx);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.i8) / sizeof(r.i8[0])) ; i++) {
+    r.i8[i] = a.i8[i] - b.i8[i];
+  }
+#endif
+  return r;
+}
+
+#endif
data/ext/minimap2/lib/simde/simde/arm/neon/uint16x4.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(SIMDE__INSIDE_NEON_H)
+# error Do not include simde/arm/neon/uint16x4.h directly; use simde/arm/neon.h.
+#endif
+
+#if !defined(SIMDE__NEON_UINT16X4_H)
+#define SIMDE__NEON_UINT16X4_H
+
+typedef union {
+#if defined(SIMDE_VECTOR_SUBSCRIPT)
+  uint16_t u16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
+#else
+  uint16_t u16[4];
+#endif
+
+#if defined(SIMDE_NEON_NATIVE)
+  uint16x4_t n;
+#endif
+
+#if defined(SIMDE_NEON_MMX)
+  __m64 mmx;
+#endif
+} simde_uint16x4_t;
+
+#if defined(SIMDE_NEON_NATIVE)
+HEDLEY_STATIC_ASSERT(sizeof(uint16x4_t) == sizeof(simde_uint16x4_t), "uint16x4_t size doesn't match simde_uint16x4_t size");
+#endif
+HEDLEY_STATIC_ASSERT(8 == sizeof(simde_uint16x4_t), "simde_uint16x4_t size incorrect");
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vadd_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
+  simde_uint16x4_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vadd_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] + b.u16[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vld1_u16 (uint16_t const ptr[4]) {
+  simde_uint16x4_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vld1_u16(ptr);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = ptr[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_x_vload_u16 (uint16_t l0, uint16_t l1, uint16_t l2, uint16_t l3) {
+  uint16_t v[] = { l0, l1, l2, l3 };
+  return simde_vld1_u16(v);
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vdup_n_u16 (uint16_t value) {
+  simde_uint16x4_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vdup_n_u16(value);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = value;
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vmul_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
+  simde_uint16x4_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vmul_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] * b.u16[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x4_t
+simde_vsub_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
+  simde_uint16x4_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vsub_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] - b.u16[i];
+  }
+#endif
+  return r;
+}
+
+#endif
data/ext/minimap2/lib/simde/simde/arm/neon/uint16x8.h
@@ -0,0 +1,138 @@
+/* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(SIMDE__INSIDE_NEON_H)
+# error Do not include simde/arm/neon/uint16x8.h directly; use simde/arm/neon.h.
+#endif
+
+#if !defined(SIMDE__NEON_UINT16X8_H)
+#define SIMDE__NEON_UINT16X8_H
+
+typedef union {
+#if defined(SIMDE_VECTOR_SUBSCRIPT)
+  uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
+#else
+  uint16_t u16[8];
+#endif
+
+#if defined(SIMDE_NEON_NATIVE)
+  uint16x8_t n;
+#endif
+
+#if defined(SIMDE_NEON_MMX)
+  __m64 mmx[2];
+#endif
+#if defined(SIMDE_NEON_SSE2)
+  __m128i sse;
+#endif
+} simde_uint16x8_t;
+
+#if defined(SIMDE_NEON_NATIVE)
+HEDLEY_STATIC_ASSERT(sizeof(uint16x8_t) == sizeof(simde_uint16x8_t), "uint16x8_t size doesn't match simde_uint16x8_t size");
+#endif
+HEDLEY_STATIC_ASSERT(16 == sizeof(simde_uint16x8_t), "simde_uint16x8_t size incorrect");
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vaddq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
+  simde_uint16x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vaddq_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] + b.u16[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vld1q_u16 (uint16_t const ptr[8]) {
+  simde_uint16x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vld1q_u16(ptr);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = ptr[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_x_vloadq_u16 (uint16_t l0, uint16_t l1, uint16_t l2, uint16_t l3,
+                    uint16_t l4, uint16_t l5, uint16_t l6, uint16_t l7) {
+  uint16_t v[] = { l0, l1, l2, l3, l4, l5, l6, l7 };
+  return simde_vld1q_u16(v);
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vdupq_n_u16 (uint16_t value) {
+  simde_uint16x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vdupq_n_u16(value);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = value;
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vmulq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
+  simde_uint16x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vmulq_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] * b.u16[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint16x8_t
+simde_vsubq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
+  simde_uint16x8_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vsubq_u16(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u16) / sizeof(r.u16[0])) ; i++) {
+    r.u16[i] = a.u16[i] - b.u16[i];
+  }
+#endif
+  return r;
+}
+
+#endif
data/ext/minimap2/lib/simde/simde/arm/neon/uint32x2.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(SIMDE__INSIDE_NEON_H)
+# error Do not include simde/arm/neon/uint32x2.h directly; use simde/arm/neon.h.
+#endif
+
+#if !defined(SIMDE__NEON_UINT32X2_H)
+#define SIMDE__NEON_UINT32X2_H
+
+typedef union {
+#if defined(SIMDE_VECTOR_SUBSCRIPT)
+  uint32_t u32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
+#else
+  uint32_t u32[2];
+#endif
+
+#if defined(SIMDE_NEON_NATIVE)
+  uint32x2_t n;
+#endif
+
+#if defined(SIMDE_NEON_MMX)
+  __m64 mmx;
+#endif
+} simde_uint32x2_t;
+
+#if defined(SIMDE_NEON_NATIVE)
+HEDLEY_STATIC_ASSERT(sizeof(uint32x2_t) == sizeof(simde_uint32x2_t), "uint32x2_t size doesn't match simde_uint32x2_t size");
+#endif
+HEDLEY_STATIC_ASSERT(8 == sizeof(simde_uint32x2_t), "simde_uint32x2_t size incorrect");
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vadd_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
+  simde_uint32x2_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vadd_u32(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u32) / sizeof(r.u32[0])) ; i++) {
+    r.u32[i] = a.u32[i] + b.u32[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vld1_u32 (uint32_t const ptr[2]) {
+  simde_uint32x2_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vld1_u32(ptr);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u32) / sizeof(r.u32[0])) ; i++) {
+    r.u32[i] = ptr[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_x_vload_u32 (uint32_t l0, uint32_t l1) {
+  uint32_t v[] = { l0, l1 };
+  return simde_vld1_u32(v);
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vdup_n_u32 (uint32_t value) {
+  simde_uint32x2_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vdup_n_u32(value);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u32) / sizeof(r.u32[0])) ; i++) {
+    r.u32[i] = value;
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vmul_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
+  simde_uint32x2_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vmul_u32(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u32) / sizeof(r.u32[0])) ; i++) {
+    r.u32[i] = a.u32[i] * b.u32[i];
+  }
+#endif
+  return r;
+}
+
+SIMDE__FUNCTION_ATTRIBUTES
+simde_uint32x2_t
+simde_vsub_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
+  simde_uint32x2_t r;
+#if defined(SIMDE_NEON_NATIVE)
+  r.n = vsub_u32(a.n, b.n);
+#else
+  SIMDE__VECTORIZE
+  for (size_t i = 0 ; i < (sizeof(r.u32) / sizeof(r.u32[0])) ; i++) {
+    r.u32[i] = a.u32[i] - b.u32[i];
+  }
+#endif
+  return r;
+}
+
+#endif