minimap2 0.2.24.3 → 0.2.24.6

Sign up to get free protection for your applications and to get access to all the features.
Files changed (101) hide show
  1. checksums.yaml +4 -4
  2. data/ext/minimap2/lib/simde/CONTRIBUTING.md +114 -0
  3. data/ext/minimap2/lib/simde/COPYING +20 -0
  4. data/ext/minimap2/lib/simde/README.md +333 -0
  5. data/ext/minimap2/lib/simde/amalgamate.py +58 -0
  6. data/ext/minimap2/lib/simde/meson.build +33 -0
  7. data/ext/minimap2/lib/simde/netlify.toml +20 -0
  8. data/ext/minimap2/lib/simde/simde/arm/neon/float32x2.h +140 -0
  9. data/ext/minimap2/lib/simde/simde/arm/neon/float32x4.h +137 -0
  10. data/ext/minimap2/lib/simde/simde/arm/neon/float64x1.h +142 -0
  11. data/ext/minimap2/lib/simde/simde/arm/neon/float64x2.h +145 -0
  12. data/ext/minimap2/lib/simde/simde/arm/neon/int16x4.h +140 -0
  13. data/ext/minimap2/lib/simde/simde/arm/neon/int16x8.h +145 -0
  14. data/ext/minimap2/lib/simde/simde/arm/neon/int32x2.h +140 -0
  15. data/ext/minimap2/lib/simde/simde/arm/neon/int32x4.h +143 -0
  16. data/ext/minimap2/lib/simde/simde/arm/neon/int64x1.h +137 -0
  17. data/ext/minimap2/lib/simde/simde/arm/neon/int64x2.h +141 -0
  18. data/ext/minimap2/lib/simde/simde/arm/neon/int8x16.h +147 -0
  19. data/ext/minimap2/lib/simde/simde/arm/neon/int8x8.h +141 -0
  20. data/ext/minimap2/lib/simde/simde/arm/neon/uint16x4.h +134 -0
  21. data/ext/minimap2/lib/simde/simde/arm/neon/uint16x8.h +138 -0
  22. data/ext/minimap2/lib/simde/simde/arm/neon/uint32x2.h +134 -0
  23. data/ext/minimap2/lib/simde/simde/arm/neon/uint32x4.h +137 -0
  24. data/ext/minimap2/lib/simde/simde/arm/neon/uint64x1.h +131 -0
  25. data/ext/minimap2/lib/simde/simde/arm/neon/uint64x2.h +135 -0
  26. data/ext/minimap2/lib/simde/simde/arm/neon/uint8x16.h +141 -0
  27. data/ext/minimap2/lib/simde/simde/arm/neon/uint8x8.h +135 -0
  28. data/ext/minimap2/lib/simde/simde/arm/neon.h +97 -0
  29. data/ext/minimap2/lib/simde/simde/check.h +267 -0
  30. data/ext/minimap2/lib/simde/simde/debug-trap.h +83 -0
  31. data/ext/minimap2/lib/simde/simde/hedley.h +1899 -0
  32. data/ext/minimap2/lib/simde/simde/simde-arch.h +445 -0
  33. data/ext/minimap2/lib/simde/simde/simde-common.h +697 -0
  34. data/ext/minimap2/lib/simde/simde/x86/avx.h +5385 -0
  35. data/ext/minimap2/lib/simde/simde/x86/avx2.h +2402 -0
  36. data/ext/minimap2/lib/simde/simde/x86/avx512bw.h +391 -0
  37. data/ext/minimap2/lib/simde/simde/x86/avx512f.h +3389 -0
  38. data/ext/minimap2/lib/simde/simde/x86/avx512vl.h +112 -0
  39. data/ext/minimap2/lib/simde/simde/x86/fma.h +659 -0
  40. data/ext/minimap2/lib/simde/simde/x86/mmx.h +2210 -0
  41. data/ext/minimap2/lib/simde/simde/x86/sse.h +3696 -0
  42. data/ext/minimap2/lib/simde/simde/x86/sse2.h +5991 -0
  43. data/ext/minimap2/lib/simde/simde/x86/sse3.h +343 -0
  44. data/ext/minimap2/lib/simde/simde/x86/sse4.1.h +1783 -0
  45. data/ext/minimap2/lib/simde/simde/x86/sse4.2.h +105 -0
  46. data/ext/minimap2/lib/simde/simde/x86/ssse3.h +1053 -0
  47. data/ext/minimap2/lib/simde/simde/x86/svml.h +543 -0
  48. data/ext/minimap2/lib/simde/test/CMakeLists.txt +166 -0
  49. data/ext/minimap2/lib/simde/test/arm/meson.build +4 -0
  50. data/ext/minimap2/lib/simde/test/arm/neon/meson.build +23 -0
  51. data/ext/minimap2/lib/simde/test/arm/neon/skel.c +871 -0
  52. data/ext/minimap2/lib/simde/test/arm/neon/test-neon-internal.h +134 -0
  53. data/ext/minimap2/lib/simde/test/arm/neon/test-neon.c +39 -0
  54. data/ext/minimap2/lib/simde/test/arm/neon/test-neon.h +10 -0
  55. data/ext/minimap2/lib/simde/test/arm/neon/vadd.c +1260 -0
  56. data/ext/minimap2/lib/simde/test/arm/neon/vdup_n.c +873 -0
  57. data/ext/minimap2/lib/simde/test/arm/neon/vmul.c +1084 -0
  58. data/ext/minimap2/lib/simde/test/arm/neon/vsub.c +1260 -0
  59. data/ext/minimap2/lib/simde/test/arm/test-arm-internal.h +18 -0
  60. data/ext/minimap2/lib/simde/test/arm/test-arm.c +20 -0
  61. data/ext/minimap2/lib/simde/test/arm/test-arm.h +8 -0
  62. data/ext/minimap2/lib/simde/test/cmake/AddCompilerFlags.cmake +171 -0
  63. data/ext/minimap2/lib/simde/test/cmake/ExtraWarningFlags.cmake +68 -0
  64. data/ext/minimap2/lib/simde/test/meson.build +64 -0
  65. data/ext/minimap2/lib/simde/test/munit/COPYING +21 -0
  66. data/ext/minimap2/lib/simde/test/munit/Makefile +55 -0
  67. data/ext/minimap2/lib/simde/test/munit/README.md +54 -0
  68. data/ext/minimap2/lib/simde/test/munit/example.c +351 -0
  69. data/ext/minimap2/lib/simde/test/munit/meson.build +37 -0
  70. data/ext/minimap2/lib/simde/test/munit/munit.c +2055 -0
  71. data/ext/minimap2/lib/simde/test/munit/munit.h +535 -0
  72. data/ext/minimap2/lib/simde/test/run-tests.c +20 -0
  73. data/ext/minimap2/lib/simde/test/run-tests.h +260 -0
  74. data/ext/minimap2/lib/simde/test/x86/avx.c +13752 -0
  75. data/ext/minimap2/lib/simde/test/x86/avx2.c +9977 -0
  76. data/ext/minimap2/lib/simde/test/x86/avx512bw.c +2664 -0
  77. data/ext/minimap2/lib/simde/test/x86/avx512f.c +10416 -0
  78. data/ext/minimap2/lib/simde/test/x86/avx512vl.c +210 -0
  79. data/ext/minimap2/lib/simde/test/x86/fma.c +2557 -0
  80. data/ext/minimap2/lib/simde/test/x86/meson.build +33 -0
  81. data/ext/minimap2/lib/simde/test/x86/mmx.c +2878 -0
  82. data/ext/minimap2/lib/simde/test/x86/skel.c +2984 -0
  83. data/ext/minimap2/lib/simde/test/x86/sse.c +5121 -0
  84. data/ext/minimap2/lib/simde/test/x86/sse2.c +9860 -0
  85. data/ext/minimap2/lib/simde/test/x86/sse3.c +486 -0
  86. data/ext/minimap2/lib/simde/test/x86/sse4.1.c +3446 -0
  87. data/ext/minimap2/lib/simde/test/x86/sse4.2.c +101 -0
  88. data/ext/minimap2/lib/simde/test/x86/ssse3.c +2084 -0
  89. data/ext/minimap2/lib/simde/test/x86/svml.c +1545 -0
  90. data/ext/minimap2/lib/simde/test/x86/test-avx.h +16 -0
  91. data/ext/minimap2/lib/simde/test/x86/test-avx512.h +25 -0
  92. data/ext/minimap2/lib/simde/test/x86/test-mmx.h +13 -0
  93. data/ext/minimap2/lib/simde/test/x86/test-sse.h +13 -0
  94. data/ext/minimap2/lib/simde/test/x86/test-sse2.h +13 -0
  95. data/ext/minimap2/lib/simde/test/x86/test-x86-internal.h +196 -0
  96. data/ext/minimap2/lib/simde/test/x86/test-x86.c +48 -0
  97. data/ext/minimap2/lib/simde/test/x86/test-x86.h +8 -0
  98. data/lib/minimap2/aligner.rb +2 -2
  99. data/lib/minimap2/ffi/constants.rb +3 -0
  100. data/lib/minimap2/version.rb +1 -1
  101. metadata +99 -3
@@ -0,0 +1,145 @@
1
+ /* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
2
+ *
3
+ * Permission is hereby granted, free of charge, to any person
4
+ * obtaining a copy of this software and associated documentation
5
+ * files (the "Software"), to deal in the Software without
6
+ * restriction, including without limitation the rights to use, copy,
7
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
8
+ * of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be
12
+ * included in all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ * SOFTWARE.
22
+ */
23
+
24
+ #if !defined(SIMDE__INSIDE_NEON_H)
25
+ # error Do not include simde/arm/neon/int16x8.h directly; use simde/arm/neon.h.
26
+ #endif
27
+
28
+ #if !defined(SIMDE__NEON_INT16X8_H)
29
+ #define SIMDE__NEON_INT16X8_H
30
+
31
/* Portable stand-in for ARM NEON's int16x8_t: a 128-bit vector of eight
 * signed 16-bit lanes.  The union overlays the portable lane array with
 * whichever native vector types this build enables, so each backend can
 * access the representation it understands. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; /* GCC vector-extension view (16 bytes) */
#else
  int16_t i16[8]; /* portable per-lane view */
#endif

#if defined(SIMDE_NEON_NATIVE)
  int16x8_t n; /* real NEON register view */
#endif

#if defined(SIMDE_NEON_MMX)
  __m64 mmx[2]; /* two 64-bit MMX halves */
#endif
#if defined(SIMDE_NEON_SSE2)
  __m128i sse; /* SSE2 register view */
#endif
} simde_int16x8_t;

/* Layout sanity checks: the emulated type must match the native type's size
 * (when available) and be exactly 16 bytes overall. */
#if defined(SIMDE_NEON_NATIVE)
HEDLEY_STATIC_ASSERT(sizeof(int16x8_t) == sizeof(simde_int16x8_t), "int16x8_t size doesn't match simde_int16x8_t size");
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde_int16x8_t), "simde_int16x8_t size incorrect");
54
+
55
+ SIMDE__FUNCTION_ATTRIBUTES
56
+ simde_int16x8_t
57
+ simde_vaddq_s16(simde_int16x8_t a, simde_int16x8_t b) {
58
+ simde_int16x8_t r;
59
+ #if defined(SIMDE_NEON_NATIVE)
60
+ r.n = vaddq_s16(a.n, b.n);
61
+ #elif defined(SIMDE_SSE2_NATIVE)
62
+ r.sse = _mm_add_epi16(a.sse, b.sse);
63
+ #else
64
+ SIMDE__VECTORIZE
65
+ for (size_t i = 0 ; i < (sizeof(r.i16) / sizeof(r.i16[0])) ; i++) {
66
+ r.i16[i] = a.i16[i] + b.i16[i];
67
+ }
68
+ #endif
69
+ return r;
70
+ }
71
+
72
+ SIMDE__FUNCTION_ATTRIBUTES
73
+ simde_int16x8_t
74
+ simde_vld1q_s16 (int16_t const ptr[8]) {
75
+ simde_int16x8_t r;
76
+ #if defined(SIMDE_NEON_NATIVE)
77
+ r.n = vld1q_s16(ptr);
78
+ #else
79
+ SIMDE__VECTORIZE
80
+ for (size_t i = 0 ; i < (sizeof(r.i16) / sizeof(r.i16[0])) ; i++) {
81
+ r.i16[i] = ptr[i];
82
+ }
83
+ #endif
84
+ return r;
85
+ }
86
+
87
+ SIMDE__FUNCTION_ATTRIBUTES
88
+ simde_int16x8_t
89
+ simde_x_vloadq_s16 (int16_t l0, int16_t l1, int16_t l2, int16_t l3,
90
+ int16_t l4, int16_t l5, int16_t l6, int16_t l7) {
91
+ int16_t v[] = { l0, l1, l2, l3,
92
+ l4, l5, l6, l7 };
93
+ return simde_vld1q_s16(v);
94
+ }
95
+
96
+ SIMDE__FUNCTION_ATTRIBUTES
97
+ simde_int16x8_t
98
+ simde_vdupq_n_s16 (int16_t value) {
99
+ simde_int16x8_t r;
100
+ #if defined(SIMDE_NEON_NATIVE)
101
+ r.n = vdupq_n_s16(value);
102
+ #else
103
+ SIMDE__VECTORIZE
104
+ for (size_t i = 0 ; i < (sizeof(r.i16) / sizeof(r.i16[0])) ; i++) {
105
+ r.i16[i] = value;
106
+ }
107
+ #endif
108
+ return r;
109
+ }
110
+
111
+ SIMDE__FUNCTION_ATTRIBUTES
112
+ simde_int16x8_t
113
+ simde_vmulq_s16(simde_int16x8_t a, simde_int16x8_t b) {
114
+ simde_int16x8_t r;
115
+ #if defined(SIMDE_NEON_NATIVE)
116
+ r.n = vmulq_s16(a.n, b.n);
117
+ #elif defined(SIMDE_SSE2_NATIVE)
118
+ r.sse = _mm_mul_epi16(a.sse, b.sse);
119
+ #else
120
+ SIMDE__VECTORIZE
121
+ for (size_t i = 0 ; i < (sizeof(r.i16) / sizeof(r.i16[0])) ; i++) {
122
+ r.i16[i] = a.i16[i] * b.i16[i];
123
+ }
124
+ #endif
125
+ return r;
126
+ }
127
+
128
+ SIMDE__FUNCTION_ATTRIBUTES
129
+ simde_int16x8_t
130
+ simde_vsubq_s16(simde_int16x8_t a, simde_int16x8_t b) {
131
+ simde_int16x8_t r;
132
+ #if defined(SIMDE_NEON_NATIVE)
133
+ r.n = vsubq_s16(a.n, b.n);
134
+ #elif defined(SIMDE_SSE2_NATIVE)
135
+ r.sse = _mm_sub_epi16(a.sse, b.sse);
136
+ #else
137
+ SIMDE__VECTORIZE
138
+ for (size_t i = 0 ; i < (sizeof(r.i16) / sizeof(r.i16[0])) ; i++) {
139
+ r.i16[i] = a.i16[i] - b.i16[i];
140
+ }
141
+ #endif
142
+ return r;
143
+ }
144
+
145
+ #endif
@@ -0,0 +1,140 @@
1
+ /* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
2
+ *
3
+ * Permission is hereby granted, free of charge, to any person
4
+ * obtaining a copy of this software and associated documentation
5
+ * files (the "Software"), to deal in the Software without
6
+ * restriction, including without limitation the rights to use, copy,
7
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
8
+ * of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be
12
+ * included in all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ * SOFTWARE.
22
+ */
23
+
24
+ #if !defined(SIMDE__INSIDE_NEON_H)
25
+ # error Do not include simde/arm/neon/int32x2.h directly; use simde/arm/neon.h.
26
+ #endif
27
+
28
+ #if !defined(SIMDE__NEON_INT32X2_H)
29
+ #define SIMDE__NEON_INT32X2_H
30
+
31
/* Portable stand-in for ARM NEON's int32x2_t: a 64-bit vector of two
 * signed 32-bit lanes, overlaid with native views where available. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  int32_t i32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; /* GCC vector-extension view (8 bytes) */
#else
  int32_t i32[2]; /* portable per-lane view */
#endif

#if defined(SIMDE_NEON_NATIVE)
  int32x2_t n; /* real NEON register view */
#endif

#if defined(SIMDE_NEON_MMX)
  __m64 mmx; /* single 64-bit MMX register view */
#endif
} simde_int32x2_t;

/* Layout sanity checks: must match the native type's size (when available)
 * and be exactly 8 bytes overall. */
#if defined(SIMDE_NEON_NATIVE)
HEDLEY_STATIC_ASSERT(sizeof(int32x2_t) == sizeof(simde_int32x2_t), "int32x2_t size doesn't match simde_int32x2_t size");
#endif
HEDLEY_STATIC_ASSERT(8 == sizeof(simde_int32x2_t), "simde_int32x2_t size incorrect");
51
+
52
+ SIMDE__FUNCTION_ATTRIBUTES
53
+ simde_int32x2_t
54
+ simde_vadd_s32(simde_int32x2_t a, simde_int32x2_t b) {
55
+ simde_int32x2_t r;
56
+ #if defined(SIMDE_NEON_NATIVE)
57
+ r.n = vadd_s32(a.n, b.n);
58
+ #elif defined(SIMDE_MMX_NATIVE)
59
+ r.mmx = _mm_add_pi32(a.mmx, b.mmx);
60
+ #else
61
+ SIMDE__VECTORIZE
62
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
63
+ r.i32[i] = a.i32[i] + b.i32[i];
64
+ }
65
+ #endif
66
+ return r;
67
+ }
68
+
69
+ SIMDE__FUNCTION_ATTRIBUTES
70
+ simde_int32x2_t
71
+ simde_vld1_s32 (int32_t const ptr[2]) {
72
+ simde_int32x2_t r;
73
+ #if defined(SIMDE_NEON_NATIVE)
74
+ r.n = vld1_s32(ptr);
75
+ #else
76
+ SIMDE__VECTORIZE
77
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
78
+ r.i32[i] = ptr[i];
79
+ }
80
+ #endif
81
+ return r;
82
+ }
83
+
84
+ SIMDE__FUNCTION_ATTRIBUTES
85
+ simde_int32x2_t
86
+ simde_x_vload_s32 (int32_t l0, int32_t l1) {
87
+ int32_t v[] = { l0, l1 };
88
+ return simde_vld1_s32(v);
89
+ }
90
+
91
+ SIMDE__FUNCTION_ATTRIBUTES
92
+ simde_int32x2_t
93
+ simde_vdup_n_s32 (int32_t value) {
94
+ simde_int32x2_t r;
95
+ #if defined(SIMDE_NEON_NATIVE)
96
+ r.n = vdup_n_s32(value);
97
+ #else
98
+ SIMDE__VECTORIZE
99
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
100
+ r.i32[i] = value;
101
+ }
102
+ #endif
103
+ return r;
104
+ }
105
+
106
+ SIMDE__FUNCTION_ATTRIBUTES
107
+ simde_int32x2_t
108
+ simde_vmul_s32(simde_int32x2_t a, simde_int32x2_t b) {
109
+ simde_int32x2_t r;
110
+ #if defined(SIMDE_NEON_NATIVE)
111
+ r.n = vmul_s32(a.n, b.n);
112
+ #elif defined(SIMDE_MMX_NATIVE)
113
+ r.mmx = _mm_mul_pi32(a.mmx, b.mmx);
114
+ #else
115
+ SIMDE__VECTORIZE
116
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
117
+ r.i32[i] = a.i32[i] * b.i32[i];
118
+ }
119
+ #endif
120
+ return r;
121
+ }
122
+
123
+ SIMDE__FUNCTION_ATTRIBUTES
124
+ simde_int32x2_t
125
+ simde_vsub_s32(simde_int32x2_t a, simde_int32x2_t b) {
126
+ simde_int32x2_t r;
127
+ #if defined(SIMDE_NEON_NATIVE)
128
+ r.n = vsub_s32(a.n, b.n);
129
+ #elif defined(SIMDE_MMX_NATIVE)
130
+ r.mmx = _mm_sub_pi32(a.mmx, b.mmx);
131
+ #else
132
+ SIMDE__VECTORIZE
133
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
134
+ r.i32[i] = a.i32[i] - b.i32[i];
135
+ }
136
+ #endif
137
+ return r;
138
+ }
139
+
140
+ #endif
@@ -0,0 +1,143 @@
1
+ /* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
2
+ *
3
+ * Permission is hereby granted, free of charge, to any person
4
+ * obtaining a copy of this software and associated documentation
5
+ * files (the "Software"), to deal in the Software without
6
+ * restriction, including without limitation the rights to use, copy,
7
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
8
+ * of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be
12
+ * included in all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ * SOFTWARE.
22
+ */
23
+
24
+ #if !defined(SIMDE__INSIDE_NEON_H)
25
+ # error Do not include simde/arm/neon/int32x4.h directly; use simde/arm/neon.h.
26
+ #endif
27
+
28
+ #if !defined(SIMDE__NEON_INT32X4_H)
29
+ #define SIMDE__NEON_INT32X4_H
30
+
31
/* Portable stand-in for ARM NEON's int32x4_t: a 128-bit vector of four
 * signed 32-bit lanes, overlaid with native views where available. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; /* GCC vector-extension view (16 bytes) */
#else
  int32_t i32[4]; /* portable per-lane view */
#endif

#if defined(SIMDE_NEON_NATIVE)
  int32x4_t n; /* real NEON register view */
#endif

#if defined(SIMDE_NEON_MMX)
  __m64 mmx[2]; /* two 64-bit MMX halves */
#endif
#if defined(SIMDE_NEON_SSE2)
  __m128i sse; /* SSE2 register view */
#endif
} simde_int32x4_t;

/* Layout sanity checks: must match the native type's size (when available)
 * and be exactly 16 bytes overall. */
#if defined(SIMDE_NEON_NATIVE)
HEDLEY_STATIC_ASSERT(sizeof(int32x4_t) == sizeof(simde_int32x4_t), "int32x4_t size doesn't match simde_int32x4_t size");
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde_int32x4_t), "simde_int32x4_t size incorrect");
54
+
55
+ SIMDE__FUNCTION_ATTRIBUTES
56
+ simde_int32x4_t
57
+ simde_vaddq_s32(simde_int32x4_t a, simde_int32x4_t b) {
58
+ simde_int32x4_t r;
59
+ #if defined(SIMDE_NEON_NATIVE)
60
+ r.n = vaddq_s32(a.n, b.n);
61
+ #elif defined(SIMDE_SSE2_NATIVE)
62
+ r.sse = _mm_add_epi32(a.sse, b.sse);
63
+ #else
64
+ SIMDE__VECTORIZE
65
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
66
+ r.i32[i] = a.i32[i] + b.i32[i];
67
+ }
68
+ #endif
69
+ return r;
70
+ }
71
+
72
+ SIMDE__FUNCTION_ATTRIBUTES
73
+ simde_int32x4_t
74
+ simde_vld1q_s32 (int32_t const ptr[4]) {
75
+ simde_int32x4_t r;
76
+ #if defined(SIMDE_NEON_NATIVE)
77
+ r.n = vld1q_s32(ptr);
78
+ #else
79
+ SIMDE__VECTORIZE
80
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
81
+ r.i32[i] = ptr[i];
82
+ }
83
+ #endif
84
+ return r;
85
+ }
86
+
87
+ SIMDE__FUNCTION_ATTRIBUTES
88
+ simde_int32x4_t
89
+ simde_x_vloadq_s32 (int32_t l0, int32_t l1, int32_t l2, int32_t l3) {
90
+ int32_t v[] = { l0, l1, l2, l3 };
91
+ return simde_vld1q_s32(v);
92
+ }
93
+
94
+ SIMDE__FUNCTION_ATTRIBUTES
95
+ simde_int32x4_t
96
+ simde_vdupq_n_s32 (int32_t value) {
97
+ simde_int32x4_t r;
98
+ #if defined(SIMDE_NEON_NATIVE)
99
+ r.n = vdupq_n_s32(value);
100
+ #else
101
+ SIMDE__VECTORIZE
102
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
103
+ r.i32[i] = value;
104
+ }
105
+ #endif
106
+ return r;
107
+ }
108
+
109
+ SIMDE__FUNCTION_ATTRIBUTES
110
+ simde_int32x4_t
111
+ simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) {
112
+ simde_int32x4_t r;
113
+ #if defined(SIMDE_NEON_NATIVE)
114
+ r.n = vmulq_s32(a.n, b.n);
115
+ #elif defined(SIMDE_SSE2_NATIVE)
116
+ r.sse = _mm_mul_epi32(a.sse, b.sse);
117
+ #else
118
+ SIMDE__VECTORIZE
119
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
120
+ r.i32[i] = a.i32[i] * b.i32[i];
121
+ }
122
+ #endif
123
+ return r;
124
+ }
125
+
126
+ SIMDE__FUNCTION_ATTRIBUTES
127
+ simde_int32x4_t
128
+ simde_vsubq_s32(simde_int32x4_t a, simde_int32x4_t b) {
129
+ simde_int32x4_t r;
130
+ #if defined(SIMDE_NEON_NATIVE)
131
+ r.n = vsubq_s32(a.n, b.n);
132
+ #elif defined(SIMDE_SSE2_NATIVE)
133
+ r.sse = _mm_sub_epi32(a.sse, b.sse);
134
+ #else
135
+ SIMDE__VECTORIZE
136
+ for (size_t i = 0 ; i < (sizeof(r.i32) / sizeof(r.i32[0])) ; i++) {
137
+ r.i32[i] = a.i32[i] - b.i32[i];
138
+ }
139
+ #endif
140
+ return r;
141
+ }
142
+
143
+ #endif
@@ -0,0 +1,137 @@
1
+ /* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
2
+ *
3
+ * Permission is hereby granted, free of charge, to any person
4
+ * obtaining a copy of this software and associated documentation
5
+ * files (the "Software"), to deal in the Software without
6
+ * restriction, including without limitation the rights to use, copy,
7
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
8
+ * of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be
12
+ * included in all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ * SOFTWARE.
22
+ */
23
+
24
+ #if !defined(SIMDE__INSIDE_NEON_H)
25
+ # error Do not include simde/arm/neon/int64x1.h directly; use simde/arm/neon.h.
26
+ #endif
27
+
28
+ #if !defined(SIMDE__NEON_INT64X1_H)
29
+ #define SIMDE__NEON_INT64X1_H
30
+
31
/* Portable stand-in for ARM NEON's int64x1_t: a 64-bit vector holding one
 * signed 64-bit lane, overlaid with native views where available. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  int64_t i64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS; /* GCC vector-extension view (8 bytes) */
#else
  int64_t i64[1]; /* portable per-lane view (single lane) */
#endif

#if defined(SIMDE_NEON_NATIVE)
  int64x1_t n; /* real NEON register view */
#endif

#if defined(SIMDE_NEON_MMX)
  __m64 mmx; /* single 64-bit MMX register view */
#endif
} simde_int64x1_t;

/* Layout sanity checks: must match the native type's size (when available)
 * and be exactly 8 bytes overall. */
#if defined(SIMDE_NEON_NATIVE)
HEDLEY_STATIC_ASSERT(sizeof(int64x1_t) == sizeof(simde_int64x1_t), "int64x1_t size doesn't match simde_int64x1_t size");
#endif
HEDLEY_STATIC_ASSERT(8 == sizeof(simde_int64x1_t), "simde_int64x1_t size incorrect");
51
+
52
+ SIMDE__FUNCTION_ATTRIBUTES
53
+ simde_int64x1_t
54
+ simde_vadd_s64(simde_int64x1_t a, simde_int64x1_t b) {
55
+ simde_int64x1_t r;
56
+ #if defined(SIMDE_NEON_NATIVE)
57
+ r.n = vadd_s64(a.n, b.n);
58
+ #elif defined(SIMDE_MMX_NATIVE)
59
+ r.mmx = _mm_add_pi64(a.mmx, b.mmx);
60
+ #else
61
+ SIMDE__VECTORIZE
62
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
63
+ r.i64[i] = a.i64[i] + b.i64[i];
64
+ }
65
+ #endif
66
+ return r;
67
+ }
68
+
69
+ SIMDE__FUNCTION_ATTRIBUTES
70
+ simde_int64x1_t
71
+ simde_vld1_s64 (int64_t const ptr[1]) {
72
+ simde_int64x1_t r;
73
+ #if defined(SIMDE_NEON_NATIVE)
74
+ r.n = vld1_s64(ptr);
75
+ #else
76
+ SIMDE__VECTORIZE
77
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
78
+ r.i64[i] = ptr[i];
79
+ }
80
+ #endif
81
+ return r;
82
+ }
83
+
84
+ SIMDE__FUNCTION_ATTRIBUTES
85
+ simde_int64x1_t
86
+ simde_x_vload_s64 (int64_t l0) {
87
+ return simde_vld1_s64(&l0);
88
+ }
89
+
90
+ SIMDE__FUNCTION_ATTRIBUTES
91
+ simde_int64x1_t
92
+ simde_vdup_n_s64 (int64_t value) {
93
+ simde_int64x1_t r;
94
+ #if defined(SIMDE_NEON_NATIVE)
95
+ r.n = vdup_n_s64(value);
96
+ #else
97
+ SIMDE__VECTORIZE
98
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
99
+ r.i64[i] = value;
100
+ }
101
+ #endif
102
+ return r;
103
+ }
104
+
105
+ SIMDE__FUNCTION_ATTRIBUTES
106
+ simde_int64x1_t
107
+ simde_x_vmul_s64(simde_int64x1_t a, simde_int64x1_t b) {
108
+ simde_int64x1_t r;
109
+ #if defined(SIMDE_MMX_NATIVE)
110
+ r.mmx = _mm_mul_pi64(a.mmx, b.mmx);
111
+ #else
112
+ SIMDE__VECTORIZE
113
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
114
+ r.i64[i] = a.i64[i] * b.i64[i];
115
+ }
116
+ #endif
117
+ return r;
118
+ }
119
+
120
+ SIMDE__FUNCTION_ATTRIBUTES
121
+ simde_int64x1_t
122
+ simde_vsub_s64(simde_int64x1_t a, simde_int64x1_t b) {
123
+ simde_int64x1_t r;
124
+ #if defined(SIMDE_NEON_NATIVE)
125
+ r.n = vsub_s64(a.n, b.n);
126
+ #elif defined(SIMDE_MMX_NATIVE)
127
+ r.mmx = _mm_sub_pi64(a.mmx, b.mmx);
128
+ #else
129
+ SIMDE__VECTORIZE
130
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
131
+ r.i64[i] = a.i64[i] - b.i64[i];
132
+ }
133
+ #endif
134
+ return r;
135
+ }
136
+
137
+ #endif
@@ -0,0 +1,141 @@
1
+ /* Copyright (c) 2018-2019 Evan Nemerson <evan@nemerson.com>
2
+ *
3
+ * Permission is hereby granted, free of charge, to any person
4
+ * obtaining a copy of this software and associated documentation
5
+ * files (the "Software"), to deal in the Software without
6
+ * restriction, including without limitation the rights to use, copy,
7
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
8
+ * of the Software, and to permit persons to whom the Software is
9
+ * furnished to do so, subject to the following conditions:
10
+ *
11
+ * The above copyright notice and this permission notice shall be
12
+ * included in all copies or substantial portions of the Software.
13
+ *
14
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ * SOFTWARE.
22
+ */
23
+
24
+ #if !defined(SIMDE__INSIDE_NEON_H)
25
+ # error Do not include simde/arm/neon/int64x2.h directly; use simde/arm/neon.h.
26
+ #endif
27
+
28
+ #if !defined(SIMDE__NEON_INT64X2_H)
29
+ #define SIMDE__NEON_INT64X2_H
30
+
31
/* Portable stand-in for ARM NEON's int64x2_t: a 128-bit vector of two
 * signed 64-bit lanes, overlaid with native views where available. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; /* GCC vector-extension view (16 bytes) */
#else
  int64_t i64[2]; /* portable per-lane view */
#endif

#if defined(SIMDE_NEON_NATIVE)
  int64x2_t n; /* real NEON register view */
#endif

#if defined(SIMDE_NEON_MMX)
  __m64 mmx[2]; /* two 64-bit MMX halves */
#endif
#if defined(SIMDE_NEON_SSE2)
  __m128i sse; /* SSE2 register view */
#endif
} simde_int64x2_t;

/* Layout sanity checks: must match the native type's size (when available)
 * and be exactly 16 bytes overall. */
#if defined(SIMDE_NEON_NATIVE)
HEDLEY_STATIC_ASSERT(sizeof(int64x2_t) == sizeof(simde_int64x2_t), "int64x2_t size doesn't match simde_int64x2_t size");
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde_int64x2_t), "simde_int64x2_t size incorrect");
54
+
55
+ SIMDE__FUNCTION_ATTRIBUTES
56
+ simde_int64x2_t
57
+ simde_vaddq_s64(simde_int64x2_t a, simde_int64x2_t b) {
58
+ simde_int64x2_t r;
59
+ #if defined(SIMDE_NEON_NATIVE)
60
+ r.n = vaddq_s64(a.n, b.n);
61
+ #elif defined(SIMDE_SSE2_NATIVE)
62
+ r.sse = _mm_add_epi64(a.sse, b.sse);
63
+ #else
64
+ SIMDE__VECTORIZE
65
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
66
+ r.i64[i] = a.i64[i] + b.i64[i];
67
+ }
68
+ #endif
69
+ return r;
70
+ }
71
+
72
+ SIMDE__FUNCTION_ATTRIBUTES
73
+ simde_int64x2_t
74
+ simde_vld1q_s64 (int64_t const ptr[2]) {
75
+ simde_int64x2_t r;
76
+ #if defined(SIMDE_NEON_NATIVE)
77
+ r.n = vld1q_s64(ptr);
78
+ #else
79
+ SIMDE__VECTORIZE
80
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
81
+ r.i64[i] = ptr[i];
82
+ }
83
+ #endif
84
+ return r;
85
+ }
86
+
87
+ SIMDE__FUNCTION_ATTRIBUTES
88
+ simde_int64x2_t
89
+ simde_x_vloadq_s64 (int64_t l0, int64_t l1) {
90
+ int64_t v[] = { l0, l1 };
91
+ return simde_vld1q_s64(v);
92
+ }
93
+
94
+ SIMDE__FUNCTION_ATTRIBUTES
95
+ simde_int64x2_t
96
+ simde_vdupq_n_s64 (int64_t value) {
97
+ simde_int64x2_t r;
98
+ #if defined(SIMDE_NEON_NATIVE)
99
+ r.n = vdupq_n_s64(value);
100
+ #else
101
+ SIMDE__VECTORIZE
102
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
103
+ r.i64[i] = value;
104
+ }
105
+ #endif
106
+ return r;
107
+ }
108
+
109
+ SIMDE__FUNCTION_ATTRIBUTES
110
+ simde_int64x2_t
111
+ simde_x_vmulq_s64(simde_int64x2_t a, simde_int64x2_t b) {
112
+ simde_int64x2_t r;
113
+ #if defined(SIMDE_SSE2_NATIVE)
114
+ r.sse = _mm_mul_epi64(a.sse, b.sse);
115
+ #else
116
+ SIMDE__VECTORIZE
117
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
118
+ r.i64[i] = a.i64[i] * b.i64[i];
119
+ }
120
+ #endif
121
+ return r;
122
+ }
123
+
124
+ SIMDE__FUNCTION_ATTRIBUTES
125
+ simde_int64x2_t
126
+ simde_vsubq_s64(simde_int64x2_t a, simde_int64x2_t b) {
127
+ simde_int64x2_t r;
128
+ #if defined(SIMDE_NEON_NATIVE)
129
+ r.n = vsubq_s64(a.n, b.n);
130
+ #elif defined(SIMDE_SSE2_NATIVE)
131
+ r.sse = _mm_sub_epi64(a.sse, b.sse);
132
+ #else
133
+ SIMDE__VECTORIZE
134
+ for (size_t i = 0 ; i < (sizeof(r.i64) / sizeof(r.i64[0])) ; i++) {
135
+ r.i64[i] = a.i64[i] - b.i64[i];
136
+ }
137
+ #endif
138
+ return r;
139
+ }
140
+
141
+ #endif