gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Files changed (49)
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h
@@ -0,0 +1,153 @@
+ #define __builtin_apply(x,y,z) ((void*)0)
+ #define __builtin_nan(x) (0.0)
+ #define __builtin_nanf(x) (0.0f)
+ #define __builtin_nanl(x) (0.0l)
+ #define __builtin_huge_val(x) (0.0)
+ #define __builtin_huge_valf(x) (0.0f)
+ #define __builtin_huge_vall(x) (0.0l)
+ #define __builtin_apply_args(x) ((void*)0)
+ #define __builtin_types_compatible_p(x,y) 0
+ #define __builtin_choose_expr(x,y,z) int
+ #define __builtin_constant_p(x) 0
+ void* __builtin_memchr(void const*, int, unsigned int);
+ void __builtin_return (void *RESULT);
+ void * __builtin_return_address (unsigned int LEVEL);
+ void * __builtin_frame_address (unsigned int LEVEL);
+ long __builtin_expect (long EXP, long C);
+ void __builtin_prefetch (const void *ADDR, ...);
+ double __builtin_inf (void);
+ float __builtin_inff (void);
+ long double __builtin_infl (void);
+ double __builtin_nans (const char *str);
+ float __builtin_nansf (const char *str);
+ long double __builtin_nansl (const char *str);
+ double __builtin_acos(double);
+ float __builtin_acosf(float);
+ long double __builtin_acosl(long double);
+ double __builtin_asin(double);
+ float __builtin_asinf(float);
+ long double __builtin_asinl(long double);
+ double __builtin_atan(double);
+ double __builtin_atan2(double, double);
+ float __builtin_atan2f(float, float);
+ long double __builtin_atan2l(long double, long double);
+ float __builtin_atanf(float);
+ long double __builtin_atanl(long double);
+ double __builtin_ceil(double);
+ float __builtin_ceilf(float);
+ long double __builtin_ceill(long double);
+ double __builtin_cos(double);
+ float __builtin_cosf(float);
+ double __builtin_cosh(double);
+ float __builtin_coshf(float);
+ long double __builtin_coshl(long double);
+ long double __builtin_cosl(long double);
+ double __builtin_exp(double);
+ float __builtin_expf(float);
+ long double __builtin_expl(long double);
+ double __builtin_fabs(double);
+ float __builtin_fabsf(float);
+ long double __builtin_fabsl(long double);
+ double __builtin_floor(double);
+ float __builtin_floorf(float);
+ long double __builtin_floorl(long double);
+ float __builtin_fmodf(float, float);
+ long double __builtin_fmodl(long double, long double);
+ double __builtin_frexp(double, int*);
+ float __builtin_frexpf(float, int*);
+ long double __builtin_frexpl(long double, int*);
+ double __builtin_ldexp(double, int);
+ float __builtin_ldexpf(float, int);
+ long double __builtin_ldexpl(long double, int);
+ double __builtin_log(double);
+ double __builtin_log10(double);
+ float __builtin_log10f(float);
+ long double __builtin_log10l(long double);
+ float __builtin_logf(float);
+ long double __builtin_logl(long double);
+ float __builtin_modff(float, float*);
+ long double __builtin_modfl(long double, long double*);
+ float __builtin_powf(float, float);
+ long double __builtin_powl(long double, long double);
+ double __builtin_powi(double, int);
+ float __builtin_powif(float, int);
+ long double __builtin_powil(long double, int);
+ double __builtin_sin(double);
+ float __builtin_sinf(float);
+ double __builtin_sinh(double);
+ float __builtin_sinhf(float);
+ long double __builtin_sinhl(long double);
+ long double __builtin_sinl(long double);
+ double __builtin_sqrt(double);
+ float __builtin_sqrtf(float);
+ long double __builtin_sqrtl(long double);
+ double __builtin_tan(double);
+ float __builtin_tanf(float);
+ double __builtin_tanh(double);
+ float __builtin_tanhf(float);
+ long double __builtin_tanhl(long double);
+ long double __builtin_tanl(long double);
+ float __builtin_cabsf(float __complex__);
+ double __builtin_cabs(double __complex__);
+ long double __builtin_cabsl(long double __complex__);
+ float __builtin_cargf(float __complex__);
+ double __builtin_carg(double __complex__);
+ long double __builtin_cargl(long double __complex__);
+ int __builtin_ctz(int);
+ int __builtin_ctzl(long);
+ int __builtin_ctzll(long long);
+ int __builtin_popcount(int);
+ int __builtin_popcountl(long);
+ int __builtin_popcountll(long long);
+ float __complex__ __builtin_ccosf(float __complex__);
+ double __complex__ __builtin_ccos(double __complex__);
+ long double __complex__ __builtin_ccosl(long double __complex__);
+ float __complex__ __builtin_ccoshf(float __complex__);
+ double __complex__ __builtin_ccosh(double __complex__);
+ long double __complex__ __builtin_ccoshl(long double __complex__);
+ float __complex__ __builtin_cexpf(float __complex__);
+ double __complex__ __builtin_cexp(double __complex__);
+ long double __complex__ __builtin_cexpl(long double __complex__);
+ float __complex__ __builtin_clogf(float __complex__);
+ double __complex__ __builtin_clog(double __complex__);
+ long double __complex__ __builtin_clogl(long double __complex__);
+ float __complex__ __builtin_csinf(float __complex__);
+ double __complex__ __builtin_csin(double __complex__);
+ long double __complex__ __builtin_csinl(long double __complex__);
+ float __complex__ __builtin_csinhf(float __complex__);
+ double __complex__ __builtin_csinh(double __complex__);
+ long double __complex__ __builtin_csinhl(long double __complex__);
+ float __complex__ __builtin_csqrtf(float __complex__);
+ double __complex__ __builtin_csqrt(double __complex__);
+ long double __complex__ __builtin_csqrtl(long double __complex__);
+ float __complex__ __builtin_ctanf(float __complex__);
+ double __complex__ __builtin_ctan(double __complex__);
+ long double __complex__ __builtin_ctanl(long double __complex__);
+ float __complex__ __builtin_ctanhf(float __complex__);
+ double __complex__ __builtin_ctanh(double __complex__);
+ long double __complex__ __builtin_ctanhl(long double __complex__);
+ float __complex__ __builtin_cpowf(float __complex__, float __complex__);
+ double __complex__ __builtin_cpow(double __complex__, double __complex__);
+ long double __complex__ __builtin_cpowl(long double __complex__, long double __complex__);
+
+ /* The GCC 4.4 parser hard-codes handling of these, so they do not
+    have real signatures. */
+ bool __builtin_fpclassify(...);
+ bool __builtin_isfinite(...);
+ bool __builtin_isgreater(...);
+ bool __builtin_isgreaterequal(...);
+ bool __builtin_isinf(...);
+ bool __builtin_isinf_sign(...);
+ bool __builtin_isless(...);
+ bool __builtin_islessequal(...);
+ bool __builtin_islessgreater(...);
+ bool __builtin_isnan(...);
+ bool __builtin_isnormal(...);
+ bool __builtin_isunordered(...);
+ bool __builtin_va_arg_pack(...);
+ int __builtin_va_arg_pack_len(...);
+
+ /* We fake some constant expressions from the GCC 4.4 parser. */
+ #define __is_pod(x) false
+ #define __is_empty(x) false
+ #define __has_trivial_destructor(x) false
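
The stub declarations above exist so that gccxml's parser, which is derived from an older GCC front end, can accept source written for GCC 4.4. A minimal sketch of the effect, assuming gccxml pre-includes gccxml_builtins.h ahead of user code (the exact flag plumbing lives in the gem's data/gccxml.rb and the gccxml binary, not in this diff; the file and function names below are hypothetical):

/* demo.cpp (hypothetical input): these GCC 4.4-isms parse under gccxml
   only because of the stub declarations above. */
bool classify(double x)
{
    /* Declared above as `bool __builtin_isnan(...)`, so any argument
       list is accepted; gccxml records declarations, not semantics. */
    return __builtin_isnan(x) && !__builtin_isinf(x);
}

double quiet_nan()
{
    /* __builtin_nan is a macro stub here, expanding to (0.0); only the
       resulting declaration shapes matter for the emitted XML. */
    return __builtin_nan("");
}
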
data/share/gccxml-0.9/GCC/4.4/mmintrin.h
@@ -0,0 +1,662 @@
+ /* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+    Free Software Foundation, Inc.
+
+    This file is part of GCC.
+
+    GCC is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 3, or (at your option)
+    any later version.
+
+    GCC is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    Under Section 7 of GPL version 3, you are granted additional
+    permissions described in the GCC Runtime Library Exception, version
+    3.1, as published by the Free Software Foundation.
+
+    You should have received a copy of the GNU General Public License and
+    a copy of the GCC Runtime Library Exception along with this program;
+    see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+    <http://www.gnu.org/licenses/>. */
+
+ /* Implemented from the specification included in the Intel C++ Compiler
+    User Guide and Reference, version 9.0. */
+
+ #ifndef _MMINTRIN_H_INCLUDED
+ #define _MMINTRIN_H_INCLUDED
+
+ #ifndef __MMX__
+ # error "MMX instruction set not enabled"
+ #else
+ /* The Intel API is flexible enough that we must allow aliasing with other
+    vector types, and their scalar components. */
+ typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+
+ /* Internal data types for implementing the intrinsics. */
+ typedef int __v2si __attribute__ ((__vector_size__ (8)));
+ typedef short __v4hi __attribute__ ((__vector_size__ (8)));
+ typedef char __v8qi __attribute__ ((__vector_size__ (8)));
+ typedef long long __v1di __attribute__ ((__vector_size__ (8)));
+ typedef float __v2sf __attribute__ ((__vector_size__ (8)));
+
+ /* Empty the multimedia state. */
+ extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_empty (void)
+ ;
+
+ extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_empty (void)
+ ;
+
+ /* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si64 (int __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int (int __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert I to a __m64 object. */
+
+ /* Intel intrinsic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int64 (long long __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_m64 (long long __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si64 (long long __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi64x (long long __i)
+ ;
+ #endif
+
+ /* Convert the lower 32 bits of the __m64 object into an integer. */
+ extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si32 (__m64 __i)
+ ;
+
+ extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int (__m64 __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert the __m64 object to a 64-bit integer. */
+
+ /* Intel intrinsic. */
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int64 (__m64 __i)
+ ;
+
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtm64_si64 (__m64 __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si64x (__m64 __i)
+ ;
+ #endif
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+    the result, and the four 16-bit values from M2 into the upper four 8-bit
+    values of the result, all with signed saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packsswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+    the result, and the two 32-bit values from M2 into the upper two 16-bit
+    values of the result, all with signed saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packssdw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+    the result, and the four 16-bit values from M2 into the upper four 8-bit
+    values of the result, all with unsigned saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packuswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the high half of M1 with the four
+    8-bit values from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the high half of M1 with the two
+    16-bit values from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the high half of M1 with the 32-bit
+    value from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhdq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the low half of M1 with the four
+    8-bit values from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the low half of M1 with the two
+    16-bit values from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the low half of M1 with the 32-bit
+    value from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckldq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 32-bit values in M1 to the 32-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ #ifdef __SSE2__
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+    saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+    saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+    saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+    saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 64-bit value in M2 from the 64-bit value in M1. */
+ #ifdef __SSE2__
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+    saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+    signed saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+    unsigned saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+    unsigned saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+    four 32-bit intermediate results, which are then summed by pairs to
+    produce two 32-bit results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaddwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+    M2 and produce the high 16 bits of the 32-bit results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+    the low 16 bits of the results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmullw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Shift four 16-bit values in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslld (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_si64 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllq (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si64 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllqi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psraw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrawi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrad (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psradi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrld (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_si64 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlq (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si64 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlqi (__m64 __m, int __count)
+ ;
+
+ /* Bit-wise AND the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pand (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+    64-bit value in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pandn (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_por (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pxor (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+    test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqb (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+    the test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqw (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+    the test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqd (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Creates a 64-bit zero. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si64 (void)
+ ;
+
+ /* Creates a vector of two 32-bit values; I0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi32 (int __i1, int __i0)
+ ;
+
+ /* Creates a vector of four 16-bit values; W0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+ ;
+
+ /* Creates a vector of eight 8-bit values; B0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+              char __b3, char __b2, char __b1, char __b0)
+ ;
+
+ /* Similar, but with the arguments in reverse order. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi32 (int __i0, int __i1)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+               char __b4, char __b5, char __b6, char __b7)
+ ;
+
+ /* Creates a vector of two 32-bit values, both elements containing I. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi32 (int __i)
+ ;
+
+ /* Creates a vector of four 16-bit values, all elements containing W. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi16 (short __w)
+ ;
+
+ /* Creates a vector of eight 8-bit values, all elements containing B. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi8 (char __b)
+ ;
+
+ #endif /* __MMX__ */
+ #endif /* _MMINTRIN_H_INCLUDED */
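
For context, a short usage sketch: the declarations above are body-less because gccxml only needs signatures to emit its XML; code such as the following parses against this header, while a real GCC (built with -mmmx) supplies the genuine inline bodies from its own mmintrin.h. The file and helper names below are hypothetical.

/* demo_mmx.c (hypothetical): exercises two of the intrinsics declared
   in this header. */
#include <mmintrin.h>

__m64 add_bytes(__m64 a, __m64 b)
{
    return _mm_add_pi8(a, b);   /* eight lane-wise 8-bit adds (paddb) */
}

__m64 splat16(short w)
{
    return _mm_set1_pi16(w);    /* broadcast w into all four 16-bit lanes */
}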