gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Files changed (49)
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
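
Most of what changed in this release is gccxml's private copies of the GCC and Apple intrinsic headers (mmintrin.h, xmmintrin.h, emmintrin.h and friends). In these copies each intrinsic keeps its exact declaration and attributes, but the function body is stripped to a bare semicolon: gccxml only needs the signatures, not the target-specific __builtin_ia32_* calls that implement them in the real compiler headers. A sketch of the pattern, using _mm_add_pi8 from the diff below (the "real header" body is quoted from memory of GCC's mmintrin.h and is illustrative only):

    /* GCC's own mmintrin.h (illustrative): the intrinsic is an
       always-inline wrapper around a target-specific builtin. */
    extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_pi8 (__m64 __m1, __m64 __m2)
    {
      return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
    }

    /* The gccxml copy shipped in this gem keeps only the declaration;
       the body is replaced by a bare semicolon. */
    extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_pi8 (__m64 __m1, __m64 __m2)
    ;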
data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h
@@ -143,6 +143,7 @@ bool __builtin_islessequal(...);
  bool __builtin_islessgreater(...);
  bool __builtin_isunordered(...);
  bool __builtin_va_arg_pack(...);
+ int __builtin_va_arg_pack_len(...);

  /*

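The one-line addition above declares __builtin_va_arg_pack_len, a builtin introduced in GCC 4.3 alongside __builtin_va_arg_pack, so gccxml can parse headers (notably glibc's fortified wrappers) that use it. In real GCC it may only appear inside an always-inline variadic function, where it evaluates to the number of arguments in the forwarded pack. A minimal sketch of the canonical use; log_msg is my invented example, not from the gem, and it assumes GCC >= 4.3:

    #include <stdio.h>

    /* Always-inline varargs wrapper: it can forward its whole argument
       pack and branch on how many extra arguments the caller passed. */
    extern __inline __attribute__((__gnu_inline__, __always_inline__)) int
    log_msg (const char *fmt, ...)
    {
      if (__builtin_va_arg_pack_len () == 0)
        return fputs (fmt, stderr);   /* no extra args: fmt is a plain string */
      return fprintf (stderr, fmt, __builtin_va_arg_pack ());
    }

    int main (void)
    {
      log_msg ("plain message\n");
      log_msg ("value = %d\n", 42);
      return 0;
    }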
data/share/gccxml-0.9/GCC/4.3/mmintrin.h
@@ -0,0 +1,663 @@
+ /* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Free Software
+ Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ /* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+ /* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+ #ifndef _MMINTRIN_H_INCLUDED
+ #define _MMINTRIN_H_INCLUDED
+
+ #ifndef __MMX__
+ # error "MMX instruction set not enabled"
+ #else
+ /* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+ typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+
+ /* Internal data types for implementing the intrinsics. */
+ typedef int __v2si __attribute__ ((__vector_size__ (8)));
+ typedef short __v4hi __attribute__ ((__vector_size__ (8)));
+ typedef char __v8qi __attribute__ ((__vector_size__ (8)));
+
+ /* Empty the multimedia state. */
+ extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_empty (void)
+ ;
+
+ extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_empty (void)
+ ;
+
+ /* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si64 (int __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int (int __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert I to a __m64 object. */
+
+ /* Intel intrinsic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_from_int64 (long long __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_m64 (long long __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si64 (long long __i)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi64x (long long __i)
+ ;
+ #endif
+
+ /* Convert the lower 32 bits of the __m64 object into an integer. */
+ extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si32 (__m64 __i)
+ ;
+
+ extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int (__m64 __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert the __m64 object to a 64bit integer. */
+
+ /* Intel intrinsic. */
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_to_int64 (__m64 __i)
+ ;
+
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtm64_si64 (__m64 __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si64x (__m64 __i)
+ ;
+ #endif
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packsswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packssdw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_packuswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckhdq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpcklwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_punpckldq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 32-bit values in M1 to the 32-bit values in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ #ifdef __SSE2__
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_paddusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ #ifdef __SSE2__
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psubusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaddwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmullw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Shift four 16-bit values in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslld (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pslldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_si64 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllq (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si64 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psllqi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psraw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrawi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrad (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psradi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlw (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi16 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrld (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_pi32 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT; shift in zeros. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_si64 (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlq (__m64 __m, __m64 __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si64 (__m64 __m, int __count)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psrlqi (__m64 __m, int __count)
+ ;
+
+ /* Bit-wise AND the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pand (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pandn (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_por (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pxor (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqb (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqw (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpeqd (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pcmpgtd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Creates a 64-bit zero. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si64 (void)
+ ;
+
+ /* Creates a vector of two 32-bit values; I0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi32 (int __i1, int __i0)
+ ;
+
+ /* Creates a vector of four 16-bit values; W0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+ ;
+
+ /* Creates a vector of eight 8-bit values; B0 is least significant. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+ ;
+
+ /* Similar, but with the arguments in reverse order. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi32 (int __i0, int __i1)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+ ;
+
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+ ;
+
+ /* Creates a vector of two 32-bit values, both elements containing I. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi32 (int __i)
+ ;
+
+ /* Creates a vector of four 16-bit values, all elements containing W. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi16 (short __w)
+ ;
+
+ /* Creates a vector of eight 8-bit values, all elements containing B. */
+ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pi8 (char __b)
+ ;
+
+ #endif /* __MMX__ */
+ #endif /* _MMINTRIN_H_INCLUDED */
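
The declarations above track GCC's real mmintrin.h, so code written against this stub header compiles unchanged with an ordinary GCC. A small self-contained demo of a few of the intrinsics; this is my example, not part of the gem, and it assumes an x86 GCC with MMX support (build with something like gcc -mmmx demo.c):

    #include <mmintrin.h>
    #include <stdio.h>

    int main (void)
    {
      __m64 a = _mm_set1_pi8 (100);   /* all eight lanes = 100 */
      __m64 b = _mm_set_pi8 (60, 50, 40, 30, 20, 10, 0, -120);

      /* Signed saturating add: lanes that would exceed 127 (100+60,
         100+50, 100+40, 100+30) clamp to 127 instead of wrapping. */
      __m64 sum = _mm_adds_pi8 (a, b);

      /* Pull the two 32-bit halves out of the 64-bit result for printing. */
      unsigned lo = (unsigned) _mm_cvtsi64_si32 (sum);
      unsigned hi = (unsigned) _mm_cvtsi64_si32 (_mm_srli_si64 (sum, 32));

      _mm_empty ();   /* leave MMX state before other x87 floating-point use */
      printf ("%08x %08x\n", hi, lo);
      return 0;
    }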