gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Files changed (49)
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
@@ -0,0 +1,663 @@
+ /* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ /* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+ /* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+ #ifndef _MMINTRIN_H_INCLUDED
+ #define _MMINTRIN_H_INCLUDED
+
+ #ifndef __MMX__
+ # error "MMX instruction set not enabled"
+ #else
+ /* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+ typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+
+ /* Internal data types for implementing the intrinsics. */
+ typedef int __v2si __attribute__ ((__vector_size__ (8)));
+ typedef short __v4hi __attribute__ ((__vector_size__ (8)));
+ typedef char __v8qi __attribute__ ((__vector_size__ (8)));
+
+ /* Empty the multimedia state. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_empty (void)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _m_empty (void)
+ ;
+
+ /* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtsi32_si64 (int __i)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_from_int (int __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert I to a __m64 object. */
+
+ /* Intel intrinsic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_from_int64 (long long __i)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtsi64_m64 (long long __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtsi64x_si64 (long long __i)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi64x (long long __i)
+ ;
+ #endif
+
+ /* Convert the lower 32 bits of the __m64 object into an integer. */
+ static __inline int __attribute__((__always_inline__))
+ _mm_cvtsi64_si32 (__m64 __i)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _m_to_int (__m64 __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert the __m64 object to a 64bit integer. */
+
+ /* Intel intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _m_to_int64 (__m64 __i)
+ ;
+
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtm64_si64 (__m64 __i)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsi64_si64x (__m64 __i)
+ ;
+ #endif
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packsswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packssdw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packuswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhdq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpcklbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpcklwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckldq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 32-bit values in M1 to the 32-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ #ifdef __SSE2__
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ #ifdef __SSE2__
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_si64 (__m64 __m1, __m64 __m2)
+ ;
+ #endif
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmaddwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmulhw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmullw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Shift four 16-bit values in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pslld (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pslldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_si64 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllq (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_si64 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllqi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sra_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psraw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srai_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrawi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sra_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrad (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srai_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psradi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrld (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_si64 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlq (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_si64 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlqi (__m64 __m, int __count)
+ ;
+
+ /* Bit-wise AND the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_and_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pand (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pandn (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_or_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_por (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_xor_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pxor (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqb (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqw (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqd (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Creates a 64-bit zero. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setzero_si64 (void)
+ ;
+
+ /* Creates a vector of two 32-bit values; I0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi32 (int __i1, int __i0)
+ ;
+
+ /* Creates a vector of four 16-bit values; W0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+ ;
+
+ /* Creates a vector of eight 8-bit values; B0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+ ;
+
+ /* Similar, but with the arguments in reverse order. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi32 (int __i0, int __i1)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+ ;
+
+ /* Creates a vector of two 32-bit values, both elements containing I. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi32 (int __i)
+ ;
+
+ /* Creates a vector of four 16-bit values, all elements containing W. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi16 (short __w)
+ ;
+
+ /* Creates a vector of eight 8-bit values, all elements containing B. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi8 (char __b)
+ ;
+
+ #endif /* __MMX__ */
+ #endif /* _MMINTRIN_H_INCLUDED */
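
For readers skimming the diff, here is a minimal usage sketch of a few of the intrinsics declared above. It is not part of the gem: the bundled copies declare each intrinsic with an empty body (the bare `;` above), presumably so gccxml can parse code that includes them without GCC's builtins, so this sketch assumes compilation against GCC's real <mmintrin.h> with MMX enabled (e.g. `gcc -mmmx`).

/* Hypothetical demo, not shipped with gccxml_gem. */
#include <mmintrin.h>
#include <stdio.h>

int main(void)
{
    /* Two vectors of four 16-bit lanes; argument order puts lane 0 last. */
    __m64 a = _mm_set_pi16(4, 3, 2, 1);   /* lanes: 1, 2, 3, 4 */
    __m64 b = _mm_set1_pi16(10);          /* lanes: 10, 10, 10, 10 */

    /* Lane-wise 16-bit addition: lanes become 11, 12, 13, 14. */
    __m64 sum = _mm_add_pi16(a, b);

    /* The lower 32 bits hold lanes 0 and 1; the low 16 bits are lane 0 (11). */
    printf("lane 0 = %d\n", (short) _mm_cvtsi64_si32(sum));

    /* Clear the MMX state before any later x87 floating-point use. */
    _mm_empty();
    return 0;
}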