gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Files changed (49)
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h
@@ -0,0 +1,637 @@
+ /* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+
+    This file is part of GCC.
+
+    GCC is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2, or (at your option)
+    any later version.
+
+    GCC is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with GCC; see the file COPYING. If not, write to
+    the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+    Boston, MA 02110-1301, USA. */
+
+ /* As a special exception, if you include this header file into source
+    files compiled by GCC, this header file does not by itself cause
+    the resulting executable to be covered by the GNU General Public
+    License. This exception does not however invalidate any other
+    reasons why the executable file might be covered by the GNU General
+    Public License. */
+
+ /* Implemented from the specification included in the Intel C++ Compiler
+    User Guide and Reference, version 8.0. */
+
+ #ifndef _MMINTRIN_H_INCLUDED
+ #define _MMINTRIN_H_INCLUDED
+
+ #ifndef __MMX__
+ # error "MMX instruction set not enabled"
+ #else
+ /* The Intel API is flexible enough that we must allow aliasing with other
+    vector types, and their scalar components. */
+ typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+
+ /* Internal data types for implementing the intrinsics. */
+ typedef int __v2si __attribute__ ((__vector_size__ (8)));
+ typedef short __v4hi __attribute__ ((__vector_size__ (8)));
+ typedef char __v8qi __attribute__ ((__vector_size__ (8)));
+
+ /* Empty the multimedia state. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_empty (void)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _m_empty (void)
+ ;
+
+ /* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtsi32_si64 (int __i)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_from_int (int __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert I to a __m64 object. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtsi64x_si64 (long long __i)
+ ;
+
+ /* Convert I to a __m64 object. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi64x (long long __i)
+ ;
+ #endif
+
+ /* Convert the lower 32 bits of the __m64 object into an integer. */
+ static __inline int __attribute__((__always_inline__))
+ _mm_cvtsi64_si32 (__m64 __i)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _m_to_int (__m64 __i)
+ ;
+
+ #ifdef __x86_64__
+ /* Convert the __m64 object to a 64-bit integer. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsi64_si64x (__m64 __i)
+ ;
+ #endif
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+    the result, and the four 16-bit values from M2 into the upper four 8-bit
+    values of the result, all with signed saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packsswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+    the result, and the two 32-bit values from M2 into the upper two 16-bit
+    values of the result, all with signed saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packssdw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+    the result, and the four 16-bit values from M2 into the upper four 8-bit
+    values of the result, all with unsigned saturation. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_packs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_packuswb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the high half of M1 with the four
+    8-bit values from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the high half of M1 with the two
+    16-bit values from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the high half of M1 with the 32-bit
+    value from the high half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckhdq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the four 8-bit values from the low half of M1 with the four
+    8-bit values from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpcklbw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the two 16-bit values from the low half of M1 with the two
+    16-bit values from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpcklwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Interleave the 32-bit value from the low half of M1 with the 32-bit
+    value from the low half of M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_punpckldq (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 32-bit values in M1 to the 32-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 64-bit values in M1 to the 64-bit values in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_add_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+    saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+    saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+    saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+    saturated arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_adds_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_paddusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 64-bit values in M2 from the 64-bit values in M1. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sub_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+    saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubsb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+    signed saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubsw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+    unsigned saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pu8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubusb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+    unsigned saturating arithmetic. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_subs_pu16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psubusw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+    four 32-bit intermediate results, which are then summed by pairs to
+    produce two 32-bit results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_madd_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmaddwd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+    M2 and produce the high 16 bits of the 32-bit results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmulhw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+    the low 16 bits of the results. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pmullw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Shift four 16-bit values in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pslld (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pslldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M left by COUNT. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sll_si64 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllq (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_slli_si64 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psllqi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sra_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psraw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srai_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrawi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_sra_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrad (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srai_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psradi (__m64 __m, int __count)
+ ;
+
+ /* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_pi16 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlw (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_pi16 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlwi (__m64 __m, int __count)
+ ;
+
+ /* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_pi32 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrld (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_pi32 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrldi (__m64 __m, int __count)
+ ;
+
+ /* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srl_si64 (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlq (__m64 __m, __m64 __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_srli_si64 (__m64 __m, int __count)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_psrlqi (__m64 __m, int __count)
+ ;
+
+ /* Bit-wise AND the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_and_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pand (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+    64-bit value in M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_andnot_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pandn (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_or_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_por (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_xor_si64 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pxor (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+    test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqb (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtb (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+    the test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqw (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtw (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+    the test is true and zero if false. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpeqd (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _m_pcmpgtd (__m64 __m1, __m64 __m2)
+ ;
+
+ /* Creates a 64-bit zero. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setzero_si64 (void)
+ ;
+
+ /* Creates a vector of two 32-bit values; I0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi32 (int __i1, int __i0)
+ ;
+
+ /* Creates a vector of four 16-bit values; W0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+ ;
+
+ /* Creates a vector of eight 8-bit values; B0 is least significant. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+              char __b3, char __b2, char __b1, char __b0)
+ ;
+
+ /* Similar, but with the arguments in reverse order. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi32 (int __i0, int __i1)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+               char __b4, char __b5, char __b6, char __b7)
+ ;
+
+ /* Creates a vector of two 32-bit values, both elements containing I. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi32 (int __i)
+ ;
+
+ /* Creates a vector of four 16-bit values, all elements containing W. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi16 (short __w)
+ ;
+
+ /* Creates a vector of eight 8-bit values, all elements containing B. */
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_set1_pi8 (char __b)
+ ;
+
+ #endif /* __MMX__ */
+ #endif /* _MMINTRIN_H_INCLUDED */
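
For context, a minimal usage sketch (not part of the gem) of the intrinsics declared above. It assumes a real MMX-capable compiler and target (for example gcc -mmmx on x86); the gccxml copies of these headers deliberately omit the function bodies, so they exist to be parsed, not executed.

#include <mmintrin.h>
#include <stdio.h>

int main (void)
{
  /* Build two vectors of four 16-bit lanes; per the declaration above,
     the last argument of _mm_set_pi16 is the least significant lane. */
  __m64 a = _mm_set_pi16 (4, 3, 2, 1);      /* lanes: 1, 2, 3, 4 */
  __m64 b = _mm_set_pi16 (40, 30, 20, 10);  /* lanes: 10, 20, 30, 40 */

  /* Lane-wise 16-bit addition: the lanes become 11, 22, 33, 44. */
  __m64 sum = _mm_add_pi16 (a, b);

  /* The low 32 bits hold lane 0 (bits 0-15) and lane 1 (bits 16-31). */
  int lo = _mm_cvtsi64_si32 (sum);
  printf ("%d %d\n", lo & 0xFFFF, (lo >> 16) & 0xFFFF);  /* prints: 11 22 */

  /* Clear the MMX state before any subsequent floating-point code. */
  _mm_empty ();
  return 0;
}

Note that each _mm_* intrinsic is paired with an _m_* alias named after the underlying instruction (for example _m_paddw for _mm_add_pi16), kept for compatibility with the Intel compiler's original naming.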