gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Sign up to get free protection for your applications and to get access to all the features.
Files changed (49) hide show
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
@@ -0,0 +1,1004 @@
1
+ /* Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
2
+
3
+ This file is part of GCC.
4
+
5
+ GCC is free software; you can redistribute it and/or modify
6
+ it under the terms of the GNU General Public License as published by
7
+ the Free Software Foundation; either version 2, or (at your option)
8
+ any later version.
9
+
10
+ GCC is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ GNU General Public License for more details.
14
+
15
+ You should have received a copy of the GNU General Public License
16
+ along with GCC; see the file COPYING. If not, write to
17
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
18
+ Boston, MA 02110-1301, USA. */
19
+
20
+ /* As a special exception, if you include this header file into source
21
+ files compiled by GCC, this header file does not by itself cause
22
+ the resulting executable to be covered by the GNU General Public
23
+ License. This exception does not however invalidate any other
24
+ reasons why the executable file might be covered by the GNU General
25
+ Public License. */
26
+
27
+ /* Implemented from the specification included in the Intel C++ Compiler
28
+ User Guide and Reference, version 8.0. */
29
+
30
+ #ifndef _EMMINTRIN_H_INCLUDED
31
+ #define _EMMINTRIN_H_INCLUDED
32
+
33
+ #ifdef __SSE2__
34
+ #include <xmmintrin.h>
35
+
36
+ /* SSE2 */
37
+ typedef double __v2df __attribute__ ((__vector_size__ (16)));
38
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
39
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
40
+ typedef short __v8hi __attribute__ ((__vector_size__ (16)));
41
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
42
+
43
+ /* The Intel API is flexible enough that we must allow aliasing with other
44
+ vector types, and their scalar components. */
45
+ typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
46
+ typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
47
+
48
+ /* Create a selector for use with the SHUFPD instruction. */
49
+ #define _MM_SHUFFLE2(fp1,fp0) \
50
+ (((fp1) << 1) | (fp0))
51
+
52
+ /* Create a vector with element 0 as F and the rest zero. */
53
+ static __inline __m128d __attribute__((__always_inline__))
54
+ _mm_set_sd (double __F)
55
+ ;
56
+
57
+ /* Create a vector with both elements equal to F. */
58
+ static __inline __m128d __attribute__((__always_inline__))
59
+ _mm_set1_pd (double __F)
60
+ ;
61
+
62
+ static __inline __m128d __attribute__((__always_inline__))
63
+ _mm_set_pd1 (double __F)
64
+ ;
65
+
66
+ /* Create a vector with the lower value X and upper value W. */
67
+ static __inline __m128d __attribute__((__always_inline__))
68
+ _mm_set_pd (double __W, double __X)
69
+ ;
70
+
71
+ /* Create a vector with the lower value W and upper value X. */
72
+ static __inline __m128d __attribute__((__always_inline__))
73
+ _mm_setr_pd (double __W, double __X)
74
+ ;
75
+
76
+ /* Create a vector of zeros. */
77
+ static __inline __m128d __attribute__((__always_inline__))
78
+ _mm_setzero_pd (void)
79
+ ;
80
+
81
+ /* Sets the low DPFP value of A from the low value of B. */
82
+ static __inline __m128d __attribute__((__always_inline__))
83
+ _mm_move_sd (__m128d __A, __m128d __B)
84
+ ;
85
+
86
+ /* Load two DPFP values from P. The address must be 16-byte aligned. */
87
+ static __inline __m128d __attribute__((__always_inline__))
88
+ _mm_load_pd (double const *__P)
89
+ ;
90
+
91
+ /* Load two DPFP values from P. The address need not be 16-byte aligned. */
92
+ static __inline __m128d __attribute__((__always_inline__))
93
+ _mm_loadu_pd (double const *__P)
94
+ ;
95
+
96
+ /* Create a vector with all two elements equal to *P. */
97
+ static __inline __m128d __attribute__((__always_inline__))
98
+ _mm_load1_pd (double const *__P)
99
+ ;
100
+
101
+ /* Create a vector with element 0 as *P and the rest zero. */
102
+ static __inline __m128d __attribute__((__always_inline__))
103
+ _mm_load_sd (double const *__P)
104
+ ;
105
+
106
+ static __inline __m128d __attribute__((__always_inline__))
107
+ _mm_load_pd1 (double const *__P)
108
+ ;
109
+
110
+ /* Load two DPFP values in reverse order. The address must be aligned. */
111
+ static __inline __m128d __attribute__((__always_inline__))
112
+ _mm_loadr_pd (double const *__P)
113
+ ;
114
+
115
+ /* Store two DPFP values. The address must be 16-byte aligned. */
116
+ static __inline void __attribute__((__always_inline__))
117
+ _mm_store_pd (double *__P, __m128d __A)
118
+ ;
119
+
120
+ /* Store two DPFP values. The address need not be 16-byte aligned. */
121
+ static __inline void __attribute__((__always_inline__))
122
+ _mm_storeu_pd (double *__P, __m128d __A)
123
+ ;
124
+
125
+ /* Stores the lower DPFP value. */
126
+ static __inline void __attribute__((__always_inline__))
127
+ _mm_store_sd (double *__P, __m128d __A)
128
+ ;
129
+
130
+ static __inline void __attribute__((__always_inline__))
131
+ _mm_storel_pd (double *__P, __m128d __A)
132
+ ;
133
+
134
+ /* Stores the upper DPFP value. */
135
+ static __inline void __attribute__((__always_inline__))
136
+ _mm_storeh_pd (double *__P, __m128d __A)
137
+ ;
138
+
139
+ /* Store the lower DPFP value across two words.
140
+ The address must be 16-byte aligned. */
141
+ static __inline void __attribute__((__always_inline__))
142
+ _mm_store1_pd (double *__P, __m128d __A)
143
+ ;
144
+
145
+ static __inline void __attribute__((__always_inline__))
146
+ _mm_store_pd1 (double *__P, __m128d __A)
147
+ ;
148
+
149
+ /* Store two DPFP values in reverse order. The address must be aligned. */
150
+ static __inline void __attribute__((__always_inline__))
151
+ _mm_storer_pd (double *__P, __m128d __A)
152
+ ;
153
+
154
+ static __inline int __attribute__((__always_inline__))
155
+ _mm_cvtsi128_si32 (__m128i __A)
156
+ ;
157
+
158
+ #ifdef __x86_64__
159
+ static __inline long long __attribute__((__always_inline__))
160
+ _mm_cvtsi128_si64x (__m128i __A)
161
+ ;
162
+ #endif
163
+
164
+ static __inline __m128d __attribute__((__always_inline__))
165
+ _mm_add_pd (__m128d __A, __m128d __B)
166
+ ;
167
+
168
+ static __inline __m128d __attribute__((__always_inline__))
169
+ _mm_add_sd (__m128d __A, __m128d __B)
170
+ ;
171
+
172
+ static __inline __m128d __attribute__((__always_inline__))
173
+ _mm_sub_pd (__m128d __A, __m128d __B)
174
+ ;
175
+
176
+ static __inline __m128d __attribute__((__always_inline__))
177
+ _mm_sub_sd (__m128d __A, __m128d __B)
178
+ ;
179
+
180
+ static __inline __m128d __attribute__((__always_inline__))
181
+ _mm_mul_pd (__m128d __A, __m128d __B)
182
+ ;
183
+
184
+ static __inline __m128d __attribute__((__always_inline__))
185
+ _mm_mul_sd (__m128d __A, __m128d __B)
186
+ ;
187
+
188
+ static __inline __m128d __attribute__((__always_inline__))
189
+ _mm_div_pd (__m128d __A, __m128d __B)
190
+ ;
191
+
192
+ static __inline __m128d __attribute__((__always_inline__))
193
+ _mm_div_sd (__m128d __A, __m128d __B)
194
+ ;
195
+
196
+ static __inline __m128d __attribute__((__always_inline__))
197
+ _mm_sqrt_pd (__m128d __A)
198
+ ;
199
+
200
+ /* Return pair {sqrt (A[0), B[1]}. */
201
+ static __inline __m128d __attribute__((__always_inline__))
202
+ _mm_sqrt_sd (__m128d __A, __m128d __B)
203
+ ;
204
+
205
+ static __inline __m128d __attribute__((__always_inline__))
206
+ _mm_min_pd (__m128d __A, __m128d __B)
207
+ ;
208
+
209
+ static __inline __m128d __attribute__((__always_inline__))
210
+ _mm_min_sd (__m128d __A, __m128d __B)
211
+ ;
212
+
213
+ static __inline __m128d __attribute__((__always_inline__))
214
+ _mm_max_pd (__m128d __A, __m128d __B)
215
+ ;
216
+
217
+ static __inline __m128d __attribute__((__always_inline__))
218
+ _mm_max_sd (__m128d __A, __m128d __B)
219
+ ;
220
+
221
+ static __inline __m128d __attribute__((__always_inline__))
222
+ _mm_and_pd (__m128d __A, __m128d __B)
223
+ ;
224
+
225
+ static __inline __m128d __attribute__((__always_inline__))
226
+ _mm_andnot_pd (__m128d __A, __m128d __B)
227
+ ;
228
+
229
+ static __inline __m128d __attribute__((__always_inline__))
230
+ _mm_or_pd (__m128d __A, __m128d __B)
231
+ ;
232
+
233
+ static __inline __m128d __attribute__((__always_inline__))
234
+ _mm_xor_pd (__m128d __A, __m128d __B)
235
+ ;
236
+
237
+ static __inline __m128d __attribute__((__always_inline__))
238
+ _mm_cmpeq_pd (__m128d __A, __m128d __B)
239
+ ;
240
+
241
+ static __inline __m128d __attribute__((__always_inline__))
242
+ _mm_cmplt_pd (__m128d __A, __m128d __B)
243
+ ;
244
+
245
+ static __inline __m128d __attribute__((__always_inline__))
246
+ _mm_cmple_pd (__m128d __A, __m128d __B)
247
+ ;
248
+
249
+ static __inline __m128d __attribute__((__always_inline__))
250
+ _mm_cmpgt_pd (__m128d __A, __m128d __B)
251
+ ;
252
+
253
+ static __inline __m128d __attribute__((__always_inline__))
254
+ _mm_cmpge_pd (__m128d __A, __m128d __B)
255
+ ;
256
+
257
+ static __inline __m128d __attribute__((__always_inline__))
258
+ _mm_cmpneq_pd (__m128d __A, __m128d __B)
259
+ ;
260
+
261
+ static __inline __m128d __attribute__((__always_inline__))
262
+ _mm_cmpnlt_pd (__m128d __A, __m128d __B)
263
+ ;
264
+
265
+ static __inline __m128d __attribute__((__always_inline__))
266
+ _mm_cmpnle_pd (__m128d __A, __m128d __B)
267
+ ;
268
+
269
+ static __inline __m128d __attribute__((__always_inline__))
270
+ _mm_cmpngt_pd (__m128d __A, __m128d __B)
271
+ ;
272
+
273
+ static __inline __m128d __attribute__((__always_inline__))
274
+ _mm_cmpnge_pd (__m128d __A, __m128d __B)
275
+ ;
276
+
277
+ static __inline __m128d __attribute__((__always_inline__))
278
+ _mm_cmpord_pd (__m128d __A, __m128d __B)
279
+ ;
280
+
281
+ static __inline __m128d __attribute__((__always_inline__))
282
+ _mm_cmpunord_pd (__m128d __A, __m128d __B)
283
+ ;
284
+
285
+ static __inline __m128d __attribute__((__always_inline__))
286
+ _mm_cmpeq_sd (__m128d __A, __m128d __B)
287
+ ;
288
+
289
+ static __inline __m128d __attribute__((__always_inline__))
290
+ _mm_cmplt_sd (__m128d __A, __m128d __B)
291
+ ;
292
+
293
+ static __inline __m128d __attribute__((__always_inline__))
294
+ _mm_cmple_sd (__m128d __A, __m128d __B)
295
+ ;
296
+
297
+ static __inline __m128d __attribute__((__always_inline__))
298
+ _mm_cmpgt_sd (__m128d __A, __m128d __B)
299
+ ;
300
+
301
+ static __inline __m128d __attribute__((__always_inline__))
302
+ _mm_cmpge_sd (__m128d __A, __m128d __B)
303
+ ;
304
+
305
+ static __inline __m128d __attribute__((__always_inline__))
306
+ _mm_cmpneq_sd (__m128d __A, __m128d __B)
307
+ ;
308
+
309
+ static __inline __m128d __attribute__((__always_inline__))
310
+ _mm_cmpnlt_sd (__m128d __A, __m128d __B)
311
+ ;
312
+
313
+ static __inline __m128d __attribute__((__always_inline__))
314
+ _mm_cmpnle_sd (__m128d __A, __m128d __B)
315
+ ;
316
+
317
+ static __inline __m128d __attribute__((__always_inline__))
318
+ _mm_cmpngt_sd (__m128d __A, __m128d __B)
319
+ ;
320
+
321
+ static __inline __m128d __attribute__((__always_inline__))
322
+ _mm_cmpnge_sd (__m128d __A, __m128d __B)
323
+ ;
324
+
325
+ static __inline __m128d __attribute__((__always_inline__))
326
+ _mm_cmpord_sd (__m128d __A, __m128d __B)
327
+ ;
328
+
329
+ static __inline __m128d __attribute__((__always_inline__))
330
+ _mm_cmpunord_sd (__m128d __A, __m128d __B)
331
+ ;
332
+
333
+ static __inline int __attribute__((__always_inline__))
334
+ _mm_comieq_sd (__m128d __A, __m128d __B)
335
+ ;
336
+
337
+ static __inline int __attribute__((__always_inline__))
338
+ _mm_comilt_sd (__m128d __A, __m128d __B)
339
+ ;
340
+
341
+ static __inline int __attribute__((__always_inline__))
342
+ _mm_comile_sd (__m128d __A, __m128d __B)
343
+ ;
344
+
345
+ static __inline int __attribute__((__always_inline__))
346
+ _mm_comigt_sd (__m128d __A, __m128d __B)
347
+ ;
348
+
349
+ static __inline int __attribute__((__always_inline__))
350
+ _mm_comige_sd (__m128d __A, __m128d __B)
351
+ ;
352
+
353
+ static __inline int __attribute__((__always_inline__))
354
+ _mm_comineq_sd (__m128d __A, __m128d __B)
355
+ ;
356
+
357
+ static __inline int __attribute__((__always_inline__))
358
+ _mm_ucomieq_sd (__m128d __A, __m128d __B)
359
+ ;
360
+
361
+ static __inline int __attribute__((__always_inline__))
362
+ _mm_ucomilt_sd (__m128d __A, __m128d __B)
363
+ ;
364
+
365
+ static __inline int __attribute__((__always_inline__))
366
+ _mm_ucomile_sd (__m128d __A, __m128d __B)
367
+ ;
368
+
369
+ static __inline int __attribute__((__always_inline__))
370
+ _mm_ucomigt_sd (__m128d __A, __m128d __B)
371
+ ;
372
+
373
+ static __inline int __attribute__((__always_inline__))
374
+ _mm_ucomige_sd (__m128d __A, __m128d __B)
375
+ ;
376
+
377
+ static __inline int __attribute__((__always_inline__))
378
+ _mm_ucomineq_sd (__m128d __A, __m128d __B)
379
+ ;
380
+
381
+ /* Create a vector of Qi, where i is the element number. */
382
+
383
+ static __inline __m128i __attribute__((__always_inline__))
384
+ _mm_set_epi64x (long long __q1, long long __q0)
385
+ ;
386
+
387
+ static __inline __m128i __attribute__((__always_inline__))
388
+ _mm_set_epi64 (__m64 __q1, __m64 __q0)
389
+ ;
390
+
391
+ static __inline __m128i __attribute__((__always_inline__))
392
+ _mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
393
+ ;
394
+
395
+ static __inline __m128i __attribute__((__always_inline__))
396
+ _mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
397
+ short __q3, short __q2, short __q1, short __q0)
398
+ ;
399
+
400
+ static __inline __m128i __attribute__((__always_inline__))
401
+ _mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
402
+ char __q11, char __q10, char __q09, char __q08,
403
+ char __q07, char __q06, char __q05, char __q04,
404
+ char __q03, char __q02, char __q01, char __q00)
405
+ ;
406
+
407
+ /* Set all of the elements of the vector to A. */
408
+
409
+ static __inline __m128i __attribute__((__always_inline__))
410
+ _mm_set1_epi64x (long long __A)
411
+ ;
412
+
413
+ static __inline __m128i __attribute__((__always_inline__))
414
+ _mm_set1_epi64 (__m64 __A)
415
+ ;
416
+
417
+ static __inline __m128i __attribute__((__always_inline__))
418
+ _mm_set1_epi32 (int __A)
419
+ ;
420
+
421
+ static __inline __m128i __attribute__((__always_inline__))
422
+ _mm_set1_epi16 (short __A)
423
+ ;
424
+
425
+ static __inline __m128i __attribute__((__always_inline__))
426
+ _mm_set1_epi8 (char __A)
427
+ ;
428
+
429
+ /* Create a vector of Qi, where i is the element number.
430
+ The parameter order is reversed from the _mm_set_epi* functions. */
431
+
432
+ static __inline __m128i __attribute__((__always_inline__))
433
+ _mm_setr_epi64 (__m64 __q0, __m64 __q1)
434
+ ;
435
+
436
+ static __inline __m128i __attribute__((__always_inline__))
437
+ _mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
438
+ ;
439
+
440
+ static __inline __m128i __attribute__((__always_inline__))
441
+ _mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
442
+ short __q4, short __q5, short __q6, short __q7)
443
+ ;
444
+
445
+ static __inline __m128i __attribute__((__always_inline__))
446
+ _mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
447
+ char __q04, char __q05, char __q06, char __q07,
448
+ char __q08, char __q09, char __q10, char __q11,
449
+ char __q12, char __q13, char __q14, char __q15)
450
+ ;
451
+
452
+ /* Create a vector with element 0 as *P and the rest zero. */
453
+
454
+ static __inline __m128i __attribute__((__always_inline__))
455
+ _mm_load_si128 (__m128i const *__P)
456
+ ;
457
+
458
+ static __inline __m128i __attribute__((__always_inline__))
459
+ _mm_loadu_si128 (__m128i const *__P)
460
+ ;
461
+
462
+ static __inline __m128i __attribute__((__always_inline__))
463
+ _mm_loadl_epi64 (__m128i const *__P)
464
+ ;
465
+
466
+ static __inline void __attribute__((__always_inline__))
467
+ _mm_store_si128 (__m128i *__P, __m128i __B)
468
+ ;
469
+
470
+ static __inline void __attribute__((__always_inline__))
471
+ _mm_storeu_si128 (__m128i *__P, __m128i __B)
472
+ ;
473
+
474
+ static __inline void __attribute__((__always_inline__))
475
+ _mm_storel_epi64 (__m128i *__P, __m128i __B)
476
+ ;
477
+
478
+ static __inline __m64 __attribute__((__always_inline__))
479
+ _mm_movepi64_pi64 (__m128i __B)
480
+ ;
481
+
482
+ static __inline __m128i __attribute__((__always_inline__))
483
+ _mm_movpi64_epi64 (__m64 __A)
484
+ ;
485
+
486
+ static __inline __m128i __attribute__((__always_inline__))
487
+ _mm_move_epi64 (__m128i __A)
488
+ ;
489
+
490
+ /* Create a vector of zeros. */
491
+ static __inline __m128i __attribute__((__always_inline__))
492
+ _mm_setzero_si128 (void)
493
+ ;
494
+
495
+ static __inline __m128d __attribute__((__always_inline__))
496
+ _mm_cvtepi32_pd (__m128i __A)
497
+ ;
498
+
499
+ static __inline __m128 __attribute__((__always_inline__))
500
+ _mm_cvtepi32_ps (__m128i __A)
501
+ ;
502
+
503
+ static __inline __m128i __attribute__((__always_inline__))
504
+ _mm_cvtpd_epi32 (__m128d __A)
505
+ ;
506
+
507
+ static __inline __m64 __attribute__((__always_inline__))
508
+ _mm_cvtpd_pi32 (__m128d __A)
509
+ ;
510
+
511
+ static __inline __m128 __attribute__((__always_inline__))
512
+ _mm_cvtpd_ps (__m128d __A)
513
+ ;
514
+
515
+ static __inline __m128i __attribute__((__always_inline__))
516
+ _mm_cvttpd_epi32 (__m128d __A)
517
+ ;
518
+
519
+ static __inline __m64 __attribute__((__always_inline__))
520
+ _mm_cvttpd_pi32 (__m128d __A)
521
+ ;
522
+
523
+ static __inline __m128d __attribute__((__always_inline__))
524
+ _mm_cvtpi32_pd (__m64 __A)
525
+ ;
526
+
527
+ static __inline __m128i __attribute__((__always_inline__))
528
+ _mm_cvtps_epi32 (__m128 __A)
529
+ ;
530
+
531
+ static __inline __m128i __attribute__((__always_inline__))
532
+ _mm_cvttps_epi32 (__m128 __A)
533
+ ;
534
+
535
+ static __inline __m128d __attribute__((__always_inline__))
536
+ _mm_cvtps_pd (__m128 __A)
537
+ ;
538
+
539
+ static __inline int __attribute__((__always_inline__))
540
+ _mm_cvtsd_si32 (__m128d __A)
541
+ ;
542
+
543
+ #ifdef __x86_64__
544
+ static __inline long long __attribute__((__always_inline__))
545
+ _mm_cvtsd_si64x (__m128d __A)
546
+ ;
547
+ #endif
548
+
549
+ static __inline int __attribute__((__always_inline__))
550
+ _mm_cvttsd_si32 (__m128d __A)
551
+ ;
552
+
553
+ #ifdef __x86_64__
554
+ static __inline long long __attribute__((__always_inline__))
555
+ _mm_cvttsd_si64x (__m128d __A)
556
+ ;
557
+ #endif
558
+
559
+ static __inline __m128 __attribute__((__always_inline__))
560
+ _mm_cvtsd_ss (__m128 __A, __m128d __B)
561
+ ;
562
+
563
+ static __inline __m128d __attribute__((__always_inline__))
564
+ _mm_cvtsi32_sd (__m128d __A, int __B)
565
+ ;
566
+
567
+ #ifdef __x86_64__
568
+ static __inline __m128d __attribute__((__always_inline__))
569
+ _mm_cvtsi64x_sd (__m128d __A, long long __B)
570
+ ;
571
+ #endif
572
+
573
+ static __inline __m128d __attribute__((__always_inline__))
574
+ _mm_cvtss_sd (__m128d __A, __m128 __B)
575
+ ;
576
+
577
+ #define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
578
+
579
+ static __inline __m128d __attribute__((__always_inline__))
580
+ _mm_unpackhi_pd (__m128d __A, __m128d __B)
581
+ ;
582
+
583
+ static __inline __m128d __attribute__((__always_inline__))
584
+ _mm_unpacklo_pd (__m128d __A, __m128d __B)
585
+ ;
586
+
587
+ static __inline __m128d __attribute__((__always_inline__))
588
+ _mm_loadh_pd (__m128d __A, double const *__B)
589
+ ;
590
+
591
+ static __inline __m128d __attribute__((__always_inline__))
592
+ _mm_loadl_pd (__m128d __A, double const *__B)
593
+ ;
594
+
595
+ static __inline int __attribute__((__always_inline__))
596
+ _mm_movemask_pd (__m128d __A)
597
+ ;
598
+
599
+ static __inline __m128i __attribute__((__always_inline__))
600
+ _mm_packs_epi16 (__m128i __A, __m128i __B)
601
+ ;
602
+
603
+ static __inline __m128i __attribute__((__always_inline__))
604
+ _mm_packs_epi32 (__m128i __A, __m128i __B)
605
+ ;
606
+
607
+ static __inline __m128i __attribute__((__always_inline__))
608
+ _mm_packus_epi16 (__m128i __A, __m128i __B)
609
+ ;
610
+
611
+ static __inline __m128i __attribute__((__always_inline__))
612
+ _mm_unpackhi_epi8 (__m128i __A, __m128i __B)
613
+ ;
614
+
615
+ static __inline __m128i __attribute__((__always_inline__))
616
+ _mm_unpackhi_epi16 (__m128i __A, __m128i __B)
617
+ ;
618
+
619
+ static __inline __m128i __attribute__((__always_inline__))
620
+ _mm_unpackhi_epi32 (__m128i __A, __m128i __B)
621
+ ;
622
+
623
+ static __inline __m128i __attribute__((__always_inline__))
624
+ _mm_unpackhi_epi64 (__m128i __A, __m128i __B)
625
+ ;
626
+
627
+ static __inline __m128i __attribute__((__always_inline__))
628
+ _mm_unpacklo_epi8 (__m128i __A, __m128i __B)
629
+ ;
630
+
631
+ static __inline __m128i __attribute__((__always_inline__))
632
+ _mm_unpacklo_epi16 (__m128i __A, __m128i __B)
633
+ ;
634
+
635
+ static __inline __m128i __attribute__((__always_inline__))
636
+ _mm_unpacklo_epi32 (__m128i __A, __m128i __B)
637
+ ;
638
+
639
+ static __inline __m128i __attribute__((__always_inline__))
640
+ _mm_unpacklo_epi64 (__m128i __A, __m128i __B)
641
+ ;
642
+
643
+ static __inline __m128i __attribute__((__always_inline__))
644
+ _mm_add_epi8 (__m128i __A, __m128i __B)
645
+ ;
646
+
647
+ static __inline __m128i __attribute__((__always_inline__))
648
+ _mm_add_epi16 (__m128i __A, __m128i __B)
649
+ ;
650
+
651
+ static __inline __m128i __attribute__((__always_inline__))
652
+ _mm_add_epi32 (__m128i __A, __m128i __B)
653
+ ;
654
+
655
+ static __inline __m128i __attribute__((__always_inline__))
656
+ _mm_add_epi64 (__m128i __A, __m128i __B)
657
+ ;
658
+
659
+ static __inline __m128i __attribute__((__always_inline__))
660
+ _mm_adds_epi8 (__m128i __A, __m128i __B)
661
+ ;
662
+
663
+ static __inline __m128i __attribute__((__always_inline__))
664
+ _mm_adds_epi16 (__m128i __A, __m128i __B)
665
+ ;
666
+
667
+ static __inline __m128i __attribute__((__always_inline__))
668
+ _mm_adds_epu8 (__m128i __A, __m128i __B)
669
+ ;
670
+
671
+ static __inline __m128i __attribute__((__always_inline__))
672
+ _mm_adds_epu16 (__m128i __A, __m128i __B)
673
+ ;
674
+
675
+ static __inline __m128i __attribute__((__always_inline__))
676
+ _mm_sub_epi8 (__m128i __A, __m128i __B)
677
+ ;
678
+
679
+ static __inline __m128i __attribute__((__always_inline__))
680
+ _mm_sub_epi16 (__m128i __A, __m128i __B)
681
+ ;
682
+
683
+ static __inline __m128i __attribute__((__always_inline__))
684
+ _mm_sub_epi32 (__m128i __A, __m128i __B)
685
+ ;
686
+
687
+ static __inline __m128i __attribute__((__always_inline__))
688
+ _mm_sub_epi64 (__m128i __A, __m128i __B)
689
+ ;
690
+
691
+ static __inline __m128i __attribute__((__always_inline__))
692
+ _mm_subs_epi8 (__m128i __A, __m128i __B)
693
+ ;
694
+
695
+ static __inline __m128i __attribute__((__always_inline__))
696
+ _mm_subs_epi16 (__m128i __A, __m128i __B)
697
+ ;
698
+
699
+ static __inline __m128i __attribute__((__always_inline__))
700
+ _mm_subs_epu8 (__m128i __A, __m128i __B)
701
+ ;
702
+
703
+ static __inline __m128i __attribute__((__always_inline__))
704
+ _mm_subs_epu16 (__m128i __A, __m128i __B)
705
+ ;
706
+
707
+ static __inline __m128i __attribute__((__always_inline__))
708
+ _mm_madd_epi16 (__m128i __A, __m128i __B)
709
+ ;
710
+
711
+ static __inline __m128i __attribute__((__always_inline__))
712
+ _mm_mulhi_epi16 (__m128i __A, __m128i __B)
713
+ ;
714
+
715
+ static __inline __m128i __attribute__((__always_inline__))
716
+ _mm_mullo_epi16 (__m128i __A, __m128i __B)
717
+ ;
718
+
719
+ static __inline __m64 __attribute__((__always_inline__))
720
+ _mm_mul_su32 (__m64 __A, __m64 __B)
721
+ ;
722
+
723
+ static __inline __m128i __attribute__((__always_inline__))
724
+ _mm_mul_epu32 (__m128i __A, __m128i __B)
725
+ ;
726
+
727
/* Immediate-count logical left shifts (16-, 32-, 64-bit lanes).
   The inline definitions are disabled here (gccxml keeps declarations
   only); the macro forms forward to the GCC builtins.  Each builtin
   expects the vector element type matching its shift width, so the
   casts must agree: psllwi128 -> __v8hi, pslldi128 -> __v4si,
   psllqi128 -> __v2di.  */
#if 0
static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi16 (__m128i __A, int __B)
;

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi32 (__m128i __A, int __B)
;

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi64 (__m128i __A, int __B)
;
#else
#define _mm_slli_epi16(__A, __B) \
  ((__m128i)__builtin_ia32_psllwi128 ((__v8hi)(__A), __B))
/* Fixed: operand was cast to (__v8hi); pslldi128 operates on 32-bit lanes.  */
#define _mm_slli_epi32(__A, __B) \
  ((__m128i)__builtin_ia32_pslldi128 ((__v4si)(__A), __B))
/* Fixed: operand was cast to (__v8hi); psllqi128 operates on 64-bit lanes.  */
#define _mm_slli_epi64(__A, __B) \
  ((__m128i)__builtin_ia32_psllqi128 ((__v2di)(__A), __B))
#endif
747
+
748
/* Immediate-count arithmetic right shifts (16- and 32-bit lanes).
   Inline definitions disabled (gccxml declaration-only header); macro
   forms forward to the GCC builtins.  psrawi128 expects __v8hi and
   psradi128 expects __v4si, so the casts must match.  */
#if 0
static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi16 (__m128i __A, int __B)
;

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi32 (__m128i __A, int __B)
;
#else
#define _mm_srai_epi16(__A, __B) \
  ((__m128i)__builtin_ia32_psrawi128 ((__v8hi)(__A), __B))
/* Fixed: operand was cast to (__v8hi); psradi128 operates on 32-bit lanes.  */
#define _mm_srai_epi32(__A, __B) \
  ((__m128i)__builtin_ia32_psradi128 ((__v4si)(__A), __B))
#endif
762
+
763
/* Whole-register byte shifts.  Inline definitions disabled (gccxml
   declaration-only header); macro forms forward to the GCC builtins,
   which take the shift count in bits (hence the * 8).
   Fixed: the second declaration in the disabled branch was a duplicate
   of _mm_srli_si128; it must be _mm_slli_si128 to match the macros in
   the #else branch.  */
#if 0
static __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, int __B)
;

static __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, int __B)
;
#else
#define _mm_srli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
#define _mm_slli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
#endif
777
+
778
/* Immediate-count logical right shifts (16-, 32-, 64-bit lanes).
   Inline definitions disabled (gccxml declaration-only header); macro
   forms forward to the GCC builtins.  Each builtin expects the vector
   element type matching its shift width: psrlwi128 -> __v8hi,
   psrldi128 -> __v4si, psrlqi128 -> __v2di.  */
#if 0
static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi16 (__m128i __A, int __B)
;

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi32 (__m128i __A, int __B)
;

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi64 (__m128i __A, int __B)
;
#else
#define _mm_srli_epi16(__A, __B) \
  ((__m128i)__builtin_ia32_psrlwi128 ((__v8hi)(__A), __B))
#define _mm_srli_epi32(__A, __B) \
  ((__m128i)__builtin_ia32_psrldi128 ((__v4si)(__A), __B))
/* Fixed: operand was cast to (__v4si); psrlqi128 operates on 64-bit lanes.  */
#define _mm_srli_epi64(__A, __B) \
  ((__m128i)__builtin_ia32_psrlqi128 ((__v2di)(__A), __B))
#endif
798
+
799
+ static __inline __m128i __attribute__((__always_inline__))
800
+ _mm_sll_epi16 (__m128i __A, __m128i __B)
801
+ ;
802
+
803
+ static __inline __m128i __attribute__((__always_inline__))
804
+ _mm_sll_epi32 (__m128i __A, __m128i __B)
805
+ ;
806
+
807
+ static __inline __m128i __attribute__((__always_inline__))
808
+ _mm_sll_epi64 (__m128i __A, __m128i __B)
809
+ ;
810
+
811
+ static __inline __m128i __attribute__((__always_inline__))
812
+ _mm_sra_epi16 (__m128i __A, __m128i __B)
813
+ ;
814
+
815
+ static __inline __m128i __attribute__((__always_inline__))
816
+ _mm_sra_epi32 (__m128i __A, __m128i __B)
817
+ ;
818
+
819
+ static __inline __m128i __attribute__((__always_inline__))
820
+ _mm_srl_epi16 (__m128i __A, __m128i __B)
821
+ ;
822
+
823
+ static __inline __m128i __attribute__((__always_inline__))
824
+ _mm_srl_epi32 (__m128i __A, __m128i __B)
825
+ ;
826
+
827
+ static __inline __m128i __attribute__((__always_inline__))
828
+ _mm_srl_epi64 (__m128i __A, __m128i __B)
829
+ ;
830
+
831
+ static __inline __m128i __attribute__((__always_inline__))
832
+ _mm_and_si128 (__m128i __A, __m128i __B)
833
+ ;
834
+
835
+ static __inline __m128i __attribute__((__always_inline__))
836
+ _mm_andnot_si128 (__m128i __A, __m128i __B)
837
+ ;
838
+
839
+ static __inline __m128i __attribute__((__always_inline__))
840
+ _mm_or_si128 (__m128i __A, __m128i __B)
841
+ ;
842
+
843
+ static __inline __m128i __attribute__((__always_inline__))
844
+ _mm_xor_si128 (__m128i __A, __m128i __B)
845
+ ;
846
+
847
+ static __inline __m128i __attribute__((__always_inline__))
848
+ _mm_cmpeq_epi8 (__m128i __A, __m128i __B)
849
+ ;
850
+
851
+ static __inline __m128i __attribute__((__always_inline__))
852
+ _mm_cmpeq_epi16 (__m128i __A, __m128i __B)
853
+ ;
854
+
855
+ static __inline __m128i __attribute__((__always_inline__))
856
+ _mm_cmpeq_epi32 (__m128i __A, __m128i __B)
857
+ ;
858
+
859
+ static __inline __m128i __attribute__((__always_inline__))
860
+ _mm_cmplt_epi8 (__m128i __A, __m128i __B)
861
+ ;
862
+
863
+ static __inline __m128i __attribute__((__always_inline__))
864
+ _mm_cmplt_epi16 (__m128i __A, __m128i __B)
865
+ ;
866
+
867
+ static __inline __m128i __attribute__((__always_inline__))
868
+ _mm_cmplt_epi32 (__m128i __A, __m128i __B)
869
+ ;
870
+
871
+ static __inline __m128i __attribute__((__always_inline__))
872
+ _mm_cmpgt_epi8 (__m128i __A, __m128i __B)
873
+ ;
874
+
875
+ static __inline __m128i __attribute__((__always_inline__))
876
+ _mm_cmpgt_epi16 (__m128i __A, __m128i __B)
877
+ ;
878
+
879
+ static __inline __m128i __attribute__((__always_inline__))
880
+ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
881
+ ;
882
+
883
+ #if 0
884
+ static __inline int __attribute__((__always_inline__))
885
+ _mm_extract_epi16 (__m128i const __A, int const __N)
886
+ ;
887
+
888
+ static __inline __m128i __attribute__((__always_inline__))
889
+ _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
890
+ ;
891
+ #else
892
+ #define _mm_extract_epi16(A, N) \
893
+ ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
894
+ #define _mm_insert_epi16(A, D, N) \
895
+ ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
896
+ #endif
897
+
898
+ static __inline __m128i __attribute__((__always_inline__))
899
+ _mm_max_epi16 (__m128i __A, __m128i __B)
900
+ ;
901
+
902
+ static __inline __m128i __attribute__((__always_inline__))
903
+ _mm_max_epu8 (__m128i __A, __m128i __B)
904
+ ;
905
+
906
+ static __inline __m128i __attribute__((__always_inline__))
907
+ _mm_min_epi16 (__m128i __A, __m128i __B)
908
+ ;
909
+
910
+ static __inline __m128i __attribute__((__always_inline__))
911
+ _mm_min_epu8 (__m128i __A, __m128i __B)
912
+ ;
913
+
914
+ static __inline int __attribute__((__always_inline__))
915
+ _mm_movemask_epi8 (__m128i __A)
916
+ ;
917
+
918
+ static __inline __m128i __attribute__((__always_inline__))
919
+ _mm_mulhi_epu16 (__m128i __A, __m128i __B)
920
+ ;
921
+
922
+ #define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
923
+ #define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
924
+ #define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
925
+
926
+ static __inline void __attribute__((__always_inline__))
927
+ _mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
928
+ ;
929
+
930
+ static __inline __m128i __attribute__((__always_inline__))
931
+ _mm_avg_epu8 (__m128i __A, __m128i __B)
932
+ ;
933
+
934
+ static __inline __m128i __attribute__((__always_inline__))
935
+ _mm_avg_epu16 (__m128i __A, __m128i __B)
936
+ ;
937
+
938
+ static __inline __m128i __attribute__((__always_inline__))
939
+ _mm_sad_epu8 (__m128i __A, __m128i __B)
940
+ ;
941
+
942
+ static __inline void __attribute__((__always_inline__))
943
+ _mm_stream_si32 (int *__A, int __B)
944
+ ;
945
+
946
+ static __inline void __attribute__((__always_inline__))
947
+ _mm_stream_si128 (__m128i *__A, __m128i __B)
948
+ ;
949
+
950
+ static __inline void __attribute__((__always_inline__))
951
+ _mm_stream_pd (double *__A, __m128d __B)
952
+ ;
953
+
954
+ static __inline void __attribute__((__always_inline__))
955
+ _mm_clflush (void const *__A)
956
+ ;
957
+
958
+ static __inline void __attribute__((__always_inline__))
959
+ _mm_lfence (void)
960
+ ;
961
+
962
+ static __inline void __attribute__((__always_inline__))
963
+ _mm_mfence (void)
964
+ ;
965
+
966
+ static __inline __m128i __attribute__((__always_inline__))
967
+ _mm_cvtsi32_si128 (int __A)
968
+ ;
969
+
970
+ #ifdef __x86_64__
971
+ static __inline __m128i __attribute__((__always_inline__))
972
+ _mm_cvtsi64x_si128 (long long __A)
973
+ ;
974
+ #endif
975
+
976
+ /* Casts between various SP, DP, INT vector types. Note that these do no
977
+ conversion of values, they just change the type. */
978
+ static __inline __m128 __attribute__((__always_inline__))
979
+ _mm_castpd_ps(__m128d __A)
980
+ ;
981
+
982
+ static __inline __m128i __attribute__((__always_inline__))
983
+ _mm_castpd_si128(__m128d __A)
984
+ ;
985
+
986
+ static __inline __m128d __attribute__((__always_inline__))
987
+ _mm_castps_pd(__m128 __A)
988
+ ;
989
+
990
+ static __inline __m128i __attribute__((__always_inline__))
991
+ _mm_castps_si128(__m128 __A)
992
+ ;
993
+
994
+ static __inline __m128 __attribute__((__always_inline__))
995
+ _mm_castsi128_ps(__m128i __A)
996
+ ;
997
+
998
+ static __inline __m128d __attribute__((__always_inline__))
999
+ _mm_castsi128_pd(__m128i __A)
1000
+ ;
1001
+
1002
+ #endif /* __SSE2__ */
1003
+
1004
+ #endif /* _EMMINTRIN_H_INCLUDED */