gccxml_gem 0.9.2-x86-linux → 0.9.3-x86-linux

Files changed (49)
  1. data/Rakefile +15 -6
  2. data/bin/gccxml +0 -0
  3. data/bin/gccxml_cc1plus +0 -0
  4. data/gccxml.rb +5 -5
  5. data/share/gccxml-0.9/GCC/3.2/bits/gthr-default.h +4 -0
  6. data/share/gccxml-0.9/GCC/3.4/bits/gthr-default.h +5 -0
  7. data/share/gccxml-0.9/GCC/4.0/emmintrin.h +5 -0
  8. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_emmintrin.h +1037 -0
  9. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_mmintrin.h +669 -0
  10. data/share/gccxml-0.9/GCC/4.0/gccxml_apple_xmmintrin.h +870 -0
  11. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_emmintrin.h +977 -0
  12. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_mmintrin.h +636 -0
  13. data/share/gccxml-0.9/GCC/4.0/gccxml_gnu_xmmintrin.h +833 -0
  14. data/share/gccxml-0.9/GCC/4.0/mmintrin.h +5 -0
  15. data/share/gccxml-0.9/GCC/4.0/xmmintrin.h +5 -0
  16. data/share/gccxml-0.9/GCC/4.1/bits/gthr-default.h +4 -0
  17. data/share/gccxml-0.9/GCC/4.1/emmintrin.h +5 -0
  18. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_emmintrin.h +1509 -0
  19. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_mmintrin.h +942 -0
  20. data/share/gccxml-0.9/GCC/4.1/gccxml_apple_xmmintrin.h +1192 -0
  21. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_emmintrin.h +1004 -0
  22. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_mmintrin.h +637 -0
  23. data/share/gccxml-0.9/GCC/4.1/gccxml_gnu_xmmintrin.h +834 -0
  24. data/share/gccxml-0.9/GCC/4.1/mmintrin.h +5 -0
  25. data/share/gccxml-0.9/GCC/4.1/xmmintrin.h +5 -0
  26. data/share/gccxml-0.9/GCC/4.2/emmintrin.h +5 -0
  27. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_emmintrin.h +1509 -0
  28. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_mmintrin.h +942 -0
  29. data/share/gccxml-0.9/GCC/4.2/gccxml_apple_xmmintrin.h +1192 -0
  30. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h +1013 -0
  31. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_mmintrin.h +663 -0
  32. data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_xmmintrin.h +860 -0
  33. data/share/gccxml-0.9/GCC/4.2/mmintrin.h +5 -0
  34. data/share/gccxml-0.9/GCC/4.2/xmmintrin.h +5 -0
  35. data/share/gccxml-0.9/GCC/4.3/emmintrin.h +1043 -0
  36. data/share/gccxml-0.9/GCC/4.3/gccxml_builtins.h +1 -0
  37. data/share/gccxml-0.9/GCC/4.3/mmintrin.h +663 -0
  38. data/share/gccxml-0.9/GCC/4.3/xmmintrin.h +867 -0
  39. data/share/gccxml-0.9/GCC/4.4/bits/c++config.h +1431 -0
  40. data/share/gccxml-0.9/GCC/4.4/emmintrin.h +1041 -0
  41. data/share/gccxml-0.9/GCC/4.4/gccxml_builtins.h +153 -0
  42. data/share/gccxml-0.9/GCC/4.4/mmintrin.h +662 -0
  43. data/share/gccxml-0.9/GCC/4.4/xmmintrin.h +864 -0
  44. data/share/gccxml-0.9/GCC/4.5/gccxml_builtins.h +154 -0
  45. data/share/gccxml-0.9/GCC/4.5/iomanip +349 -0
  46. data/share/gccxml-0.9/GCC/COPYING.RUNTIME +73 -0
  47. data/share/gccxml-0.9/GCC/COPYING3 +674 -0
  48. data/share/man/man1/gccxml.1 +1 -1
  49. metadata +165 -114
data/share/gccxml-0.9/GCC/4.2/gccxml_gnu_emmintrin.h
@@ -0,0 +1,1013 @@
+ /* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ /* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+ /* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+ #ifndef _EMMINTRIN_H_INCLUDED
+ #define _EMMINTRIN_H_INCLUDED
+
+ #ifdef __SSE2__
+ #include <xmmintrin.h>
+
+ /* SSE2 */
+ typedef double __v2df __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
+ typedef short __v8hi __attribute__ ((__vector_size__ (16)));
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+
+ /* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+ typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+ typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+ /* Create a selector for use with the SHUFPD instruction. */
+ #define _MM_SHUFFLE2(fp1,fp0) \
+ (((fp1) << 1) | (fp0))
+
+ /* Create a vector with element 0 as F and the rest zero. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_set_sd (double __F)
+ ;
+
+ /* Create a vector with both elements equal to F. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_set1_pd (double __F)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_set_pd1 (double __F)
+ ;
+
+ /* Create a vector with the lower value X and upper value W. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_set_pd (double __W, double __X)
+ ;
+
+ /* Create a vector with the lower value W and upper value X. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_setr_pd (double __W, double __X)
+ ;
+
+ /* Create a vector of zeros. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_setzero_pd (void)
+ ;
+
+ /* Sets the low DPFP value of A from the low value of B. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_move_sd (__m128d __A, __m128d __B)
+ ;
+
+ /* Load two DPFP values from P. The address must be 16-byte aligned. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_load_pd (double const *__P)
+ ;
+
+ /* Load two DPFP values from P. The address need not be 16-byte aligned. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_loadu_pd (double const *__P)
+ ;
+
+ /* Create a vector with all two elements equal to *P. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_load1_pd (double const *__P)
+ ;
+
+ /* Create a vector with element 0 as *P and the rest zero. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_load_sd (double const *__P)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_load_pd1 (double const *__P)
+ ;
+
+ /* Load two DPFP values in reverse order. The address must be aligned. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_loadr_pd (double const *__P)
+ ;
+
+ /* Store two DPFP values. The address must be 16-byte aligned. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_store_pd (double *__P, __m128d __A)
+ ;
+
+ /* Store two DPFP values. The address need not be 16-byte aligned. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_storeu_pd (double *__P, __m128d __A)
+ ;
+
+ /* Stores the lower DPFP value. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_store_sd (double *__P, __m128d __A)
+ ;
+
+ static __inline double __attribute__((__always_inline__))
+ _mm_cvtsd_f64 (__m128d __A)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_storel_pd (double *__P, __m128d __A)
+ ;
+
+ /* Stores the upper DPFP value. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_storeh_pd (double *__P, __m128d __A)
+ ;
+
+ /* Store the lower DPFP value across two words.
+ The address must be 16-byte aligned. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_store1_pd (double *__P, __m128d __A)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_store_pd1 (double *__P, __m128d __A)
+ ;
+
+ /* Store two DPFP values in reverse order. The address must be aligned. */
+ static __inline void __attribute__((__always_inline__))
+ _mm_storer_pd (double *__P, __m128d __A)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_cvtsi128_si32 (__m128i __A)
+ ;
+
+ #ifdef __x86_64__
+ /* Intel intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsi128_si64 (__m128i __A)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsi128_si64x (__m128i __A)
+ ;
+ #endif
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_add_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_add_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_sub_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_sub_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_mul_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_mul_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_div_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_div_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_sqrt_pd (__m128d __A)
+ ;
+
+ /* Return pair {sqrt (B[0]), A[1]}. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_sqrt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_min_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_min_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_max_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_max_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_and_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_andnot_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_or_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_xor_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpeq_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmplt_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmple_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpgt_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpge_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpneq_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnlt_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnle_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpngt_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnge_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpord_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpunord_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpeq_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmplt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmple_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpgt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpge_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpneq_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnlt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnle_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpngt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpnge_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpord_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cmpunord_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comieq_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comilt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comile_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comigt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comige_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_comineq_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomieq_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomilt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomile_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomigt_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomige_sd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_ucomineq_sd (__m128d __A, __m128d __B)
+ ;
+
+ /* Create a vector of Qi, where i is the element number. */
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set_epi64x (long long __q1, long long __q0)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set_epi64 (__m64 __q1, __m64 __q0)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
+ short __q3, short __q2, short __q1, short __q0)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
+ char __q11, char __q10, char __q09, char __q08,
+ char __q07, char __q06, char __q05, char __q04,
+ char __q03, char __q02, char __q01, char __q00)
+ ;
+
+ /* Set all of the elements of the vector to A. */
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set1_epi64x (long long __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set1_epi64 (__m64 __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set1_epi32 (int __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set1_epi16 (short __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_set1_epi8 (char __A)
+ ;
+
+ /* Create a vector of Qi, where i is the element number.
+ The parameter order is reversed from the _mm_set_epi* functions. */
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_setr_epi64 (__m64 __q0, __m64 __q1)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
+ short __q4, short __q5, short __q6, short __q7)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
+ char __q04, char __q05, char __q06, char __q07,
+ char __q08, char __q09, char __q10, char __q11,
+ char __q12, char __q13, char __q14, char __q15)
+ ;
+
+ /* Create a vector with element 0 as *P and the rest zero. */
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_load_si128 (__m128i const *__P)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_loadu_si128 (__m128i const *__P)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_loadl_epi64 (__m128i const *__P)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_store_si128 (__m128i *__P, __m128i __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_storeu_si128 (__m128i *__P, __m128i __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_storel_epi64 (__m128i *__P, __m128i __B)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_movepi64_pi64 (__m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_movpi64_epi64 (__m64 __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_move_epi64 (__m128i __A)
+ ;
+
+ /* Create a vector of zeros. */
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_setzero_si128 (void)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtepi32_pd (__m128i __A)
+ ;
+
+ static __inline __m128 __attribute__((__always_inline__))
+ _mm_cvtepi32_ps (__m128i __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvtpd_epi32 (__m128d __A)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvtpd_pi32 (__m128d __A)
+ ;
+
+ static __inline __m128 __attribute__((__always_inline__))
+ _mm_cvtpd_ps (__m128d __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvttpd_epi32 (__m128d __A)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_cvttpd_pi32 (__m128d __A)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtpi32_pd (__m64 __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvtps_epi32 (__m128 __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvttps_epi32 (__m128 __A)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtps_pd (__m128 __A)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_cvtsd_si32 (__m128d __A)
+ ;
+
+ #ifdef __x86_64__
+ /* Intel intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsd_si64 (__m128d __A)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvtsd_si64x (__m128d __A)
+ ;
+ #endif
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_cvttsd_si32 (__m128d __A)
+ ;
+
+ #ifdef __x86_64__
+ /* Intel intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvttsd_si64 (__m128d __A)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline long long __attribute__((__always_inline__))
+ _mm_cvttsd_si64x (__m128d __A)
+ ;
+ #endif
+
+ static __inline __m128 __attribute__((__always_inline__))
+ _mm_cvtsd_ss (__m128 __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtsi32_sd (__m128d __A, int __B)
+ ;
+
+ #ifdef __x86_64__
+ /* Intel intrinsic. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtsi64_sd (__m128d __A, long long __B)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtsi64x_sd (__m128d __A, long long __B)
+ ;
+ #endif
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_cvtss_sd (__m128d __A, __m128 __B)
+ ;
+
+ #define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_unpackhi_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_unpacklo_pd (__m128d __A, __m128d __B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_loadh_pd (__m128d __A, double const *__B)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_loadl_pd (__m128d __A, double const *__B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_movemask_pd (__m128d __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_packs_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_packs_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_packus_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpackhi_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpackhi_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpackhi_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpackhi_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpacklo_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpacklo_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpacklo_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_unpacklo_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_add_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_add_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_add_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_add_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_adds_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_adds_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_adds_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_adds_epu16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sub_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sub_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sub_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sub_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_subs_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_subs_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_subs_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_subs_epu16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_madd_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_mulhi_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_mullo_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m64 __attribute__((__always_inline__))
+ _mm_mul_su32 (__m64 __A, __m64 __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_mul_epu32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_slli_epi16 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_slli_epi32 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_slli_epi64 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srai_epi16 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srai_epi32 (__m128i __A, int __B)
+ ;
+
+ #if 0
+ static __m128i __attribute__((__always_inline__))
+ _mm_srli_si128 (__m128i __A, int __B)
+ ;
+
+ static __m128i __attribute__((__always_inline__))
+ _mm_srli_si128 (__m128i __A, int __B)
+ ;
+ #else
+ #define _mm_srli_si128(__A, __B) \
+ ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
+ #define _mm_slli_si128(__A, __B) \
+ ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
+ #endif
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srli_epi16 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srli_epi32 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srli_epi64 (__m128i __A, int __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sll_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sll_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sll_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sra_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sra_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srl_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srl_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_srl_epi64 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_and_si128 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_andnot_si128 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_or_si128 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_xor_si128 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpeq_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpeq_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpeq_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmplt_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmplt_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmplt_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpgt_epi8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpgt_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
+ ;
+
+ #if 0
+ static __inline int __attribute__((__always_inline__))
+ _mm_extract_epi16 (__m128i const __A, int const __N)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
+ ;
+ #else
+ #define _mm_extract_epi16(A, N) \
+ ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
+ #define _mm_insert_epi16(A, D, N) \
+ ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
+ #endif
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_max_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_max_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_min_epi16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_min_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline int __attribute__((__always_inline__))
+ _mm_movemask_epi8 (__m128i __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_mulhi_epu16 (__m128i __A, __m128i __B)
+ ;
+
+ #define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
+ #define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
+ #define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_avg_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_avg_epu16 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_sad_epu8 (__m128i __A, __m128i __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_stream_si32 (int *__A, int __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_stream_si128 (__m128i *__A, __m128i __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_stream_pd (double *__A, __m128d __B)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_clflush (void const *__A)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_lfence (void)
+ ;
+
+ static __inline void __attribute__((__always_inline__))
+ _mm_mfence (void)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvtsi32_si128 (int __A)
+ ;
+
+ #ifdef __x86_64__
+ /* Intel intrinsic. */
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvtsi64_si128 (long long __A)
+ ;
+
+ /* Microsoft intrinsic. */
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_cvtsi64x_si128 (long long __A)
+ ;
+ #endif
+
+ /* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+ static __inline __m128 __attribute__((__always_inline__))
+ _mm_castpd_ps(__m128d __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_castpd_si128(__m128d __A)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_castps_pd(__m128 __A)
+ ;
+
+ static __inline __m128i __attribute__((__always_inline__))
+ _mm_castps_si128(__m128 __A)
+ ;
+
+ static __inline __m128 __attribute__((__always_inline__))
+ _mm_castsi128_ps(__m128i __A)
+ ;
+
+ static __inline __m128d __attribute__((__always_inline__))
+ _mm_castsi128_pd(__m128i __A)
+ ;
+
+ #endif /* __SSE2__ */
+
+ #endif /* _EMMINTRIN_H_INCLUDED */
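The header added above keeps GCC's exact SSE2 prototypes but replaces every function body with a bare ";", so gccxml can parse code that includes <emmintrin.h> without having to compile the builtins. As a minimal sketch of the kind of client code these declarations let gccxml parse (assuming an actual SSE2-capable GCC with -msse2 supplies the real implementations at build time; the file name sse2_demo.c is hypothetical):

/* sse2_demo.c - usage sketch for a few of the intrinsics declared above.
   Build with a real toolchain: gcc -msse2 sse2_demo.c -o sse2_demo */
#include <emmintrin.h>
#include <stdio.h>

int main (void)
{
  /* _mm_set_pd takes (upper, lower), so a = {1.0, 2.0}. */
  __m128d a = _mm_set_pd (2.0, 1.0);
  __m128d b = _mm_set1_pd (10.0);      /* both elements 10.0 */
  __m128d sum = _mm_add_pd (a, b);     /* element-wise add */

  double out[2];
  _mm_storeu_pd (out, sum);            /* unaligned store, no 16-byte requirement */
  printf ("%f %f\n", out[0], out[1]);  /* prints 11.000000 12.000000 */

  /* Integer lanes go through __m128i; _mm_cvtsi128_si32 reads the low 32-bit lane. */
  __m128i v = _mm_set1_epi32 (7);
  printf ("%d\n", _mm_cvtsi128_si32 (_mm_add_epi32 (v, v))); /* prints 14 */
  return 0;
}

Note that such code only parses against the stub header; running it requires the unstripped emmintrin.h that ships with GCC itself.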