kanayago 0.1.1

Files changed (74)
  1. checksums.yaml +7 -0
  2. data/.rubocop.yml +15 -0
  3. data/.rubocop_todo.yml +23 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +79 -0
  6. data/Rakefile +182 -0
  7. data/ext/kanayago/ccan/check_type/check_type.h +63 -0
  8. data/ext/kanayago/ccan/container_of/container_of.h +142 -0
  9. data/ext/kanayago/ccan/list/list.h +791 -0
  10. data/ext/kanayago/ccan/str/str.h +17 -0
  11. data/ext/kanayago/constant.h +53 -0
  12. data/ext/kanayago/extconf.rb +21 -0
  13. data/ext/kanayago/id.h +347 -0
  14. data/ext/kanayago/id_table.h +39 -0
  15. data/ext/kanayago/internal/array.h +151 -0
  16. data/ext/kanayago/internal/basic_operators.h +64 -0
  17. data/ext/kanayago/internal/bignum.h +244 -0
  18. data/ext/kanayago/internal/bits.h +568 -0
  19. data/ext/kanayago/internal/compile.h +34 -0
  20. data/ext/kanayago/internal/compilers.h +107 -0
  21. data/ext/kanayago/internal/complex.h +29 -0
  22. data/ext/kanayago/internal/encoding.h +36 -0
  23. data/ext/kanayago/internal/error.h +218 -0
  24. data/ext/kanayago/internal/fixnum.h +184 -0
  25. data/ext/kanayago/internal/gc.h +322 -0
  26. data/ext/kanayago/internal/hash.h +191 -0
  27. data/ext/kanayago/internal/imemo.h +261 -0
  28. data/ext/kanayago/internal/io.h +140 -0
  29. data/ext/kanayago/internal/numeric.h +274 -0
  30. data/ext/kanayago/internal/parse.h +117 -0
  31. data/ext/kanayago/internal/rational.h +71 -0
  32. data/ext/kanayago/internal/re.h +28 -0
  33. data/ext/kanayago/internal/ruby_parser.h +125 -0
  34. data/ext/kanayago/internal/sanitizers.h +297 -0
  35. data/ext/kanayago/internal/serial.h +23 -0
  36. data/ext/kanayago/internal/static_assert.h +16 -0
  37. data/ext/kanayago/internal/string.h +186 -0
  38. data/ext/kanayago/internal/symbol.h +45 -0
  39. data/ext/kanayago/internal/thread.h +79 -0
  40. data/ext/kanayago/internal/variable.h +72 -0
  41. data/ext/kanayago/internal/vm.h +137 -0
  42. data/ext/kanayago/internal/warnings.h +16 -0
  43. data/ext/kanayago/internal.h +108 -0
  44. data/ext/kanayago/kanayago.c +420 -0
  45. data/ext/kanayago/kanayago.h +21 -0
  46. data/ext/kanayago/lex.c +302 -0
  47. data/ext/kanayago/method.h +255 -0
  48. data/ext/kanayago/node.c +440 -0
  49. data/ext/kanayago/node.h +111 -0
  50. data/ext/kanayago/node_name.inc +224 -0
  51. data/ext/kanayago/parse.c +26931 -0
  52. data/ext/kanayago/parse.h +244 -0
  53. data/ext/kanayago/parse.tmp.y +16145 -0
  54. data/ext/kanayago/parser_bits.h +564 -0
  55. data/ext/kanayago/parser_node.h +32 -0
  56. data/ext/kanayago/parser_st.c +164 -0
  57. data/ext/kanayago/parser_st.h +162 -0
  58. data/ext/kanayago/parser_value.h +106 -0
  59. data/ext/kanayago/probes.h +4 -0
  60. data/ext/kanayago/ruby_assert.h +14 -0
  61. data/ext/kanayago/ruby_atomic.h +23 -0
  62. data/ext/kanayago/ruby_parser.c +1165 -0
  63. data/ext/kanayago/rubyparser.h +1391 -0
  64. data/ext/kanayago/shape.h +234 -0
  65. data/ext/kanayago/st.c +2339 -0
  66. data/ext/kanayago/symbol.h +123 -0
  67. data/ext/kanayago/thread_pthread.h +168 -0
  68. data/ext/kanayago/universal_parser.c +230 -0
  69. data/ext/kanayago/vm_core.h +2215 -0
  70. data/ext/kanayago/vm_opts.h +67 -0
  71. data/lib/kanayago/version.rb +5 -0
  72. data/lib/kanayago.rb +11 -0
  73. data/sig/kanayago.rbs +4 -0
  74. metadata +116 -0
data/ext/kanayago/parser_bits.h
@@ -0,0 +1,564 @@
+ #ifndef INTERNAL_BITS2_H /*-*-C-*-vi:se ft=c:*/
+ #define INTERNAL_BITS2_H
+ /**
+ * @author Ruby developers <ruby-core@ruby-lang.org>
+ * @copyright This file is a part of the programming language Ruby.
+ * Permission is hereby granted, to either redistribute and/or
+ * modify this file, provided that the conditions mentioned in the
+ * file COPYING are met. Consult the file for details.
+ * @brief Internal header for bitwise integer algorithms.
+ * @see Henry S. Warren Jr., "Hacker's Delight" (2nd ed.), 2013.
+ * @see SEI CERT C Coding Standard INT32-C. "Ensure that operations on
+ * signed integers do not result in overflow"
+ * @see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
+ * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateleft
+ * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateright
+ * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/byteswap-uint64-byteswap-ulong-byteswap-ushort
+ * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rotl-rotl64-rotr-rotr64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanforward-bitscanforward64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanreverse-bitscanreverse64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/lzcnt16-lzcnt-lzcnt64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/popcnt16-popcnt-popcnt64
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_lzcnt_u32
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_tzcnt_u32
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotl64
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotr64
+ * @see https://stackoverflow.com/a/776523
+ */
+ #include "ruby/internal/config.h"
+ #include <limits.h> /* for CHAR_BIT */
+ #include <stdint.h> /* for uintptr_t */
+ #include "internal/compilers.h" /* for MSC_VERSION_SINCE */
+
+ #if MSC_VERSION_SINCE(1310)
+ # include <stdlib.h> /* for _byteswap_uint64 */
+ #endif
+
+ #if defined(HAVE_X86INTRIN_H)
+ # include <x86intrin.h> /* for _lzcnt_u64 */
+ #elif MSC_VERSION_SINCE(1310)
+ # include <intrin.h> /* for the following intrinsics */
+ #endif
+
+ #if defined(_MSC_VER) && defined(__AVX__)
+ # pragma intrinsic(__popcnt)
+ # pragma intrinsic(__popcnt64)
+ #endif
+
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ # pragma intrinsic(__lzcnt)
+ # pragma intrinsic(__lzcnt64)
+ #endif
+
+ #if MSC_VERSION_SINCE(1310)
+ # pragma intrinsic(_rotl)
+ # pragma intrinsic(_rotr)
+ # ifdef _WIN64
+ # pragma intrinsic(_rotl64)
+ # pragma intrinsic(_rotr64)
+ # endif
+ #endif
+
+ #if MSC_VERSION_SINCE(1400)
+ # pragma intrinsic(_BitScanForward)
+ # pragma intrinsic(_BitScanReverse)
+ # ifdef _WIN64
+ # pragma intrinsic(_BitScanForward64)
+ # pragma intrinsic(_BitScanReverse64)
+ # endif
+ #endif
+
+ #include "parser_value.h" /* for VALUE */
+ #include "internal/static_assert.h" /* for STATIC_ASSERT */
+
+ /* The most significant bit of the lower part of half-long integer.
+ * If sizeof(long) == 4, this is 0x8000.
+ * If sizeof(long) == 8, this is 0x80000000.
+ */
+ #define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))
+
+ #define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)
+
+ #define SIGNED_INTEGER_MIN(T) \
+ ((sizeof(T) == sizeof(int8_t)) ? ((T)INT8_MIN) : \
+ ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) : \
+ ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) : \
+ ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) : \
+ 0))))
+
+ #define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))
+
+ #define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)
+
+ #if __has_builtin(__builtin_mul_overflow_p)
+ # define MUL_OVERFLOW_P(a, b) \
+ __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
+ #elif __has_builtin(__builtin_mul_overflow)
+ # define MUL_OVERFLOW_P(a, b) \
+ __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
+ #endif
+
+ #define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
+ (a) == 0 ? 0 : \
+ (a) == -1 ? (b) < -(max) : \
+ (a) > 0 ? \
+ ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
+ ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))
+
+ #if __has_builtin(__builtin_mul_overflow_p)
+ /* __builtin_mul_overflow_p can take bitfield */
+ /* and GCC permits bitfields for integers other than int */
+ # define MUL_OVERFLOW_FIXNUM_P(a, b) \
+ __extension__ ({ \
+ struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
+ __builtin_mul_overflow_p((a), (b), c.fixnum); \
+ })
+ #else
+ # define MUL_OVERFLOW_FIXNUM_P(a, b) \
+ MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
+ #endif
+
+ #ifdef MUL_OVERFLOW_P
+ # define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
+ # define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
+ # define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b)
+ #else
+ # define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
+ # define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
+ # define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
+ #endif
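/* Editorial sketch (not part of this diff): the division-based predicate
 * above never overflows itself; it special-cases a == 0 and a == -1
 * (min / -1 would overflow on two's-complement machines) before dividing
 * the bounds by a. A hypothetical guarded multiply using it: */
static long
checked_mul_long(long a, long b, int *overflowed)
{
    if (MUL_OVERFLOW_LONG_P(a, b)) {
        *overflowed = 1;
        return 0; /* caller should fall back to arbitrary precision */
    }
    *overflowed = 0;
    return a * b;
}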
+
+ #ifdef HAVE_UINT128_T
+ # define bit_length(x) \
+ (unsigned int) \
+ (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
+ sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
+ 128 - nlz_int128((uint128_t)(x)))
+ #else
+ # define bit_length(x) \
+ (unsigned int) \
+ (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
+ 64 - nlz_int64((uint64_t)(x)))
+ #endif
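/* Editorial sketch (not part of this diff): bit_length(x) is the index of
 * the highest set bit plus one, and 0 for x == 0. Expected values, assuming
 * <assert.h>; check_bit_length_examples is a hypothetical helper: */
static void
check_bit_length_examples(void)
{
    assert(bit_length(0) == 0);
    assert(bit_length(1) == 1);    /* 0b1 */
    assert(bit_length(255) == 8);  /* 0b11111111 */
    assert(bit_length(256) == 9);  /* 0b100000000 */
}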
+
+ #ifndef swap16
+ # define swap16 ruby_swap16
+ #endif
+
+ #ifndef swap32
+ # define swap32 ruby_swap32
+ #endif
+
+ #ifndef swap64
+ # define swap64 ruby_swap64
+ #endif
+
+ static inline uint16_t ruby_swap16(uint16_t);
+ static inline uint32_t ruby_swap32(uint32_t);
+ static inline uint64_t ruby_swap64(uint64_t);
+ static inline unsigned nlz_int(unsigned x);
+ static inline unsigned nlz_long(unsigned long x);
+ static inline unsigned nlz_long_long(unsigned long long x);
+ static inline unsigned nlz_intptr(uintptr_t x);
+ static inline unsigned nlz_int32(uint32_t x);
+ static inline unsigned nlz_int64(uint64_t x);
+ #ifdef HAVE_UINT128_T
+ static inline unsigned nlz_int128(uint128_t x);
+ #endif
+ static inline unsigned rb_popcount32(uint32_t x);
+ static inline unsigned rb_popcount64(uint64_t x);
+ static inline unsigned rb_popcount_intptr(uintptr_t x);
+ static inline int ntz_int32(uint32_t x);
+ static inline int ntz_int64(uint64_t x);
+ static inline int ntz_intptr(uintptr_t x);
+ static inline VALUE RUBY_BIT_ROTL(VALUE, int);
+ static inline VALUE RUBY_BIT_ROTR(VALUE, int);
+
+ static inline uint16_t
+ ruby_swap16(uint16_t x)
+ {
+ #if __has_builtin(__builtin_bswap16)
+ return __builtin_bswap16(x);
+
+ #elif MSC_VERSION_SINCE(1310)
+ return _byteswap_ushort(x);
+
+ #else
+ return (x << 8) | (x >> 8);
+
+ #endif
+ }
+
+ static inline uint32_t
+ ruby_swap32(uint32_t x)
+ {
+ #if __has_builtin(__builtin_bswap32)
+ return __builtin_bswap32(x);
+
+ #elif MSC_VERSION_SINCE(1310)
+ return _byteswap_ulong(x);
+
+ #else
+ x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
+ x = ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
+ return x;
+
+ #endif
+ }
+
+ static inline uint64_t
+ ruby_swap64(uint64_t x)
+ {
+ #if __has_builtin(__builtin_bswap64)
+ return __builtin_bswap64(x);
+
+ #elif MSC_VERSION_SINCE(1310)
+ return _byteswap_uint64(x);
+
+ #else
+ x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
+ x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
+ x = ((x & 0x00FF00FF00FF00FFULL) << 8) | ((x & 0xFF00FF00FF00FF00ULL) >> 8);
+ return x;
+
+ #endif
+ }
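/* Editorial sketch (not part of this diff): the swap functions reverse byte
 * order, e.g. for endianness conversion; the portable fallbacks swap halves
 * first, then progressively smaller groups. Assumes <assert.h>;
 * check_swap_examples is a hypothetical helper: */
static void
check_swap_examples(void)
{
    assert(ruby_swap16(0x1122) == 0x2211);
    assert(ruby_swap32(0x11223344) == 0x44332211);
    assert(ruby_swap64(0x1122334455667788ULL) == 0x8877665544332211ULL);
}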
+
+ static inline unsigned int
+ nlz_int32(uint32_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ /* Note: It seems there is no such thing like __LZCNT__ predefined in MSVC.
+ * AMD CPUs have had this instruction for decades (since K10) but for
+ * Intel, Haswell is the oldest one. We need to use __AVX2__ for maximum
+ * safety. */
+ return (unsigned int)__lzcnt(x);
+
+ #elif defined(__x86_64__) && defined(__LZCNT__)
+ return (unsigned int)_lzcnt_u32(x);
+
+ #elif MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
+ unsigned long r;
+ return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;
+
+ #elif __has_builtin(__builtin_clz)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
+ return x ? (unsigned int)__builtin_clz(x) : 32;
+
+ #else
+ uint32_t y;
+ unsigned n = 32;
+ y = x >> 16; if (y) {n -= 16; x = y;}
+ y = x >> 8; if (y) {n -= 8; x = y;}
+ y = x >> 4; if (y) {n -= 4; x = y;}
+ y = x >> 2; if (y) {n -= 2; x = y;}
+ y = x >> 1; if (y) {return n - 2;}
+ return (unsigned int)(n - x);
+ #endif
+ }
+
+ static inline unsigned int
+ nlz_int64(uint64_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ return (unsigned int)__lzcnt64(x);
+
+ #elif defined(__x86_64__) && defined(__LZCNT__)
+ return (unsigned int)_lzcnt_u64(x);
+
+ #elif defined(_WIN64) && MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
+ unsigned long r;
+ return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;
+
+ #elif __has_builtin(__builtin_clzl)
+ if (x == 0) {
+ return 64;
+ }
+ else if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_clzl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_clzll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ uint64_t y;
+ unsigned int n = 64;
+ y = x >> 32; if (y) {n -= 32; x = y;}
+ y = x >> 16; if (y) {n -= 16; x = y;}
+ y = x >> 8; if (y) {n -= 8; x = y;}
+ y = x >> 4; if (y) {n -= 4; x = y;}
+ y = x >> 2; if (y) {n -= 2; x = y;}
+ y = x >> 1; if (y) {return n - 2;}
+ return (unsigned int)(n - x);
+
+ #endif
+ }
+
+ #ifdef HAVE_UINT128_T
+ static inline unsigned int
+ nlz_int128(uint128_t x)
+ {
+ uint64_t y = (uint64_t)(x >> 64);
+
+ if (x == 0) {
+ return 128;
+ }
+ else if (y == 0) {
+ return (unsigned int)nlz_int64(x) + 64;
+ }
+ else {
+ return (unsigned int)nlz_int64(y);
+ }
+ }
+ #endif
+
+ static inline unsigned int
+ nlz_int(unsigned int x)
+ {
+ if (sizeof(unsigned int) * CHAR_BIT == 32) {
+ return nlz_int32((uint32_t)x);
+ }
+ else if (sizeof(unsigned int) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_long(unsigned long x)
+ {
+ if (sizeof(unsigned long) * CHAR_BIT == 32) {
+ return nlz_int32((uint32_t)x);
+ }
+ else if (sizeof(unsigned long) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_long_long(unsigned long long x)
+ {
+ if (sizeof(unsigned long long) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ #ifdef HAVE_UINT128_T
+ else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
+ return nlz_int128((uint128_t)x);
+ }
+ #endif
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) == sizeof(unsigned int)) {
+ return nlz_int((unsigned int)x);
+ }
+ if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+ return nlz_long((unsigned long)x);
+ }
+ if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+ return nlz_long_long((unsigned long long)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
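/* Editorial sketch (not part of this diff): nlz_* count leading zero bits;
 * the portable branch is a binary search that narrows the window by half
 * at each step. Assumes <assert.h>; check_nlz_examples is a hypothetical
 * helper: */
static void
check_nlz_examples(void)
{
    assert(nlz_int32(0) == 32);           /* no bits set */
    assert(nlz_int32(1) == 31);           /* only bit 0 set */
    assert(nlz_int32(0x80000000u) == 0);  /* top bit set */
}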
+
+ static inline unsigned int
+ rb_popcount32(uint32_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX__)
+ /* Note: CPUs since Nehalem and Barcelona have had this instruction so SSE
+ * 4.2 should suffice, but it seems there is no such thing like __SSE_4_2__
+ * predefined macro in MSVC. They do have __AVX__ so use it instead. */
+ return (unsigned int)__popcnt(x);
+
+ #elif __has_builtin(__builtin_popcount)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
+ return (unsigned int)__builtin_popcount(x);
+
+ #else
+ x = (x & 0x55555555) + (x >> 1 & 0x55555555);
+ x = (x & 0x33333333) + (x >> 2 & 0x33333333);
+ x = (x & 0x0f0f0f0f) + (x >> 4 & 0x0f0f0f0f);
+ x = (x & 0x001f001f) + (x >> 8 & 0x001f001f);
+ x = (x & 0x0000003f) + (x >>16 & 0x0000003f);
+ return (unsigned int)x;
+
+ #endif
+ }
+
+ static inline unsigned int
+ rb_popcount64(uint64_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX__)
+ return (unsigned int)__popcnt64(x);
+
+ #elif __has_builtin(__builtin_popcount)
+ if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_popcountl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_popcountll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
+ x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
+ x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
+ x = (x & 0x001f001f001f001f) + (x >> 8 & 0x001f001f001f001f);
+ x = (x & 0x0000003f0000003f) + (x >>16 & 0x0000003f0000003f);
+ x = (x & 0x000000000000007f) + (x >>32 & 0x000000000000007f);
+ return (unsigned int)x;
+
+ #endif
+ }
+
+ static inline unsigned int
+ rb_popcount_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) * CHAR_BIT == 64) {
+ return rb_popcount64((uint64_t)x);
+ }
+ else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
+ return rb_popcount32((uint32_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
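/* Editorial sketch (not part of this diff): rb_popcount* count set bits;
 * the portable branch sums bits pairwise, then in groups of four, and so
 * on (a SWAR reduction). Assumes <assert.h>; check_popcount_examples is
 * a hypothetical helper: */
static void
check_popcount_examples(void)
{
    assert(rb_popcount32(0) == 0);
    assert(rb_popcount32(0xF0) == 4);            /* 0b11110000 */
    assert(rb_popcount64(~(uint64_t)0) == 64);   /* all bits set */
}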
+
+ static inline int
+ ntz_int32(uint32_t x)
+ {
+ #if defined(__x86_64__) && defined(__BMI__)
+ return (unsigned)_tzcnt_u32(x);
+
+ #elif MSC_VERSION_SINCE(1400)
+ /* :FIXME: Is there any way to issue TZCNT instead of BSF, apart from using
+ * assembly? Because issuing LZCNT seems possible (see nlz.h). */
+ unsigned long r;
+ return _BitScanForward(&r, x) ? (int)r : 32;
+
+ #elif __has_builtin(__builtin_ctz)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
+ return x ? (unsigned)__builtin_ctz(x) : 32;
+
+ #else
+ return rb_popcount32((~x) & (x-1));
+
+ #endif
+ }
+
+ static inline int
+ ntz_int64(uint64_t x)
+ {
+ #if defined(__x86_64__) && defined(__BMI__)
+ return (unsigned)_tzcnt_u64(x);
+
+ #elif defined(_WIN64) && MSC_VERSION_SINCE(1400)
+ unsigned long r;
+ return _BitScanForward64(&r, x) ? (int)r : 64;
+
+ #elif __has_builtin(__builtin_ctzl)
+ if (x == 0) {
+ return 64;
+ }
+ else if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned)__builtin_ctzl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned)__builtin_ctzll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ return rb_popcount64((~x) & (x-1));
+
+ #endif
+ }
+
+ static inline int
+ ntz_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) * CHAR_BIT == 64) {
+ return ntz_int64((uint64_t)x);
+ }
+ else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
+ return ntz_int32((uint32_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
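/* Editorial sketch (not part of this diff): the fallback
 * rb_popcount32((~x) & (x-1)) works because x - 1 flips the lowest set bit
 * and every zero below it, so (~x) & (x-1) is a mask of exactly the
 * trailing zeros; for x == 0 the mask is all ones, giving 32 (or 64),
 * matching the intrinsic branches. Assumes <assert.h>; check_ntz_examples
 * is a hypothetical helper: */
static void
check_ntz_examples(void)
{
    assert(ntz_int32(0x28) == 3);  /* 0b101000: three trailing zeros */
    assert(ntz_int32(1) == 0);
    assert(ntz_int32(0) == 32);
}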
+
+ static inline VALUE
+ RUBY_BIT_ROTL(VALUE v, int n)
+ {
+ #if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return __builtin_rotateleft32(v, n);
+
+ #elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return __builtin_rotateleft64(v, n);
+
+ #elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return _rotl(v, n);
+
+ #elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return _rotl64(v, n);
+
+ #elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
+ return _lrotl(v, n);
+
+ #else
+ const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
+ return (v << (n & m)) | (v >> (-n & m));
+ #endif
+ }
+
+ static inline VALUE
+ RUBY_BIT_ROTR(VALUE v, int n)
+ {
+ #if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return __builtin_rotateright32(v, n);
+
+ #elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return __builtin_rotateright64(v, n);
+
+ #elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return _rotr(v, n);
+
+ #elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return _rotr64(v, n);
+
+ #elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
+ return _lrotr(v, n);
+
+ #else
+ const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
+ return (v << (-n & m)) | (v >> (n & m));
+ #endif
+ }
+
+ #endif /* INTERNAL_BITS2_H */
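/* Editorial sketch (not part of this diff): the portable rotate
 * (v << (n & m)) | (v >> (-n & m)) masks both shift counts with
 * m = width - 1, so no shift ever reaches the full word width and the
 * expression stays free of undefined behavior, including at n == 0.
 * Rotating left then right by the same count round-trips any VALUE;
 * check_rot_roundtrip is a hypothetical helper: */
static VALUE
check_rot_roundtrip(VALUE v, int n)
{
    return RUBY_BIT_ROTR(RUBY_BIT_ROTL(v, n), n); /* always equals v */
}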
data/ext/kanayago/parser_node.h
@@ -0,0 +1,32 @@
+ #ifndef RUBY_PARSER_NODE_H
+ #define RUBY_PARSER_NODE_H 1
+ /*
+ * This is a header file used only by "parse.y"
+ */
+ #include "rubyparser.h"
+ #include "internal/compilers.h"
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #if 0
+ } /* satisfy cc-mode */
+ #endif
+ #endif
+
+ static inline rb_code_location_t
+ code_loc_gen(const rb_code_location_t *loc1, const rb_code_location_t *loc2)
+ {
+ rb_code_location_t loc;
+ loc.beg_pos = loc1->beg_pos;
+ loc.end_pos = loc2->end_pos;
+ return loc;
+ }
+
+ #if defined(__cplusplus)
+ #if 0
+ { /* satisfy cc-mode */
+ #endif
+ } /* extern "C" { */
+ #endif
+
+ #endif /* RUBY_PARSER_NODE_H */
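/* Editorial sketch (not part of this diff): code_loc_gen merges two source
 * spans into one running from loc1's start to loc2's end, e.g. to make a
 * binary expression's location cover both of its operands. lhs_loc and
 * rhs_loc below are hypothetical variables: */
rb_code_location_t expr_loc = code_loc_gen(&lhs_loc, &rhs_loc);
/* expr_loc.beg_pos == lhs_loc.beg_pos, expr_loc.end_pos == rhs_loc.end_pos */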