kanayago 0.1.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (74) hide show
  1. checksums.yaml +7 -0
  2. data/.rubocop.yml +15 -0
  3. data/.rubocop_todo.yml +23 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +79 -0
  6. data/Rakefile +182 -0
  7. data/ext/kanayago/ccan/check_type/check_type.h +63 -0
  8. data/ext/kanayago/ccan/container_of/container_of.h +142 -0
  9. data/ext/kanayago/ccan/list/list.h +791 -0
  10. data/ext/kanayago/ccan/str/str.h +17 -0
  11. data/ext/kanayago/constant.h +53 -0
  12. data/ext/kanayago/extconf.rb +21 -0
  13. data/ext/kanayago/id.h +347 -0
  14. data/ext/kanayago/id_table.h +39 -0
  15. data/ext/kanayago/internal/array.h +151 -0
  16. data/ext/kanayago/internal/basic_operators.h +64 -0
  17. data/ext/kanayago/internal/bignum.h +244 -0
  18. data/ext/kanayago/internal/bits.h +568 -0
  19. data/ext/kanayago/internal/compile.h +34 -0
  20. data/ext/kanayago/internal/compilers.h +107 -0
  21. data/ext/kanayago/internal/complex.h +29 -0
  22. data/ext/kanayago/internal/encoding.h +36 -0
  23. data/ext/kanayago/internal/error.h +218 -0
  24. data/ext/kanayago/internal/fixnum.h +184 -0
  25. data/ext/kanayago/internal/gc.h +322 -0
  26. data/ext/kanayago/internal/hash.h +191 -0
  27. data/ext/kanayago/internal/imemo.h +261 -0
  28. data/ext/kanayago/internal/io.h +140 -0
  29. data/ext/kanayago/internal/numeric.h +274 -0
  30. data/ext/kanayago/internal/parse.h +117 -0
  31. data/ext/kanayago/internal/rational.h +71 -0
  32. data/ext/kanayago/internal/re.h +28 -0
  33. data/ext/kanayago/internal/ruby_parser.h +125 -0
  34. data/ext/kanayago/internal/sanitizers.h +297 -0
  35. data/ext/kanayago/internal/serial.h +23 -0
  36. data/ext/kanayago/internal/static_assert.h +16 -0
  37. data/ext/kanayago/internal/string.h +186 -0
  38. data/ext/kanayago/internal/symbol.h +45 -0
  39. data/ext/kanayago/internal/thread.h +79 -0
  40. data/ext/kanayago/internal/variable.h +72 -0
  41. data/ext/kanayago/internal/vm.h +137 -0
  42. data/ext/kanayago/internal/warnings.h +16 -0
  43. data/ext/kanayago/internal.h +108 -0
  44. data/ext/kanayago/kanayago.c +420 -0
  45. data/ext/kanayago/kanayago.h +21 -0
  46. data/ext/kanayago/lex.c +302 -0
  47. data/ext/kanayago/method.h +255 -0
  48. data/ext/kanayago/node.c +440 -0
  49. data/ext/kanayago/node.h +111 -0
  50. data/ext/kanayago/node_name.inc +224 -0
  51. data/ext/kanayago/parse.c +26931 -0
  52. data/ext/kanayago/parse.h +244 -0
  53. data/ext/kanayago/parse.tmp.y +16145 -0
  54. data/ext/kanayago/parser_bits.h +564 -0
  55. data/ext/kanayago/parser_node.h +32 -0
  56. data/ext/kanayago/parser_st.c +164 -0
  57. data/ext/kanayago/parser_st.h +162 -0
  58. data/ext/kanayago/parser_value.h +106 -0
  59. data/ext/kanayago/probes.h +4 -0
  60. data/ext/kanayago/ruby_assert.h +14 -0
  61. data/ext/kanayago/ruby_atomic.h +23 -0
  62. data/ext/kanayago/ruby_parser.c +1165 -0
  63. data/ext/kanayago/rubyparser.h +1391 -0
  64. data/ext/kanayago/shape.h +234 -0
  65. data/ext/kanayago/st.c +2339 -0
  66. data/ext/kanayago/symbol.h +123 -0
  67. data/ext/kanayago/thread_pthread.h +168 -0
  68. data/ext/kanayago/universal_parser.c +230 -0
  69. data/ext/kanayago/vm_core.h +2215 -0
  70. data/ext/kanayago/vm_opts.h +67 -0
  71. data/lib/kanayago/version.rb +5 -0
  72. data/lib/kanayago.rb +11 -0
  73. data/sig/kanayago.rbs +4 -0
  74. metadata +116 -0
@@ -0,0 +1,568 @@
1
#ifndef INTERNAL_BITS_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_BITS_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 *            Permission is hereby granted, to either redistribute and/or
 *            modify this file, provided that the conditions mentioned in the
 *            file COPYING are met. Consult the file for details.
 * @brief Internal header for bitwise integer algorithms.
 * @see Henry S. Warren Jr., "Hacker's Delight" (2nd ed.), 2013.
 * @see SEI CERT C Coding Standard INT32-C. "Ensure that operations on
 *      signed integers do not result in overflow"
 * @see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
 * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateleft
 * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateright
 * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/byteswap-uint64-byteswap-ulong-byteswap-ushort
 * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rotl-rotl64-rotr-rotr64
 * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanforward-bitscanforward64
 * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanreverse-bitscanreverse64
 * @see https://docs.microsoft.com/en-us/cpp/intrinsics/lzcnt16-lzcnt-lzcnt64
 * @see https://docs.microsoft.com/en-us/cpp/intrinsics/popcnt16-popcnt-popcnt64
 * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_lzcnt_u32
 * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_tzcnt_u32
 * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotl64
 * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotr64
 * @see https://stackoverflow.com/a/776523
 */
#include "ruby/internal/config.h"
#include <limits.h>             /* for CHAR_BIT */
#include <stdint.h>             /* for uintptr_t */
#include "internal/compilers.h" /* for MSC_VERSION_SINCE */

#if MSC_VERSION_SINCE(1310)
# include <stdlib.h>            /* for _byteswap_uint64 */
#endif

#if defined(HAVE_X86INTRIN_H)
# include <x86intrin.h>         /* for _lzcnt_u64 */
#elif MSC_VERSION_SINCE(1310)
# include <intrin.h>            /* for the following intrinsics */
#endif

/* Tell MSVC to emit the listed intrinsics inline rather than as CRT calls. */
#if defined(_MSC_VER) && defined(__AVX__)
# pragma intrinsic(__popcnt)
# pragma intrinsic(__popcnt64)
#endif

#if defined(_MSC_VER) && defined(__AVX2__)
# pragma intrinsic(__lzcnt)
# pragma intrinsic(__lzcnt64)
#endif

#if MSC_VERSION_SINCE(1310)
# pragma intrinsic(_rotl)
# pragma intrinsic(_rotr)
# ifdef _WIN64
#  pragma intrinsic(_rotl64)
#  pragma intrinsic(_rotr64)
# endif
#endif

#if MSC_VERSION_SINCE(1400)
# pragma intrinsic(_BitScanForward)
# pragma intrinsic(_BitScanReverse)
# ifdef _WIN64
#  pragma intrinsic(_BitScanForward64)
#  pragma intrinsic(_BitScanReverse64)
# endif
#endif

#include "ruby/ruby.h"                /* for VALUE */
#include "internal/static_assert.h"   /* for STATIC_ASSERT */

/* The most significant bit of the lower part of half-long integer.
 * If sizeof(long) == 4, this is 0x8000.
 * If sizeof(long) == 8, this is 0x80000000.
 */
#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))

/* Non-zero iff the integer type T is signed (0 - 1 underflows unsigned T). */
#define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)

/* Minimum value of a signed integer type selected by its byte size. */
#define SIGNED_INTEGER_MIN(T) \
    ((sizeof(T) == sizeof(int8_t))  ? ((T)INT8_MIN)  : \
    ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) : \
    ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) : \
    ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) : \
    0))))

/* Maximum of a signed type: flip every bit of its minimum. */
#define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))

#define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)

/* MUL_OVERFLOW_P(a, b): true iff a * b overflows their common type.
 * Prefer the GCC/Clang overflow builtins; otherwise left undefined so
 * the fallbacks below kick in. */
#if __has_builtin(__builtin_mul_overflow_p)
# define MUL_OVERFLOW_P(a, b) \
    __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_mul_overflow)
# define MUL_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
#endif

/* Portable overflow predicate: decide via division before multiplying,
 * so no UB-triggering signed overflow is ever performed. */
#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) == 0 ? 0 : \
    (a) == -1 ? (b) < -(max) : \
    (a) > 0 ? \
      ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
      ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))

#if __has_builtin(__builtin_mul_overflow_p)
/* __builtin_mul_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_mul_overflow_p((a), (b), c.fixnum); \
    })
#else
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(MUL_OVERFLOW_P) && defined(USE___BUILTIN_MUL_OVERFLOW_LONG_LONG)
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef MUL_OVERFLOW_P
# define MUL_OVERFLOW_LONG_P(a, b)  MUL_OVERFLOW_P(a, b)
# define MUL_OVERFLOW_INT_P(a, b)   MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_P(a, b)  MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define MUL_OVERFLOW_INT_P(a, b)   MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif

/* bit_length(x): position of the highest set bit, i.e. width minus the
 * number of leading zeros, dispatched on the argument's size. */
#ifdef HAVE_UINT128_T
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
     sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
                                   128 - nlz_int128((uint128_t)(x)))
#else
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
                                    64 - nlz_int64((uint64_t)(x)))
#endif

/* Allow the platform config to provide its own byte-swap primitives. */
#ifndef swap16
# define swap16 ruby_swap16
#endif

#ifndef swap32
# define swap32 ruby_swap32
#endif

#ifndef swap64
# define swap64 ruby_swap64
#endif

static inline uint16_t ruby_swap16(uint16_t);
static inline uint32_t ruby_swap32(uint32_t);
static inline uint64_t ruby_swap64(uint64_t);
static inline unsigned nlz_int(unsigned x);
static inline unsigned nlz_long(unsigned long x);
static inline unsigned nlz_long_long(unsigned long long x);
static inline unsigned nlz_intptr(uintptr_t x);
static inline unsigned nlz_int32(uint32_t x);
static inline unsigned nlz_int64(uint64_t x);
#ifdef HAVE_UINT128_T
static inline unsigned nlz_int128(uint128_t x);
#endif
static inline unsigned rb_popcount32(uint32_t x);
static inline unsigned rb_popcount64(uint64_t x);
static inline unsigned rb_popcount_intptr(uintptr_t x);
static inline int ntz_int32(uint32_t x);
static inline int ntz_int64(uint64_t x);
static inline int ntz_intptr(uintptr_t x);
static inline VALUE RUBY_BIT_ROTL(VALUE, int);
static inline VALUE RUBY_BIT_ROTR(VALUE, int);
180
+
181
/* Reverse the byte order of a 16-bit value.  Uses a compiler builtin or
 * MSVC intrinsic when available; otherwise a portable shift/or. */
static inline uint16_t
ruby_swap16(uint16_t x)
{
#if __has_builtin(__builtin_bswap16)
    return __builtin_bswap16(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ushort(x);

#else
    return (x << 8) | (x >> 8);

#endif
}
195
+
196
/* Reverse the byte order of a 32-bit value. */
static inline uint32_t
ruby_swap32(uint32_t x)
{
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ulong(x);

#else
    /* Swap halves, then swap bytes within each half. */
    x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
    x = ((x & 0x00FF00FF) <<  8) | ((x & 0xFF00FF00) >>  8);
    return x;

#endif
}
212
+
213
/* Reverse the byte order of a 64-bit value. */
static inline uint64_t
ruby_swap64(uint64_t x)
{
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_uint64(x);

#else
    /* Swap 32-bit halves, then 16-bit quarters, then individual bytes. */
    x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
    x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
    x = ((x & 0x00FF00FF00FF00FFULL) <<  8) | ((x & 0xFF00FF00FF00FF00ULL) >>  8);
    return x;

#endif
}
230
+
231
/* Number of leading zero bits in a 32-bit value; returns 32 when x == 0. */
static inline unsigned int
nlz_int32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    /* Note: It seems there is no such thing like __LZCNT__ predefined in MSVC.
     * AMD CPUs have had this instruction for decades (since K10) but for
     * Intel, Haswell is the oldest one. We need to use __AVX2__ for maximum
     * safety. */
    return (unsigned int)__lzcnt(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
    /* BSR returns the index of the highest set bit; convert to a count. */
    unsigned long r;
    return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;

#elif __has_builtin(__builtin_clz)
    /* __builtin_clz(0) is undefined, hence the explicit zero check. */
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned int)__builtin_clz(x) : 32;

#else
    /* Portable binary-search fallback (cf. "Hacker's Delight" @see above). */
    uint32_t y;
    unsigned n = 32;
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);
#endif
}
263
+
264
/* Number of leading zero bits in a 64-bit value; returns 64 when x == 0. */
static inline unsigned int
nlz_int64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    return (unsigned int)__lzcnt64(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
    unsigned long r;
    return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;

#elif __has_builtin(__builtin_clzl)
    /* The builtins are undefined for 0; pick whichever of long / long long
     * is exactly 64 bits wide on this platform. */
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    /* Portable binary-search fallback, same scheme as nlz_int32. */
    uint64_t y;
    unsigned int n = 64;
    y = x >> 32; if (y) {n -= 32; x = y;}
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);

#endif
}
305
+
306
#ifdef HAVE_UINT128_T
/* Number of leading zero bits in a 128-bit value; returns 128 when x == 0. */
static inline unsigned int
nlz_int128(uint128_t x)
{
    uint64_t y = (uint64_t)(x >> 64);

    if (x == 0) {
        return 128;
    }
    else if (y == 0) {
        /* High half is empty: count in the low half (the implicit
         * truncation of x to uint64_t is intentional) plus 64. */
        return (unsigned int)nlz_int64(x) + 64;
    }
    else {
        return (unsigned int)nlz_int64(y);
    }
}
#endif
323
+
324
/* Number of leading zero bits in an `unsigned int`, counted over the
 * full width of the type.  Dispatches to the fixed-width counter that
 * matches the platform's `unsigned int` size. */
static inline unsigned int
nlz_int(unsigned int x)
{
    const size_t nbits = sizeof(unsigned int) * CHAR_BIT;

    if (nbits == 32) {
        return nlz_int32((uint32_t)x);
    }
    if (nbits == 64) {
        return nlz_int64((uint64_t)x);
    }
    UNREACHABLE_RETURN(~0);
}
337
+
338
/* Number of leading zero bits in an `unsigned long`, counted over the
 * full width of the type. */
static inline unsigned int
nlz_long(unsigned long x)
{
    const size_t nbits = sizeof(unsigned long) * CHAR_BIT;

    if (nbits == 32) {
        return nlz_int32((uint32_t)x);
    }
    if (nbits == 64) {
        return nlz_int64((uint64_t)x);
    }
    UNREACHABLE_RETURN(~0);
}
351
+
352
/* Number of leading zero bits in an `unsigned long long`, counted over
 * the full width of the type (64, or 128 where uint128_t exists). */
static inline unsigned int
nlz_long_long(unsigned long long x)
{
    if (sizeof(unsigned long long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
#ifdef HAVE_UINT128_T
    else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
        return nlz_int128((uint128_t)x);
    }
#endif
    else {
        UNREACHABLE_RETURN(~0);
    }
}
367
+
368
/* Number of leading zero bits in a `uintptr_t`, counted over the full
 * width of the type.  Dispatches on which standard unsigned type shares
 * the pointer width.
 *
 * Consistency/robustness fix: the original used two bare `if`s followed
 * by an `if`/`else`, so the final `else` bound only to the last `if` —
 * correct solely because every earlier branch returns.  Rewritten as a
 * single `else if` chain, matching nlz_int / nlz_long above. */
static inline unsigned int
nlz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) == sizeof(unsigned int)) {
        return nlz_int((unsigned int)x);
    }
    else if (sizeof(uintptr_t) == sizeof(unsigned long)) {
        return nlz_long((unsigned long)x);
    }
    else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
        return nlz_long_long((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}
384
+
385
/* Population count: the number of set bits in a 32-bit value. */
static inline unsigned int
rb_popcount32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    /* Note: CPUs since Nehalem and Barcelona have had this instruction so SSE
     * 4.2 should suffice, but it seems there is no such thing like __SSE_4_2__
     * predefined macro in MSVC. They do have __AVX__ so use it instead. */
    return (unsigned int)__popcnt(x);

#elif __has_builtin(__builtin_popcount)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
    return (unsigned int)__builtin_popcount(x);

#else
    /* Parallel bit count: each step sums adjacent fields; the masks narrow
     * because the partial sums need fewer bits than the field width. */
    x = (x & 0x55555555) + (x >> 1 & 0x55555555);
    x = (x & 0x33333333) + (x >> 2 & 0x33333333);
    x = (x & 0x07070707) + (x >> 4 & 0x07070707);
    x = (x & 0x000f000f) + (x >> 8 & 0x000f000f);
    x = (x & 0x0000001f) + (x >>16 & 0x0000001f);
    return (unsigned int)x;

#endif
}
408
+
409
/* Population count: the number of set bits in a 64-bit value. */
static inline unsigned int
rb_popcount64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    return (unsigned int)__popcnt64(x);

#elif __has_builtin(__builtin_popcount)
    /* Pick whichever of long / long long is exactly 64 bits wide. */
    if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    /* 64-bit variant of the parallel bit count used in rb_popcount32. */
    x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
    x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
    x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
    x = (x & 0x000f000f000f000f) + (x >> 8 & 0x000f000f000f000f);
    x = (x & 0x0000001f0000001f) + (x >>16 & 0x0000001f0000001f);
    x = (x & 0x000000000000003f) + (x >>32 & 0x000000000000003f);
    return (unsigned int)x;

#endif
}
438
+
439
/* Population count of a `uintptr_t`, dispatched on the pointer width. */
static inline unsigned int
rb_popcount_intptr(uintptr_t x)
{
    const size_t nbits = sizeof(uintptr_t) * CHAR_BIT;

    if (nbits == 64) {
        return rb_popcount64((uint64_t)x);
    }
    if (nbits == 32) {
        return rb_popcount32((uint32_t)x);
    }
    UNREACHABLE_RETURN(~0);
}
452
+
453
/* Number of trailing zero bits in a 32-bit value; returns 32 when x == 0. */
static inline int
ntz_int32(uint32_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400)
    /* :FIXME: Is there any way to issue TZCNT instead of BSF, apart from using
     * assembly? Because issuing LZCNT seems possible (see nlz.h). */
    unsigned long r;
    return _BitScanForward(&r, x) ? (int)r : 32;

#elif __has_builtin(__builtin_ctz)
    /* __builtin_ctz(0) is undefined, hence the explicit zero check. */
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned)__builtin_ctz(x) : 32;

#else
    /* (~x) & (x-1) sets exactly the bits below the lowest set bit, so its
     * popcount equals the trailing-zero count (32 when x == 0). */
    return rb_popcount32((~x) & (x-1));

#endif
}
474
+
475
/* Number of trailing zero bits in a 64-bit value; returns 64 when x == 0. */
static inline int
ntz_int64(uint64_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanForward64(&r, x) ? (int)r : 64;

#elif __has_builtin(__builtin_ctzl)
    /* The builtins are undefined for 0; pick whichever of long / long long
     * is exactly 64 bits wide on this platform. */
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    /* Same popcount-of-low-mask trick as ntz_int32, on 64 bits. */
    return rb_popcount64((~x) & (x-1));

#endif
}
505
+
506
/* Number of trailing zero bits in a `uintptr_t`, dispatched on the
 * pointer width. */
static inline int
ntz_intptr(uintptr_t x)
{
    const size_t nbits = sizeof(uintptr_t) * CHAR_BIT;

    if (nbits == 64) {
        return ntz_int64((uint64_t)x);
    }
    if (nbits == 32) {
        return ntz_int32((uint32_t)x);
    }
    UNREACHABLE_RETURN(~0);
}
519
+
520
/* Rotate `v` left by `n` bits over the full width of VALUE.  Uses a
 * compiler builtin or MSVC intrinsic when one matches SIZEOF_VALUE. */
static inline VALUE
RUBY_BIT_ROTL(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateleft32(v, n);

#elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateleft64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotl(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotl64(v, n);

#elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotl(v, n);

#else
    /* Masking shift counts with `& m` keeps both shifts strictly below
     * the type width, avoiding UB from shifting by the full width. */
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (n & m)) | (v >> (-n & m));
#endif
}
543
+
544
/* Rotate `v` right by `n` bits over the full width of VALUE; mirror of
 * RUBY_BIT_ROTL. */
static inline VALUE
RUBY_BIT_ROTR(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateright32(v, n);

#elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateright64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotr(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotr64(v, n);

#elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotr(v, n);

#else
    /* Masked shift counts avoid shifting by the full type width (UB). */
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (-n & m)) | (v >> (n & m));
#endif
}
567
+
568
+ #endif /* INTERNAL_BITS_H */
@@ -0,0 +1,34 @@
1
#ifndef INTERNAL_COMPILE_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_COMPILE_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 *            Permission is hereby granted, to either redistribute and/or
 *            modify this file, provided that the conditions mentioned in the
 *            file COPYING are met. Consult the file for details.
 * @brief Internal header for the compiler.
 */
#include "ruby/internal/config.h"
#include <stddef.h>     /* for size_t */
#include "ruby/ruby.h"  /* for rb_event_flag_t */

struct rb_iseq_struct; /* in vm_core.h */

/* compile.c — declarations only; definitions live in that translation unit. */
int rb_dvar_defined(ID, const struct rb_iseq_struct *);
int rb_local_defined(ID, const struct rb_iseq_struct *);
int rb_insn_len(VALUE insn);
const char *rb_insns_name(int i);
VALUE rb_insns_name_array(void);
int rb_iseq_cdhash_cmp(VALUE val, VALUE lit);
st_index_t rb_iseq_cdhash_hash(VALUE a);

/* iseq.c */
int rb_vm_insn_addr2insn(const void *);
int rb_vm_insn_decode(const VALUE encoded);
extern bool ruby_vm_keep_script_lines;

/* iseq.c (export) */
rb_event_flag_t rb_iseq_event_flags(const struct rb_iseq_struct *iseq, size_t pos);

#endif /* INTERNAL_COMPILE_H */
@@ -0,0 +1,107 @@
1
#ifndef INTERNAL_COMPILERS_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_COMPILERS_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 *            Permission is hereby granted, to either redistribute and/or
 *            modify this file, provided that the conditions mentioned in the
 *            file COPYING are met. Consult the file for details.
 * @brief Internal header absorbing C compiler differences.
 */
#include "ruby/internal/compiler_since.h"
#include "ruby/internal/has/attribute.h"
#include "ruby/internal/has/builtin.h"
#include "ruby/internal/has/c_attribute.h"
#include "ruby/internal/has/declspec_attribute.h"
#include "ruby/internal/has/extension.h"
#include "ruby/internal/has/feature.h"
#include "ruby/internal/has/warning.h"
#include "ruby/backward/2/gcc_version_since.h"

/* MSVC version tests take _MSC_VER-style numbers, e.g. 1400 for VS 2005. */
#define MSC_VERSION_SINCE(_)  RBIMPL_COMPILER_SINCE(MSVC, (_) / 100, (_) % 100, 0)
#define MSC_VERSION_BEFORE(_) RBIMPL_COMPILER_BEFORE(MSVC, (_) / 100, (_) % 100, 0)

/* Provide the __has_* probes on compilers that lack them, so the rest of
 * the codebase can use them unconditionally. */
#ifndef __has_attribute
# define __has_attribute(...) RBIMPL_HAS_ATTRIBUTE(__VA_ARGS__)
#endif

#ifndef __has_c_attribute
# /* As of writing everything that lacks __has_c_attribute also completely
#  * lacks C2x attributes as well. Might change in future? */
# define __has_c_attribute(...) 0
#endif

#ifndef __has_declspec_attribute
# define __has_declspec_attribute(...) RBIMPL_HAS_DECLSPEC_ATTRIBUTE(__VA_ARGS__)
#endif

#ifndef __has_builtin
# define __has_builtin(...) RBIMPL_HAS_BUILTIN(__VA_ARGS__)
#endif

#ifndef __has_feature
# define __has_feature(...) RBIMPL_HAS_FEATURE(__VA_ARGS__)
#endif

#ifndef __has_extension
# define __has_extension(...) RBIMPL_HAS_EXTENSION(__VA_ARGS__)
#endif

#ifndef __has_warning
# define __has_warning(...) RBIMPL_HAS_WARNING(__VA_ARGS__)
#endif

#ifndef __GNUC__
# define __extension__ /* void */
#endif

#ifndef MAYBE_UNUSED
# define MAYBE_UNUSED(x) x
#endif

#ifndef WARN_UNUSED_RESULT
# define WARN_UNUSED_RESULT(x) x
#endif

/* rb_obj_builtin_type(obj): the object's builtin type, or -1 for special
 * constants.  GCC gets a statement-expression macro that evaluates `obj`
 * exactly once; other compilers get an equivalent inline function. */
#define RB_OBJ_BUILTIN_TYPE(obj) rb_obj_builtin_type(obj)
#define OBJ_BUILTIN_TYPE(obj) RB_OBJ_BUILTIN_TYPE(obj)
#ifdef __GNUC__
#define rb_obj_builtin_type(obj) \
    __extension__({ \
        VALUE arg_obj = (obj); \
        RB_SPECIAL_CONST_P(arg_obj) ? -1 : \
        (int)RB_BUILTIN_TYPE(arg_obj); \
    })
#else
# include "ruby/ruby.h"
static inline int
rb_obj_builtin_type(VALUE obj)
{
    return RB_SPECIAL_CONST_P(obj) ? -1 :
        (int)RB_BUILTIN_TYPE(obj);
}
#endif

/* A macro for defining a flexible array, like: VALUE ary[FLEX_ARY_LEN]; */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define FLEX_ARY_LEN   /* VALUE ary[]; */
#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
# define FLEX_ARY_LEN 0 /* VALUE ary[0]; */
#else
# define FLEX_ARY_LEN 1 /* VALUE ary[1]; */
#endif

/*
 * For declaring bitfields out of non-unsigned int types:
 *   struct date {
 *      BITFIELD(enum months, month, 4);
 *      ...
 *   };
 */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define BITFIELD(type, name, size) type name : size
#else
# define BITFIELD(type, name, size) unsigned int name : size
#endif

#endif /* INTERNAL_COMPILERS_H */