datadog-ruby_core_source 3.4.2 → 3.4.3

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. checksums.yaml +4 -4
  2. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ccan/build_assert/build_assert.h +40 -0
  3. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ccan/check_type/check_type.h +63 -0
  4. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ccan/container_of/container_of.h +142 -0
  5. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ccan/list/list.h +791 -0
  6. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ccan/str/str.h +17 -0
  7. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/id.h +357 -0
  8. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/id_table.h +54 -0
  9. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/array.h +154 -0
  10. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/basic_operators.h +66 -0
  11. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/bignum.h +256 -0
  12. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/bits.h +647 -0
  13. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/box.h +83 -0
  14. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/class.h +806 -0
  15. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/cmdlineopt.h +64 -0
  16. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/compar.h +29 -0
  17. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/compile.h +34 -0
  18. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/compilers.h +107 -0
  19. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/complex.h +29 -0
  20. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/concurrent_set.h +21 -0
  21. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/cont.h +34 -0
  22. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/dir.h +16 -0
  23. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/enc.h +19 -0
  24. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/encoding.h +39 -0
  25. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/enum.h +18 -0
  26. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/enumerator.h +21 -0
  27. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/error.h +251 -0
  28. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/eval.h +43 -0
  29. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/file.h +38 -0
  30. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/fixnum.h +185 -0
  31. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/gc.h +360 -0
  32. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/hash.h +194 -0
  33. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/imemo.h +322 -0
  34. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/inits.h +51 -0
  35. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/io.h +163 -0
  36. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/load.h +20 -0
  37. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/loadpath.h +16 -0
  38. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/math.h +23 -0
  39. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/missing.h +19 -0
  40. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/numeric.h +323 -0
  41. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/object.h +63 -0
  42. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/parse.h +131 -0
  43. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/proc.h +30 -0
  44. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/process.h +124 -0
  45. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/ractor.h +10 -0
  46. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/random.h +17 -0
  47. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/range.h +40 -0
  48. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/rational.h +71 -0
  49. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/re.h +33 -0
  50. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/ruby_parser.h +102 -0
  51. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/sanitizers.h +346 -0
  52. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/serial.h +23 -0
  53. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/set_table.h +70 -0
  54. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/signal.h +25 -0
  55. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/st.h +11 -0
  56. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/static_assert.h +16 -0
  57. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/string.h +203 -0
  58. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/struct.h +160 -0
  59. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/symbol.h +46 -0
  60. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/thread.h +112 -0
  61. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/time.h +34 -0
  62. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/transcode.h +23 -0
  63. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/util.h +27 -0
  64. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/variable.h +74 -0
  65. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/vm.h +138 -0
  66. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/warnings.h +16 -0
  67. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal.h +105 -0
  68. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/iseq.h +359 -0
  69. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/method.h +276 -0
  70. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/node.h +122 -0
  71. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/parser_st.h +162 -0
  72. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/parser_value.h +106 -0
  73. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/ast.h +8254 -0
  74. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/defines.h +260 -0
  75. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/diagnostic.h +458 -0
  76. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/encoding.h +283 -0
  77. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/extension.h +19 -0
  78. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/node.h +129 -0
  79. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/options.h +485 -0
  80. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/pack.h +163 -0
  81. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/parser.h +936 -0
  82. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/prettyprint.h +34 -0
  83. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/prism.h +408 -0
  84. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/regexp.h +43 -0
  85. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/static_literals.h +121 -0
  86. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_buffer.h +236 -0
  87. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_char.h +204 -0
  88. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_constant_pool.h +218 -0
  89. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_integer.h +130 -0
  90. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_list.h +103 -0
  91. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_memchr.h +29 -0
  92. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_newline_list.h +113 -0
  93. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_string.h +200 -0
  94. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_strncasecmp.h +32 -0
  95. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/util/pm_strpbrk.h +46 -0
  96. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism/version.h +29 -0
  97. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/prism_compile.h +106 -0
  98. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ractor_core.h +331 -0
  99. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ruby_assert.h +14 -0
  100. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/ruby_atomic.h +73 -0
  101. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/rubyparser.h +1393 -0
  102. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/shape.h +464 -0
  103. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/thread_none.h +21 -0
  104. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/thread_pthread.h +177 -0
  105. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/vm_core.h +2397 -0
  106. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/vm_debug.h +124 -0
  107. data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/vm_opts.h +67 -0
  108. data/lib/datadog/ruby_core_source/version.rb +1 -1
  109. metadata +108 -3
  110. data/CHANGELOG.datadog.md +0 -19
data/lib/datadog/ruby_core_source/ruby-4.0.0-preview3/internal/bits.h
@@ -0,0 +1,647 @@
+ #ifndef INTERNAL_BITS_H /*-*-C-*-vi:se ft=c:*/
+ #define INTERNAL_BITS_H
+ /**
+ * @author Ruby developers <ruby-core@ruby-lang.org>
+ * @copyright This file is a part of the programming language Ruby.
+ * Permission is hereby granted, to either redistribute and/or
+ * modify this file, provided that the conditions mentioned in the
+ * file COPYING are met. Consult the file for details.
+ * @brief Internal header for bitwise integer algorithms.
+ * @see Henry S. Warren Jr., "Hacker's Delight" (2nd ed.), 2013.
+ * @see SEI CERT C Coding Standard INT32-C. "Ensure that operations on
+ * signed integers do not result in overflow"
+ * @see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
+ * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateleft
+ * @see https://clang.llvm.org/docs/LanguageExtensions.html#builtin-rotateright
+ * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/byteswap-uint64-byteswap-ulong-byteswap-ushort
+ * @see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rotl-rotl64-rotr-rotr64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanforward-bitscanforward64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/bitscanreverse-bitscanreverse64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/lzcnt16-lzcnt-lzcnt64
+ * @see https://docs.microsoft.com/en-us/cpp/intrinsics/popcnt16-popcnt-popcnt64
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_lzcnt_u32
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_tzcnt_u32
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotl64
+ * @see https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rotr64
+ * @see https://stackoverflow.com/a/776523
+ */
+ #include "ruby/internal/config.h"
+ #include <limits.h> /* for CHAR_BITS */
+ #include <stdint.h> /* for uintptr_t */
+ #include "internal/compilers.h" /* for MSC_VERSION_SINCE */
+
+ #ifdef _MSC_VER
+ # include <stdlib.h> /* for _byteswap_uint64 */
+ #endif
+
+ #if defined(HAVE_X86INTRIN_H)
+ # include <x86intrin.h> /* for _lzcnt_u64 */
+ #elif defined(_MSC_VER)
+ # include <intrin.h> /* for the following intrinsics */
+ #endif
+
+ #if defined(_MSC_VER) && defined(__AVX__)
+ # pragma intrinsic(__popcnt)
+ # pragma intrinsic(__popcnt64)
+ #endif
+
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ # pragma intrinsic(__lzcnt)
+ # pragma intrinsic(__lzcnt64)
+ #endif
+
+ #if defined(_MSC_VER)
+ # pragma intrinsic(_rotl)
+ # pragma intrinsic(_rotr)
+ # ifdef _WIN64
+ # pragma intrinsic(_rotl64)
+ # pragma intrinsic(_rotr64)
+ # endif
+ # pragma intrinsic(_BitScanForward)
+ # pragma intrinsic(_BitScanReverse)
+ # ifdef _WIN64
+ # pragma intrinsic(_BitScanForward64)
+ # pragma intrinsic(_BitScanReverse64)
+ # endif
+ #endif
+
+ #include "ruby/ruby.h" /* for VALUE */
+ #include "internal/static_assert.h" /* for STATIC_ASSERT */
+
+ /* The most significant bit of the lower part of half-long integer.
+ * If sizeof(long) == 4, this is 0x8000.
+ * If sizeof(long) == 8, this is 0x80000000.
+ */
+ #define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))
+
+ #define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)
+
+ #define SIGNED_INTEGER_MIN(T) \
+ ((sizeof(T) == sizeof(int8_t)) ? ((T)INT8_MIN) : \
+ ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) : \
+ ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) : \
+ ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) : \
+ 0))))
+
+ #define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))
+
+ #define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)
+
+ #ifndef MUL_OVERFLOW_SIGNED_INTEGER_P
+ #if __has_builtin(__builtin_mul_overflow_p)
+ # define MUL_OVERFLOW_P(a, b) \
+ __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
+ #elif __has_builtin(__builtin_mul_overflow)
+ # define MUL_OVERFLOW_P(a, b) \
+ __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
+ #endif
+
+ #define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
+ (a) == 0 ? 0 : \
+ (a) == -1 ? (b) < -(max) : \
+ (a) > 0 ? \
+ ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
+ ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))
+
+ #if __has_builtin(__builtin_mul_overflow_p)
+ /* __builtin_mul_overflow_p can take bitfield */
+ /* and GCC permits bitfields for integers other than int */
+ # define MUL_OVERFLOW_FIXNUM_P(a, b) \
+ __extension__ ({ \
+ struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
+ __builtin_mul_overflow_p((a), (b), c.fixnum); \
+ })
+ #else
+ # define MUL_OVERFLOW_FIXNUM_P(a, b) \
+ MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
+ #endif
+
+ #if defined(MUL_OVERFLOW_P) && defined(USE___BUILTIN_MUL_OVERFLOW_LONG_LONG)
+ # define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
+ #else
+ # define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
+ #endif
+
+ #ifdef MUL_OVERFLOW_P
+ # define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
+ # define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b)
+ #else
+ # define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
+ # define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
+ #endif
+ #endif
+
+ #ifndef ADD_OVERFLOW_SIGNED_INTEGER_P
+ #if __has_builtin(__builtin_add_overflow_p)
+ # define ADD_OVERFLOW_P(a, b) \
+ __builtin_add_overflow_p((a), (b), (__typeof__(a * b))0)
+ #elif __has_builtin(__builtin_add_overflow)
+ # define ADD_OVERFLOW_P(a, b) \
+ __extension__ ({ __typeof__(a) c; __builtin_add_overflow((a), (b), &c); })
+ #endif
+
+ #define ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
+ (a) > 0 ? (b) > (max) - (a) : (b) < (min) - (a))
+
+ #if __has_builtin(__builtin_add_overflow_p)
+ /* __builtin_add_overflow_p can take bitfield */
+ /* and GCC permits bitfields for integers other than int */
+ # define ADD_OVERFLOW_FIXNUM_P(a, b) \
+ __extension__ ({ \
+ struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
+ __builtin_add_overflow_p((a), (b), c.fixnum); \
+ })
+ #else
+ # define ADD_OVERFLOW_FIXNUM_P(a, b) \
+ ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
+ #endif
+
+ #if defined(ADD_OVERFLOW_P) && defined(USE___BUILTIN_ADD_OVERFLOW_LONG_LONG)
+ # define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
+ #else
+ # define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
+ #endif
+
+ #ifdef ADD_OVERFLOW_P
+ # define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
+ # define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_P(a, b)
+ #else
+ # define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
+ # define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
+ #endif
+ #endif
+
+ #ifndef SUB_OVERFLOW_SIGNED_INTEGER_P
+ #if __has_builtin(__builtin_sub_overflow_p)
+ # define SUB_OVERFLOW_P(a, b) \
+ __builtin_sub_overflow_p((a), (b), (__typeof__(a * b))0)
+ #elif __has_builtin(__builtin_sub_overflow)
+ # define SUB_OVERFLOW_P(a, b) \
+ __extension__ ({ __typeof__(a) c; __builtin_sub_overflow((a), (b), &c); })
+ #endif
+
+ #define SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
+ (b) > 0 ? (a) < (min) + (b) : (a) > (max) + (b))
+
+ #if __has_builtin(__builtin_sub_overflow_p)
+ /* __builtin_sub_overflow_p can take bitfield */
+ /* and GCC permits bitfields for integers other than int */
+ # define SUB_OVERFLOW_FIXNUM_P(a, b) \
+ __extension__ ({ \
+ struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
+ __builtin_sub_overflow_p((a), (b), c.fixnum); \
+ })
+ #else
+ # define SUB_OVERFLOW_FIXNUM_P(a, b) \
+ SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
+ #endif
+
+ #if defined(SUB_OVERFLOW_P) && defined(USE___BUILTIN_SUB_OVERFLOW_LONG_LONG)
+ # define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
+ #else
+ # define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
+ #endif
+
+ #ifdef SUB_OVERFLOW_P
+ # define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
+ # define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_P(a, b)
+ #else
+ # define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
+ # define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
+ #endif
+ #endif
+
+ #ifdef HAVE_UINT128_T
+ # define bit_length(x) \
+ (unsigned int) \
+ (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
+ sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
+ 128 - nlz_int128((uint128_t)(x)))
+ #else
+ # define bit_length(x) \
+ (unsigned int) \
+ (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
+ 64 - nlz_int64((uint64_t)(x)))
+ #endif
+
+ #ifndef swap16
+ # define swap16 ruby_swap16
+ #endif
+
+ #ifndef swap32
+ # define swap32 ruby_swap32
+ #endif
+
+ #ifndef swap64
+ # define swap64 ruby_swap64
+ #endif
+
+ static inline uint16_t ruby_swap16(uint16_t);
+ static inline uint32_t ruby_swap32(uint32_t);
+ static inline uint64_t ruby_swap64(uint64_t);
+ static inline unsigned nlz_int(unsigned x);
+ static inline unsigned nlz_long(unsigned long x);
+ static inline unsigned nlz_long_long(unsigned long long x);
+ static inline unsigned nlz_intptr(uintptr_t x);
+ static inline unsigned nlz_int32(uint32_t x);
+ static inline unsigned nlz_int64(uint64_t x);
+ #ifdef HAVE_UINT128_T
+ static inline unsigned nlz_int128(uint128_t x);
+ #endif
+ static inline unsigned rb_popcount32(uint32_t x);
+ static inline unsigned rb_popcount64(uint64_t x);
+ static inline unsigned rb_popcount_intptr(uintptr_t x);
+ static inline int ntz_int32(uint32_t x);
+ static inline int ntz_int64(uint64_t x);
+ static inline int ntz_intptr(uintptr_t x);
+ static inline VALUE RUBY_BIT_ROTL(VALUE, int);
+ static inline VALUE RUBY_BIT_ROTR(VALUE, int);
+
+ static inline uint16_t
+ ruby_swap16(uint16_t x)
+ {
+ #if __has_builtin(__builtin_bswap16)
+ return __builtin_bswap16(x);
+
+ #elif defined(_MSC_VER)
+ return _byteswap_ushort(x);
+
+ #else
+ return (x << 8) | (x >> 8);
+
+ #endif
+ }
+
+ static inline uint32_t
+ ruby_swap32(uint32_t x)
+ {
+ #if __has_builtin(__builtin_bswap32)
+ return __builtin_bswap32(x);
+
+ #elif defined(_MSC_VER)
+ return _byteswap_ulong(x);
+
+ #else
+ x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
+ x = ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
+ return x;
+
+ #endif
+ }
+
+ static inline uint64_t
+ ruby_swap64(uint64_t x)
+ {
+ #if __has_builtin(__builtin_bswap64)
+ return __builtin_bswap64(x);
+
+ #elif defined(_MSC_VER)
+ return _byteswap_uint64(x);
+
+ #else
+ x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
+ x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
+ x = ((x & 0x00FF00FF00FF00FFULL) << 8) | ((x & 0xFF00FF00FF00FF00ULL) >> 8);
+ return x;
+
+ #endif
+ }
+
+ static inline unsigned int
+ nlz_int32(uint32_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ /* Note: It seems there is no such thing like __LZCNT__ predefined in MSVC.
+ * AMD CPUs have had this instruction for decades (since K10) but for
+ * Intel, Haswell is the oldest one. We need to use __AVX2__ for maximum
+ * safety. */
+ return (unsigned int)__lzcnt(x);
+
+ #elif defined(__x86_64__) && defined(__LZCNT__)
+ return (unsigned int)_lzcnt_u32(x);
+
+ #elif defined(_MSC_VER) /* &&! defined(__AVX2__) */
+ unsigned long r;
+ return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;
+
+ #elif __has_builtin(__builtin_clz)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
+ return x ? (unsigned int)__builtin_clz(x) : 32;
+
+ #else
+ uint32_t y;
+ unsigned n = 32;
+ y = x >> 16; if (y) {n -= 16; x = y;}
+ y = x >> 8; if (y) {n -= 8; x = y;}
+ y = x >> 4; if (y) {n -= 4; x = y;}
+ y = x >> 2; if (y) {n -= 2; x = y;}
+ y = x >> 1; if (y) {return n - 2;}
+ return (unsigned int)(n - x);
+ #endif
+ }
+
+ static inline unsigned int
+ nlz_int64(uint64_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX2__)
+ return (unsigned int)__lzcnt64(x);
+
+ #elif defined(__x86_64__) && defined(__LZCNT__)
+ return (unsigned int)_lzcnt_u64(x);
+
+ #elif defined(_WIN64) && defined(_MSC_VER) /* &&! defined(__AVX2__) */
+ unsigned long r;
+ return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;
+
+ #elif __has_builtin(__builtin_clzl)
+ if (x == 0) {
+ return 64;
+ }
+ else if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_clzl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_clzll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ uint64_t y;
+ unsigned int n = 64;
+ y = x >> 32; if (y) {n -= 32; x = y;}
+ y = x >> 16; if (y) {n -= 16; x = y;}
+ y = x >> 8; if (y) {n -= 8; x = y;}
+ y = x >> 4; if (y) {n -= 4; x = y;}
+ y = x >> 2; if (y) {n -= 2; x = y;}
+ y = x >> 1; if (y) {return n - 2;}
+ return (unsigned int)(n - x);
+
+ #endif
+ }
+
+ #ifdef HAVE_UINT128_T
+ static inline unsigned int
+ nlz_int128(uint128_t x)
+ {
+ uint64_t y = (uint64_t)(x >> 64);
+
+ if (x == 0) {
+ return 128;
+ }
+ else if (y == 0) {
+ return (unsigned int)nlz_int64(x) + 64;
+ }
+ else {
+ return (unsigned int)nlz_int64(y);
+ }
+ }
+ #endif
+
+ static inline unsigned int
+ nlz_int(unsigned int x)
+ {
+ if (sizeof(unsigned int) * CHAR_BIT == 32) {
+ return nlz_int32((uint32_t)x);
+ }
+ else if (sizeof(unsigned int) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_long(unsigned long x)
+ {
+ if (sizeof(unsigned long) * CHAR_BIT == 32) {
+ return nlz_int32((uint32_t)x);
+ }
+ else if (sizeof(unsigned long) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_long_long(unsigned long long x)
+ {
+ if (sizeof(unsigned long long) * CHAR_BIT == 64) {
+ return nlz_int64((uint64_t)x);
+ }
+ #ifdef HAVE_UINT128_T
+ else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
+ return nlz_int128((uint128_t)x);
+ }
+ #endif
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ nlz_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) == sizeof(unsigned int)) {
+ return nlz_int((unsigned int)x);
+ }
+ if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+ return nlz_long((unsigned long)x);
+ }
+ if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+ return nlz_long_long((unsigned long long)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline unsigned int
+ rb_popcount32(uint32_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX__)
+ /* Note: CPUs since Nehalem and Barcelona have had this instruction so SSE
+ * 4.2 should suffice, but it seems there is no such thing like __SSE_4_2__
+ * predefined macro in MSVC. They do have __AVX__ so use it instead. */
+ return (unsigned int)__popcnt(x);
+
+ #elif __has_builtin(__builtin_popcount)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
+ return (unsigned int)__builtin_popcount(x);
+
+ #else
+ x = (x & 0x55555555) + (x >> 1 & 0x55555555);
+ x = (x & 0x33333333) + (x >> 2 & 0x33333333);
+ x = (x & 0x07070707) + (x >> 4 & 0x07070707);
+ x = (x & 0x000f000f) + (x >> 8 & 0x000f000f);
+ x = (x & 0x0000001f) + (x >>16 & 0x0000001f);
+ return (unsigned int)x;
+
+ #endif
+ }
+
+ static inline unsigned int
+ rb_popcount64(uint64_t x)
+ {
+ #if defined(_MSC_VER) && defined(__AVX__)
+ return (unsigned int)__popcnt64(x);
+
+ #elif __has_builtin(__builtin_popcount)
+ if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_popcountl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned int)__builtin_popcountll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
+ x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
+ x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
+ x = (x & 0x000f000f000f000f) + (x >> 8 & 0x000f000f000f000f);
+ x = (x & 0x0000001f0000001f) + (x >>16 & 0x0000001f0000001f);
+ x = (x & 0x000000000000003f) + (x >>32 & 0x000000000000003f);
+ return (unsigned int)x;
+
+ #endif
+ }
+
+ static inline unsigned int
+ rb_popcount_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) * CHAR_BIT == 64) {
+ return rb_popcount64((uint64_t)x);
+ }
+ else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
+ return rb_popcount32((uint32_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline int
+ ntz_int32(uint32_t x)
+ {
+ #if defined(__x86_64__) && defined(__BMI__)
+ return (unsigned)_tzcnt_u32(x);
+
+ #elif defined(_MSC_VER)
+ /* :FIXME: Is there any way to issue TZCNT instead of BSF, apart from using
+ * assembly? Because issuing LZCNT seems possible (see nlz.h). */
+ unsigned long r;
+ return _BitScanForward(&r, x) ? (int)r : 32;
+
+ #elif __has_builtin(__builtin_ctz)
+ STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
+ return x ? (unsigned)__builtin_ctz(x) : 32;
+
+ #else
+ return rb_popcount32((~x) & (x-1));
+
+ #endif
+ }
+
+ static inline int
+ ntz_int64(uint64_t x)
+ {
+ #if defined(__x86_64__) && defined(__BMI__)
+ return (unsigned)_tzcnt_u64(x);
+
+ #elif defined(_WIN64) && defined(_MSC_VER)
+ unsigned long r;
+ return _BitScanForward64(&r, x) ? (int)r : 64;
+
+ #elif __has_builtin(__builtin_ctzl)
+ if (x == 0) {
+ return 64;
+ }
+ else if (sizeof(long) * CHAR_BIT == 64) {
+ return (unsigned)__builtin_ctzl((unsigned long)x);
+ }
+ else if (sizeof(long long) * CHAR_BIT == 64) {
+ return (unsigned)__builtin_ctzll((unsigned long long)x);
+ }
+ else {
+ /* :FIXME: Is there a way to make this branch a compile-time error? */
+ UNREACHABLE_RETURN(~0);
+ }
+
+ #else
+ return rb_popcount64((~x) & (x-1));
+
+ #endif
+ }
+
+ static inline int
+ ntz_intptr(uintptr_t x)
+ {
+ if (sizeof(uintptr_t) * CHAR_BIT == 64) {
+ return ntz_int64((uint64_t)x);
+ }
+ else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
+ return ntz_int32((uint32_t)x);
+ }
+ else {
+ UNREACHABLE_RETURN(~0);
+ }
+ }
+
+ static inline VALUE
+ RUBY_BIT_ROTL(VALUE v, int n)
+ {
+ #if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return __builtin_rotateleft32(v, n);
+
+ #elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return __builtin_rotateleft64(v, n);
+
+ #elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return _rotl(v, n);
+
+ #elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return _rotl64(v, n);
+
+ #elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
+ return _lrotl(v, n);
+
+ #else
+ const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
+ return (v << (n & m)) | (v >> (-n & m));
+ #endif
+ }
+
+ static inline VALUE
+ RUBY_BIT_ROTR(VALUE v, int n)
+ {
+ #if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return __builtin_rotateright32(v, n);
+
+ #elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return __builtin_rotateright64(v, n);
+
+ #elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 32)
+ return _rotr(v, n);
+
+ #elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 64)
+ return _rotr64(v, n);
+
+ #elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
+ return _lrotr(v, n);
+
+ #else
+ const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
+ return (v << (-n & m)) | (v >> (n & m));
+ #endif
+ }
+
+ #endif /* INTERNAL_BITS_H */
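
Note (not part of the gem contents): the header above is a verbatim copy of Ruby's internal/bits.h and cannot be compiled outside a Ruby source tree. As a minimal illustrative sketch of the portable fallback techniques it uses, the standalone C program below re-creates two of them: the shift-and-narrow count-leading-zeros loop (the non-builtin branch of nlz_int32) and the pre-check style of ADD_OVERFLOW_SIGNED_INTEGER_P. The names demo_nlz32 and demo_add_overflow_int are hypothetical and exist only for this example.

/* Illustrative sketch only; mirrors fallback algorithms from internal/bits.h. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Count leading zeros of a 32-bit value by repeatedly narrowing the window,
 * in the style of the header's non-builtin nlz_int32 branch. */
static unsigned demo_nlz32(uint32_t x)
{
    uint32_t y;
    unsigned n = 32;
    y = x >> 16; if (y) { n -= 16; x = y; }
    y = x >> 8;  if (y) { n -= 8;  x = y; }
    y = x >> 4;  if (y) { n -= 4;  x = y; }
    y = x >> 2;  if (y) { n -= 2;  x = y; }
    y = x >> 1;  if (y) { return n - 2; }
    return n - x;
}

/* Overflow predicate in the style of ADD_OVERFLOW_SIGNED_INTEGER_P:
 * decides whether a + b would overflow int without performing the addition. */
static int demo_add_overflow_int(int a, int b)
{
    return a > 0 ? b > INT_MAX - a : b < INT_MIN - a;
}

int main(void)
{
    printf("nlz(0x00ffffff) = %u\n", demo_nlz32(0x00ffffffu));                /* prints 8 */
    printf("INT_MAX + 1 overflows? %d\n", demo_add_overflow_int(INT_MAX, 1)); /* prints 1 */
    return 0;
}

The real header prefers compiler builtins (__builtin_clz, __builtin_add_overflow_p) or MSVC intrinsics and only falls back to these portable forms when none are available; the sketch shows the fallback path only.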