kanayago 0.1.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (74)
  1. checksums.yaml +7 -0
  2. data/.rubocop.yml +15 -0
  3. data/.rubocop_todo.yml +23 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +79 -0
  6. data/Rakefile +182 -0
  7. data/ext/kanayago/ccan/check_type/check_type.h +63 -0
  8. data/ext/kanayago/ccan/container_of/container_of.h +142 -0
  9. data/ext/kanayago/ccan/list/list.h +791 -0
  10. data/ext/kanayago/ccan/str/str.h +17 -0
  11. data/ext/kanayago/constant.h +53 -0
  12. data/ext/kanayago/extconf.rb +21 -0
  13. data/ext/kanayago/id.h +347 -0
  14. data/ext/kanayago/id_table.h +39 -0
  15. data/ext/kanayago/internal/array.h +151 -0
  16. data/ext/kanayago/internal/basic_operators.h +64 -0
  17. data/ext/kanayago/internal/bignum.h +244 -0
  18. data/ext/kanayago/internal/bits.h +568 -0
  19. data/ext/kanayago/internal/compile.h +34 -0
  20. data/ext/kanayago/internal/compilers.h +107 -0
  21. data/ext/kanayago/internal/complex.h +29 -0
  22. data/ext/kanayago/internal/encoding.h +36 -0
  23. data/ext/kanayago/internal/error.h +218 -0
  24. data/ext/kanayago/internal/fixnum.h +184 -0
  25. data/ext/kanayago/internal/gc.h +322 -0
  26. data/ext/kanayago/internal/hash.h +191 -0
  27. data/ext/kanayago/internal/imemo.h +261 -0
  28. data/ext/kanayago/internal/io.h +140 -0
  29. data/ext/kanayago/internal/numeric.h +274 -0
  30. data/ext/kanayago/internal/parse.h +117 -0
  31. data/ext/kanayago/internal/rational.h +71 -0
  32. data/ext/kanayago/internal/re.h +28 -0
  33. data/ext/kanayago/internal/ruby_parser.h +125 -0
  34. data/ext/kanayago/internal/sanitizers.h +297 -0
  35. data/ext/kanayago/internal/serial.h +23 -0
  36. data/ext/kanayago/internal/static_assert.h +16 -0
  37. data/ext/kanayago/internal/string.h +186 -0
  38. data/ext/kanayago/internal/symbol.h +45 -0
  39. data/ext/kanayago/internal/thread.h +79 -0
  40. data/ext/kanayago/internal/variable.h +72 -0
  41. data/ext/kanayago/internal/vm.h +137 -0
  42. data/ext/kanayago/internal/warnings.h +16 -0
  43. data/ext/kanayago/internal.h +108 -0
  44. data/ext/kanayago/kanayago.c +420 -0
  45. data/ext/kanayago/kanayago.h +21 -0
  46. data/ext/kanayago/lex.c +302 -0
  47. data/ext/kanayago/method.h +255 -0
  48. data/ext/kanayago/node.c +440 -0
  49. data/ext/kanayago/node.h +111 -0
  50. data/ext/kanayago/node_name.inc +224 -0
  51. data/ext/kanayago/parse.c +26931 -0
  52. data/ext/kanayago/parse.h +244 -0
  53. data/ext/kanayago/parse.tmp.y +16145 -0
  54. data/ext/kanayago/parser_bits.h +564 -0
  55. data/ext/kanayago/parser_node.h +32 -0
  56. data/ext/kanayago/parser_st.c +164 -0
  57. data/ext/kanayago/parser_st.h +162 -0
  58. data/ext/kanayago/parser_value.h +106 -0
  59. data/ext/kanayago/probes.h +4 -0
  60. data/ext/kanayago/ruby_assert.h +14 -0
  61. data/ext/kanayago/ruby_atomic.h +23 -0
  62. data/ext/kanayago/ruby_parser.c +1165 -0
  63. data/ext/kanayago/rubyparser.h +1391 -0
  64. data/ext/kanayago/shape.h +234 -0
  65. data/ext/kanayago/st.c +2339 -0
  66. data/ext/kanayago/symbol.h +123 -0
  67. data/ext/kanayago/thread_pthread.h +168 -0
  68. data/ext/kanayago/universal_parser.c +230 -0
  69. data/ext/kanayago/vm_core.h +2215 -0
  70. data/ext/kanayago/vm_opts.h +67 -0
  71. data/lib/kanayago/version.rb +5 -0
  72. data/lib/kanayago.rb +11 -0
  73. data/sig/kanayago.rbs +4 -0
  74. metadata +116 -0
@@ -0,0 +1,322 @@
1
#ifndef INTERNAL_GC_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_GC_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 * Permission is hereby granted, to either redistribute and/or
 * modify this file, provided that the conditions mentioned in the
 * file COPYING are met. Consult the file for details.
 * @brief Internal header for GC.
 */
#include "ruby/internal/config.h"

#include <stddef.h> /* for size_t */

#include "internal/compilers.h" /* for __has_attribute */
#include "ruby/ruby.h" /* for rb_event_flag_t */
#include "vm_core.h" /* for GET_EC() */

/* Off by default; the build may predefine it to enable a shared GC. */
#ifndef USE_SHARED_GC
# define USE_SHARED_GC 0
#endif

/* SET_MACHINE_STACK_END(p): store the current machine stack pointer into *p.
 * Each branch pairs a CPU architecture with GCC-style inline asm.  When no
 * asm variant matches, the out-of-line helper rb_gc_set_stack_end() is used
 * instead (it approximates the stack end from its own frame), and
 * USE_CONSERVATIVE_STACK_END records that the value is only approximate. */
#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
#elif defined(__i386) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
#elif defined(__aarch64__) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
#else
NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
#define USE_CONSERVATIVE_STACK_END
#endif
40
+
41
/* for GC debug */

/* RUBY_MARK_FREE_DEBUG: when non-zero, the mark/free entry-exit tracing
 * below is compiled in; otherwise the RUBY_MARK_* / RUBY_FREE_* macros
 * expand to nothing. */
#ifndef RUBY_MARK_FREE_DEBUG
#define RUBY_MARK_FREE_DEBUG 0
#endif

#if RUBY_MARK_FREE_DEBUG
extern int ruby_gc_debug_indent; /* current trace nesting depth */

/* Print leading whitespace proportional to the current nesting depth. */
static inline void
rb_gc_debug_indent(void)
{
    ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
}

/* Emit one trace line.
 * mode: "mark" or "free"; msg: caller-supplied label;
 * st: 1 on entry (depth grows after printing), 0 on exit (depth shrinks
 * before printing, so the exit line aligns with its entry line);
 * ptr: address of the traced object. */
static inline void
rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
{
    if (st == 0) {
        ruby_gc_debug_indent--;
    }
    rb_gc_debug_indent();
    ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);

    if (st) {
        ruby_gc_debug_indent++;
    }

    fflush(stdout);
}

/* NOTE: these macros reference a local variable named `ptr` at the use
 * site; callers must have one in scope. */
#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
#define RUBY_GC_INFO rb_gc_debug_indent(), ruby_debug_printf

#else
#define RUBY_MARK_ENTER(msg)
#define RUBY_MARK_LEAVE(msg)
#define RUBY_FREE_ENTER(msg)
#define RUBY_FREE_LEAVE(msg)
#define RUBY_GC_INFO if(0)printf
#endif

/* Free (ptr) via ruby_xfree and null it out; no-op when already NULL. */
#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;}
87
+
88
/* STACK_UPPER(x, a, b): evaluate to (a) when the machine stack grows upward
 * (toward higher addresses) and (b) when it grows downward.  When the
 * direction is unknown at compile time, it is probed once at run time and
 * cached in ruby_stack_grow_direction (0 means "not yet probed"). */
#if STACK_GROW_DIRECTION > 0
# define STACK_UPPER(x, a, b) (a)
#elif STACK_GROW_DIRECTION < 0
# define STACK_UPPER(x, a, b) (b)
#else
RUBY_EXTERN int ruby_stack_grow_direction;
int ruby_get_stack_grow_direction(volatile VALUE *addr);
# define stack_growup_p(x) ( \
    (ruby_stack_grow_direction ? \
     ruby_stack_grow_direction : \
     ruby_get_stack_grow_direction(x)) > 0)
# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
#endif

/*
  STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.

  On most normal systems, stacks grow from high address to lower address. In
  this case, STACK_DIR_UPPER(a, b) will return (b), but on exotic systems where
  the stack grows UP (from low address to high address), it will return (a).
*/

#if STACK_GROW_DIRECTION
#define STACK_GROW_DIR_DETECTION
#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
#else
/* Declares a local whose address is handed to the run-time probe. */
#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
#endif
#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
118
+
119
const char *rb_obj_info(VALUE obj);
const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);

struct rb_execution_context_struct; /* in vm_core.h */
struct rb_objspace; /* in vm_core.h */

/* NEWOBJ_OF(var, T, c, f, s, ec): declare `T *var` and initialize it with a
 * newly allocated object of class c, flags f and slot size s.  When
 * FL_WB_PROTECTED is set in f, the write-barrier-protected allocator is used
 * on ec (or on the current EC when ec is 0) and the flag itself is stripped
 * before being stored; otherwise the unprotected allocator is used.
 * NOTE(review): `ec` is expanded unparenthesized in the condition — callers
 * should pass a simple expression. */
#define NEWOBJ_OF(var, T, c, f, s, ec) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
                     rb_wb_protected_newobj_of((ec ? ec : GET_EC()), (c), (f) & ~FL_WB_PROTECTED, s) : \
                     rb_wb_unprotected_newobj_of((c), (f), s))

#define RB_OBJ_GC_FLAGS_MAX 6 /* used in ext/objspace */
131
+
132
/* UNALIGNED_MEMBER_ACCESS(expr): evaluate expr while silencing
 * -Waddress-of-packed-member on compilers that would warn about taking the
 * address of a member of a packed struct.  Expands to plain (expr) when the
 * feature is disabled or the compiler never emits that warning. */
#ifndef USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! (__has_warning("-Waddress-of-packed-member") || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#else
# include "internal/warnings.h"
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    __typeof__(expr) unaligned_member_access_result = (expr); \
    COMPILER_WARNING_POP; \
    unaligned_member_access_result; \
})

/* UNALIGNED_MEMBER_PTR(ptr, mem): &(ptr)->mem with the same warning
 * suppression; the address is laundered through `const volatile void *`
 * before being cast back to the member's pointer type. */
# define UNALIGNED_MEMBER_PTR(ptr, mem) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    const volatile void *unaligned_member_ptr_result = &(ptr)->mem; \
    COMPILER_WARNING_POP; \
    (__typeof__((ptr)->mem) *)unaligned_member_ptr_result; \
})
#endif

/* Fallback when the statement-expression variant above was not defined. */
#ifndef UNALIGNED_MEMBER_PTR
# define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
#endif

/* RB_OBJ_WRITE through a possibly-unaligned slot of a packed struct. */
#define RB_OBJ_WRITE_UNALIGNED(old, slot, young) do { \
    VALUE *_slot = UNALIGNED_MEMBER_ACCESS(slot); \
    RB_OBJ_WRITE(old, _slot, young); \
} while (0)
165
+
166
/* Used in places that could malloc while the GC is already running, which
 * could cause the GC to be (re)entered.  We need to temporarily disable the
 * GC to allow the malloc to happen.  Allocating memory during GC is a bad
 * idea, so use this only when absolutely necessary.
 * NOTE: START asserts we really are inside GC, and declares a local
 * (_already_disabled) that END reads — the two macros must be paired in the
 * same scope. */
#define DURING_GC_COULD_MALLOC_REGION_START() \
    assert(rb_during_gc()); \
    VALUE _already_disabled = rb_gc_disable_no_rest()

/* Re-enable the GC only if START actually disabled it (i.e. it was not
 * already disabled beforehand). */
#define DURING_GC_COULD_MALLOC_REGION_END() \
    if (_already_disabled == Qfalse) rb_gc_enable()
176
+
177
/* gc.c */
extern int ruby_disable_gc;
RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
RUBY_ATTR_MALLOC void *ruby_mimcalloc(size_t num, size_t size);
void ruby_mimfree(void *ptr);
void rb_gc_prepare_heap(void);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
VALUE rb_objspace_gc_enable(void *objspace);
VALUE rb_objspace_gc_disable(void *objspace);
void ruby_gc_set_params(void);
void rb_gc_copy_attributes(VALUE dest, VALUE obj);
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
size_t rb_malloc_grow_capa(size_t current_capacity, size_t type_size);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
/* Sized allocator wrappers; defined near the end of this header and aliased
 * over the plain ruby_sized_* names by #defines there. */
static inline void *ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);

void *rb_gc_ractor_cache_alloc(void);
void rb_gc_ractor_cache_free(void *cache);

bool rb_gc_size_allocatable_p(size_t size);
size_t *rb_gc_size_pool_sizes(void);
size_t rb_gc_size_pool_id_for_size(size_t size);

void rb_gc_mark_and_move(VALUE *ptr);

void rb_gc_mark_weak(VALUE *ptr);
void rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr);

void rb_gc_ref_update_table_values_only(st_table *tbl);

void rb_gc_initial_stress_set(VALUE flag);

/* Mark-and-move through a non-VALUE pointer slot: goes through a VALUE
 * temporary, and writes back only when the referenced object moved (so the
 * slot is updated for compaction). */
#define rb_gc_mark_and_move_ptr(ptr) do { \
    VALUE _obj = (VALUE)*(ptr); \
    rb_gc_mark_and_move(&_obj); \
    if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
} while (0)
221
+
222
RUBY_SYMBOL_EXPORT_BEGIN
/* exports for objspace module */
void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
int rb_objspace_internal_object_p(VALUE obj);
int rb_objspace_garbage_object_p(VALUE obj);

/* Iterate objspace memory in (start, end, stride) chunks; see gc.c for the
 * callback's exact contract. */
void rb_objspace_each_objects(
    int (*callback)(void *start, void *end, size_t stride, void *data),
    void *data);

size_t rb_gc_obj_slot_size(VALUE obj);

VALUE rb_gc_disable_no_rest(void);


/* gc.c (export) */
const char *rb_objspace_data_type_name(VALUE obj);
VALUE rb_wb_protected_newobj_of(struct rb_execution_context_struct *, VALUE, VALUE, size_t);
VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
size_t rb_obj_memsize_of(VALUE);
size_t rb_obj_gc_flags(VALUE, ID[], size_t);
void rb_gc_mark_values(long n, const VALUE *values);
void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
void rb_gc_update_values(long n, VALUE *values);
/* Out-of-line sized allocators (used when the platform cannot report a heap
 * block's usable size; see the #if block below). */
void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);

#if USE_SHARED_GC
void ruby_load_external_gc_from_argv(int argc, char **argv);
#endif
RUBY_SYMBOL_EXPORT_END

int rb_ec_stack_check(struct rb_execution_context_struct *ec);
void rb_gc_writebarrier_remember(VALUE obj);
const char *rb_obj_info(VALUE obj);
259
+
260
/* Sized reallocation/deallocation wrappers.
 *
 * When the platform can report a heap block's usable size (malloc_usable_size,
 * malloc_size, or Windows), the caller-supplied old size is unnecessary, so
 * the inlined wrappers ignore it and forward to the plain ruby_x* allocators.
 * Otherwise they forward to the out-of-line ruby_sized_* functions declared
 * above, which take the old size explicitly. */
#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_xrealloc(ptr, new_size); /* old_size ignored: see above */
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, elemsiz); /* old_count ignored */
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_xfree(ptr); /* size ignored */
}

/* Old count (w) is dropped for the same reason. */
# define SIZED_REALLOC_N(x, y, z, w) REALLOC_N(x, y, z)

static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, element_size);
}

#else

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_sized_xrealloc(ptr, new_size, old_size);
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, elemsiz, old_count);
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_sized_xfree(ptr, size);
}

# define SIZED_REALLOC_N(v, T, m, n) \
    ((v) = (T *)ruby_sized_xrealloc2((void *)(v), (m), sizeof(T), (n)))

static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, element_size, old_count);
}

#endif /* HAVE_MALLOC_USABLE_SIZE */

/* From here on, the public sized names resolve to the inlined wrappers.
 * (These #defines must come after the function bodies above, which still
 * need to call the out-of-line ruby_sized_* symbols by name.) */
#define ruby_sized_xrealloc ruby_sized_xrealloc_inlined
#define ruby_sized_xrealloc2 ruby_sized_xrealloc2_inlined
#define ruby_sized_xfree ruby_sized_xfree_inlined
#endif /* INTERNAL_GC_H */
@@ -0,0 +1,191 @@
1
#ifndef INTERNAL_HASH_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_HASH_H
/**
 * @author Ruby developers <ruby-core@ruby-lang.org>
 * @copyright This file is a part of the programming language Ruby.
 * Permission is hereby granted, to either redistribute and/or
 * modify this file, provided that the conditions mentioned in the
 * file COPYING are met. Consult the file for details.
 * @brief Internal header for Hash.
 */
#include "ruby/internal/config.h"
#include <stddef.h> /* for size_t */
#include "ruby/internal/stdbool.h" /* for bool */
#include "ruby/ruby.h" /* for struct RBasic */
#include "ruby/st.h" /* for struct st_table */
16
+
17
/* Max entries of the embedded AR (array) table: one entry per byte of a
 * VALUE, so all per-entry hash hints pack into a single word (see the
 * ar_hint union in ar_table below). */
#define RHASH_AR_TABLE_MAX_SIZE SIZEOF_VALUE

struct ar_table_struct;
typedef unsigned char ar_hint_t; /* one-byte cached hash hint per AR entry */

/* Layout of the FL_USER* flag bits of RBasic for Hash objects. */
enum ruby_rhash_flags {
    RHASH_PASS_AS_KEYWORDS = FL_USER1,                                   /* FL 1 */
    RHASH_PROC_DEFAULT = FL_USER2,                                       /* FL 2 */
    RHASH_ST_TABLE_FLAG = FL_USER3,                                      /* FL 3: backing store is an st_table, not AR */
    RHASH_AR_TABLE_SIZE_MASK = (FL_USER4|FL_USER5|FL_USER6|FL_USER7),    /* FL 4..7: AR entry count */
    RHASH_AR_TABLE_SIZE_SHIFT = (FL_USHIFT+4),
    RHASH_AR_TABLE_BOUND_MASK = (FL_USER8|FL_USER9|FL_USER10|FL_USER11), /* FL 8..11 */
    RHASH_AR_TABLE_BOUND_SHIFT = (FL_USHIFT+8),

    // we can not put it in "enum" because it can exceed "int" range.
#define RHASH_LEV_MASK (FL_USER13 | FL_USER14 | FL_USER15 |              /* FL 13..19 */ \
                        FL_USER16 | FL_USER17 | FL_USER18 | FL_USER19)

    RHASH_LEV_SHIFT = (FL_USHIFT + 13),
    RHASH_LEV_MAX = 127, /* 7 bits */
};
38
+
39
/* One slot of the embedded AR table: a key/value pair stored inline. */
typedef struct ar_table_pair_struct {
    VALUE key;
    VALUE val;
} ar_table_pair;

typedef struct ar_table_struct {
    /* Per-entry one-byte hash hints, overlaid on a single VALUE word so
     * all hints can be read or written at once. */
    union {
        ar_hint_t ary[RHASH_AR_TABLE_MAX_SIZE];
        VALUE word;
    } ar_hint;
    /* 64bit CPU: 8B * 2 * 8 = 128B */
    ar_table_pair pairs[RHASH_AR_TABLE_MAX_SIZE];
} ar_table;
52
+
53
/* Hash object header.  The backing table (ar_table or st_table) lives
 * immediately after this struct in the same slot — see RHASH_AR_TABLE()
 * and RHASH_ST_TABLE() below. */
struct RHash {
    struct RBasic basic;
    const VALUE ifnone; /* default value; a proc when RHASH_PROC_DEFAULT is set */
};

#define RHASH(obj) ((struct RHash *)(obj))

/* Drop any previously defined versions of these names; this header supplies
 * its own static inline implementations below. */
#ifdef RHASH_IFNONE
# undef RHASH_IFNONE
#endif

#ifdef RHASH_SIZE
# undef RHASH_SIZE
#endif

#ifdef RHASH_EMPTY_P
# undef RHASH_EMPTY_P
#endif
71
+
72
/* hash.c */
void rb_hash_st_table_set(VALUE hash, st_table *st);
VALUE rb_hash_default_value(VALUE hash, VALUE key);
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc);
long rb_dbl_long_hash(double d);
st_table *rb_init_identtable(void);
st_index_t rb_any_hash(VALUE a);
int rb_any_cmp(VALUE a, VALUE b);
VALUE rb_to_hash_type(VALUE obj);
VALUE rb_hash_key_str(VALUE);
VALUE rb_hash_values(VALUE hash);
VALUE rb_hash_rehash(VALUE hash);
int rb_hash_add_new_element(VALUE hash, VALUE key, VALUE val);
VALUE rb_hash_set_pair(VALUE hash, VALUE pair);
int rb_hash_stlike_delete(VALUE hash, st_data_t *pkey, st_data_t *pval);
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg);
int rb_hash_stlike_update(VALUE hash, st_data_t key, st_update_callback_func *func, st_data_t arg);
VALUE rb_ident_hash_new_with_size(st_index_t size);
void rb_hash_free(VALUE hash);

/* Forward declarations for the static inline accessors defined at the
 * bottom of this header. */
static inline unsigned RHASH_AR_TABLE_SIZE_RAW(VALUE h);
static inline VALUE RHASH_IFNONE(VALUE h);
static inline size_t RHASH_SIZE(VALUE h);
static inline bool RHASH_EMPTY_P(VALUE h);
static inline bool RHASH_AR_TABLE_P(VALUE h);
static inline bool RHASH_ST_TABLE_P(VALUE h);
static inline struct ar_table_struct *RHASH_AR_TABLE(VALUE h);
static inline st_table *RHASH_ST_TABLE(VALUE h);
static inline size_t RHASH_ST_SIZE(VALUE h);
static inline void RHASH_ST_CLEAR(VALUE h);
102
+
103
RUBY_SYMBOL_EXPORT_BEGIN
/* hash.c (export) */
VALUE rb_hash_delete_entry(VALUE hash, VALUE key);
VALUE rb_ident_hash_new(void);
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg);
RUBY_SYMBOL_EXPORT_END

VALUE rb_hash_new_with_size(st_index_t size);
VALUE rb_hash_resurrect(VALUE hash);
int rb_hash_stlike_lookup(VALUE hash, st_data_t key, st_data_t *pval);
VALUE rb_hash_keys(VALUE hash);
VALUE rb_hash_has_key(VALUE hash, VALUE key);
VALUE rb_hash_compare_by_id_p(VALUE hash);

st_table *rb_hash_tbl_raw(VALUE hash, const char *file, int line);
/* Call-site wrapper that records __FILE__/__LINE__ for debugging. */
#define RHASH_TBL_RAW(h) rb_hash_tbl_raw(h, __FILE__, __LINE__)

VALUE rb_hash_compare_by_id(VALUE hash);
121
+
122
+ static inline bool
123
+ RHASH_AR_TABLE_P(VALUE h)
124
+ {
125
+ return ! FL_TEST_RAW(h, RHASH_ST_TABLE_FLAG);
126
+ }
127
+
128
+ RBIMPL_ATTR_RETURNS_NONNULL()
129
+ static inline struct ar_table_struct *
130
+ RHASH_AR_TABLE(VALUE h)
131
+ {
132
+ return (struct ar_table_struct *)((uintptr_t)h + sizeof(struct RHash));
133
+ }
134
+
135
+ RBIMPL_ATTR_RETURNS_NONNULL()
136
+ static inline st_table *
137
+ RHASH_ST_TABLE(VALUE h)
138
+ {
139
+ return (st_table *)((uintptr_t)h + sizeof(struct RHash));
140
+ }
141
+
142
+ static inline VALUE
143
+ RHASH_IFNONE(VALUE h)
144
+ {
145
+ return RHASH(h)->ifnone;
146
+ }
147
+
148
+ static inline size_t
149
+ RHASH_SIZE(VALUE h)
150
+ {
151
+ if (RHASH_AR_TABLE_P(h)) {
152
+ return RHASH_AR_TABLE_SIZE_RAW(h);
153
+ }
154
+ else {
155
+ return RHASH_ST_SIZE(h);
156
+ }
157
+ }
158
+
159
+ static inline bool
160
+ RHASH_EMPTY_P(VALUE h)
161
+ {
162
+ return RHASH_SIZE(h) == 0;
163
+ }
164
+
165
+ static inline bool
166
+ RHASH_ST_TABLE_P(VALUE h)
167
+ {
168
+ return ! RHASH_AR_TABLE_P(h);
169
+ }
170
+
171
+ static inline size_t
172
+ RHASH_ST_SIZE(VALUE h)
173
+ {
174
+ return RHASH_ST_TABLE(h)->num_entries;
175
+ }
176
+
177
+ static inline void
178
+ RHASH_ST_CLEAR(VALUE h)
179
+ {
180
+ memset(RHASH_ST_TABLE(h), 0, sizeof(st_table));
181
+ }
182
+
183
+ static inline unsigned
184
+ RHASH_AR_TABLE_SIZE_RAW(VALUE h)
185
+ {
186
+ VALUE ret = FL_TEST_RAW(h, RHASH_AR_TABLE_SIZE_MASK);
187
+ ret >>= RHASH_AR_TABLE_SIZE_SHIFT;
188
+ return (unsigned)ret;
189
+ }
190
+
191
+ #endif /* INTERNAL_HASH_H */