kanayago 0.1.1

Files changed (74)
  1. checksums.yaml +7 -0
  2. data/.rubocop.yml +15 -0
  3. data/.rubocop_todo.yml +23 -0
  4. data/LICENSE.txt +21 -0
  5. data/README.md +79 -0
  6. data/Rakefile +182 -0
  7. data/ext/kanayago/ccan/check_type/check_type.h +63 -0
  8. data/ext/kanayago/ccan/container_of/container_of.h +142 -0
  9. data/ext/kanayago/ccan/list/list.h +791 -0
  10. data/ext/kanayago/ccan/str/str.h +17 -0
  11. data/ext/kanayago/constant.h +53 -0
  12. data/ext/kanayago/extconf.rb +21 -0
  13. data/ext/kanayago/id.h +347 -0
  14. data/ext/kanayago/id_table.h +39 -0
  15. data/ext/kanayago/internal/array.h +151 -0
  16. data/ext/kanayago/internal/basic_operators.h +64 -0
  17. data/ext/kanayago/internal/bignum.h +244 -0
  18. data/ext/kanayago/internal/bits.h +568 -0
  19. data/ext/kanayago/internal/compile.h +34 -0
  20. data/ext/kanayago/internal/compilers.h +107 -0
  21. data/ext/kanayago/internal/complex.h +29 -0
  22. data/ext/kanayago/internal/encoding.h +36 -0
  23. data/ext/kanayago/internal/error.h +218 -0
  24. data/ext/kanayago/internal/fixnum.h +184 -0
  25. data/ext/kanayago/internal/gc.h +322 -0
  26. data/ext/kanayago/internal/hash.h +191 -0
  27. data/ext/kanayago/internal/imemo.h +261 -0
  28. data/ext/kanayago/internal/io.h +140 -0
  29. data/ext/kanayago/internal/numeric.h +274 -0
  30. data/ext/kanayago/internal/parse.h +117 -0
  31. data/ext/kanayago/internal/rational.h +71 -0
  32. data/ext/kanayago/internal/re.h +28 -0
  33. data/ext/kanayago/internal/ruby_parser.h +125 -0
  34. data/ext/kanayago/internal/sanitizers.h +297 -0
  35. data/ext/kanayago/internal/serial.h +23 -0
  36. data/ext/kanayago/internal/static_assert.h +16 -0
  37. data/ext/kanayago/internal/string.h +186 -0
  38. data/ext/kanayago/internal/symbol.h +45 -0
  39. data/ext/kanayago/internal/thread.h +79 -0
  40. data/ext/kanayago/internal/variable.h +72 -0
  41. data/ext/kanayago/internal/vm.h +137 -0
  42. data/ext/kanayago/internal/warnings.h +16 -0
  43. data/ext/kanayago/internal.h +108 -0
  44. data/ext/kanayago/kanayago.c +420 -0
  45. data/ext/kanayago/kanayago.h +21 -0
  46. data/ext/kanayago/lex.c +302 -0
  47. data/ext/kanayago/method.h +255 -0
  48. data/ext/kanayago/node.c +440 -0
  49. data/ext/kanayago/node.h +111 -0
  50. data/ext/kanayago/node_name.inc +224 -0
  51. data/ext/kanayago/parse.c +26931 -0
  52. data/ext/kanayago/parse.h +244 -0
  53. data/ext/kanayago/parse.tmp.y +16145 -0
  54. data/ext/kanayago/parser_bits.h +564 -0
  55. data/ext/kanayago/parser_node.h +32 -0
  56. data/ext/kanayago/parser_st.c +164 -0
  57. data/ext/kanayago/parser_st.h +162 -0
  58. data/ext/kanayago/parser_value.h +106 -0
  59. data/ext/kanayago/probes.h +4 -0
  60. data/ext/kanayago/ruby_assert.h +14 -0
  61. data/ext/kanayago/ruby_atomic.h +23 -0
  62. data/ext/kanayago/ruby_parser.c +1165 -0
  63. data/ext/kanayago/rubyparser.h +1391 -0
  64. data/ext/kanayago/shape.h +234 -0
  65. data/ext/kanayago/st.c +2339 -0
  66. data/ext/kanayago/symbol.h +123 -0
  67. data/ext/kanayago/thread_pthread.h +168 -0
  68. data/ext/kanayago/universal_parser.c +230 -0
  69. data/ext/kanayago/vm_core.h +2215 -0
  70. data/ext/kanayago/vm_opts.h +67 -0
  71. data/lib/kanayago/version.rb +5 -0
  72. data/lib/kanayago.rb +11 -0
  73. data/sig/kanayago.rbs +4 -0
  74. metadata +116 -0
data/ext/kanayago/vm_core.h
@@ -0,0 +1,2215 @@
+ #ifndef RUBY_VM_CORE_H
+ #define RUBY_VM_CORE_H
+ /**********************************************************************
+
+   vm_core.h -
+
+   $Author$
+   created at: 04/01/01 19:41:38 JST
+
+   Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ /*
+  * Enable check mode.
+  *   1: enable local assertions.
+  */
+ #ifndef VM_CHECK_MODE
+
+ // respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
+ #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
+
+ #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
+ #endif
+
+ /**
+  * VM Debug Level
+  *
+  * debug level:
+  *  0: no debug output
+  *  1: show instruction name
+  *  2: show stack frame when control stack frame is changed
+  *  3: show stack status
+  *  4: show register
+  *  5:
+  * 10: gc check
+  */
+
+ #ifndef VMDEBUG
+ #define VMDEBUG 0
+ #endif
+
+ #if 0
+ #undef  VMDEBUG
+ #define VMDEBUG 3
+ #endif
+
+ #include "ruby/internal/config.h"
+
+ #include <stddef.h>
+ #include <signal.h>
+ #include <stdarg.h>
+
+ #include "ruby_assert.h"
+
+ #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
+
+ #if VM_CHECK_MODE > 0
+ #define VM_ASSERT(/*expr, */...) RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, __VA_ARGS__)
+ #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
+ #define RUBY_ASSERT_CRITICAL_SECTION
+ #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
+ #else
+ #define VM_ASSERT(/*expr, */...) ((void)0)
+ #define VM_UNREACHABLE(func) UNREACHABLE
+ #define RUBY_DEBUG_THREAD_SCHEDULE()
+ #endif
+
+ #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
+
+ #if defined(RUBY_ASSERT_CRITICAL_SECTION)
+ // TODO add documentation
+ extern int ruby_assert_critical_section_entered;
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
+ #else
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
+ #endif
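+ /* Usage sketch (illustrative, not part of this header): the ENTER/LEAVE
+  * pair is meant to bracket a region that must not be preempted, e.g.
+  *
+  *     RUBY_ASSERT_CRITICAL_SECTION_ENTER();
+  *     // ... code that must not switch threads or run interrupts ...
+  *     RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
+  *
+  * When VM_CHECK_MODE > 0, an unmatched LEAVE trips the VM_ASSERT above
+  * because the counter would go negative; in release builds both macros
+  * expand to nothing.
+  */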
+
+ #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+ # include "wasm/setjmp.h"
+ #else
+ # include <setjmp.h>
+ #endif
+
+ #if defined(__linux__) || defined(__FreeBSD__)
+ # define RB_THREAD_T_HAS_NATIVE_ID
+ #endif
+
+ #include "ruby/internal/stdbool.h"
+ #include "ccan/list/list.h"
+ #include "id.h"
+ #include "internal.h"
+ #include "internal/array.h"
+ #include "internal/basic_operators.h"
+ #include "internal/sanitizers.h"
+ #include "internal/serial.h"
+ #include "internal/vm.h"
+ #include "method.h"
+ #include "node.h"
+ #include "ruby/ruby.h"
+ #include "ruby/st.h"
+ #include "ruby_atomic.h"
+ #include "vm_opts.h"
+
+ #include "ruby/thread_native.h"
+ /*
+  * implementation selector of get_insn_info algorithm
+  *   0: linear search
+  *   1: binary search
+  *   2: succinct bitvector
+  */
+ #ifndef VM_INSN_INFO_TABLE_IMPL
+ # define VM_INSN_INFO_TABLE_IMPL 2
+ #endif
+
+ #if defined(NSIG_MAX)      /* POSIX issue 8 */
+ # undef  NSIG
+ # define NSIG NSIG_MAX
+ #elif defined(_SIG_MAXSIG) /* FreeBSD */
+ # undef  NSIG
+ # define NSIG _SIG_MAXSIG
+ #elif defined(_SIGMAX)     /* QNX */
+ # define NSIG (_SIGMAX + 1)
+ #elif defined(NSIG)        /* 99% of everything else */
+ # /* take it */
+ #else                      /* Last resort */
+ # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
+ #endif
+
+ #define RUBY_NSIG NSIG
+
+ #if defined(SIGCLD)
+ # define RUBY_SIGCHLD (SIGCLD)
+ #elif defined(SIGCHLD)
+ # define RUBY_SIGCHLD (SIGCHLD)
+ #endif
+
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+ # define USE_SIGALTSTACK
+ void *rb_allocate_sigaltstack(void);
+ void *rb_register_sigaltstack(void *);
+ # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
+ # define RB_ALTSTACK_FREE(var) free(var)
+ # define RB_ALTSTACK(var) var
+ #else /* noop */
+ # define RB_ALTSTACK_INIT(var, altstack)
+ # define RB_ALTSTACK_FREE(var)
+ # define RB_ALTSTACK(var) (0)
+ #endif
+
+ #include THREAD_IMPL_H
+ #define RUBY_VM_THREAD_MODEL 2
+
+ /*****************/
+ /* configuration */
+ /*****************/
+
+ /* gcc ver. check */
+ #if defined(__GNUC__) && __GNUC__ >= 2
+
+ #if OPT_TOKEN_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #endif
+
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+ /* disable threaded code options */
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #if OPT_TOKEN_THREADED_CODE
+ #undef OPT_TOKEN_THREADED_CODE
+ #endif
+ #endif
+
+ /* call threaded code */
+ #if OPT_CALL_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif /* OPT_DIRECT_THREADED_CODE */
+ #endif /* OPT_CALL_THREADED_CODE */
+
+ void rb_vm_encoded_insn_data_table_init(void);
+ typedef unsigned long rb_num_t;
+ typedef   signed long rb_snum_t;
+
+ enum ruby_tag_type {
+     RUBY_TAG_NONE   = 0x0,
+     RUBY_TAG_RETURN = 0x1,
+     RUBY_TAG_BREAK  = 0x2,
+     RUBY_TAG_NEXT   = 0x3,
+     RUBY_TAG_RETRY  = 0x4,
+     RUBY_TAG_REDO   = 0x5,
+     RUBY_TAG_RAISE  = 0x6,
+     RUBY_TAG_THROW  = 0x7,
+     RUBY_TAG_FATAL  = 0x8,
+     RUBY_TAG_MASK   = 0xf
+ };
+
+ #define TAG_NONE   RUBY_TAG_NONE
+ #define TAG_RETURN RUBY_TAG_RETURN
+ #define TAG_BREAK  RUBY_TAG_BREAK
+ #define TAG_NEXT   RUBY_TAG_NEXT
+ #define TAG_RETRY  RUBY_TAG_RETRY
+ #define TAG_REDO   RUBY_TAG_REDO
+ #define TAG_RAISE  RUBY_TAG_RAISE
+ #define TAG_THROW  RUBY_TAG_THROW
+ #define TAG_FATAL  RUBY_TAG_FATAL
+ #define TAG_MASK   RUBY_TAG_MASK
+
+ enum ruby_vm_throw_flags {
+     VM_THROW_NO_ESCAPE_FLAG = 0x8000,
+     VM_THROW_STATE_MASK = 0xff
+ };
+
+ /* forward declarations */
+ struct rb_thread_struct;
+ struct rb_control_frame_struct;
+
+ /* iseq data type */
+ typedef struct rb_compile_option_struct rb_compile_option_t;
+
+ union ic_serial_entry {
+     rb_serial_t raw;
+     VALUE data[2];
+ };
+
+ // imemo_constcache
+ struct iseq_inline_constant_cache_entry {
+     VALUE flags;
+
+     VALUE value;              // v0
+     VALUE _unused1;           // v1
+     VALUE _unused2;           // v2
+     const rb_cref_t *ic_cref; // v3
+ };
+ STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
+               (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
+                sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
+
+ struct iseq_inline_constant_cache {
+     struct iseq_inline_constant_cache_entry *entry;
+
+     /**
+      * A null-terminated list of ids, used to represent a constant's path.
+      * idNULL is used to represent the :: prefix, and 0 is used to denote
+      * the end of the list.
+      *
+      * For example:
+      *   FOO        {rb_intern("FOO"), 0}
+      *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
+      *   ::FOO      {idNULL, rb_intern("FOO"), 0}
+      *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
+      */
+     const ID *segments;
+ };
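+ /* Illustrative sketch (not part of this header): a resolver would walk
+  * `segments` up to the 0 terminator, e.g. for ::FOO::BAR:
+  *
+  *     for (const ID *seg = ic->segments; *seg; seg++) {
+  *         if (*seg == idNULL) { ... start the lookup at Object ... }
+  *         else                { ... look up the constant named *seg ... }
+  *     }
+  */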
+
+ struct iseq_inline_iv_cache_entry {
+     uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
+     ID iv_set_name;
+ };
+
+ struct iseq_inline_cvar_cache_entry {
+     struct rb_cvar_class_tbl_entry *entry;
+ };
+
+ union iseq_inline_storage_entry {
+     struct {
+         struct rb_thread_struct *running_thread;
+         VALUE value;
+     } once;
+     struct iseq_inline_constant_cache ic_cache;
+     struct iseq_inline_iv_cache_entry iv_cache;
+ };
+
+ struct rb_calling_info {
+     const struct rb_call_data *cd;
+     const struct rb_callcache *cc;
+     VALUE block_handler;
+     VALUE recv;
+     int argc;
+     bool kw_splat;
+     VALUE heap_argv;
+ };
+
+ #ifndef VM_ARGC_STACK_MAX
+ #define VM_ARGC_STACK_MAX 128
+ #endif
+
+ # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
+
+ struct rb_execution_context_struct;
+
+ #if 1
+ #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
+ #else
+ #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
+ #endif
+ #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
+
+ typedef struct rb_iseq_location_struct {
+     VALUE pathobj;    /* String (path) or Array [path, realpath]. Frozen. */
+     VALUE base_label; /* String */
+     VALUE label;      /* String */
+     int first_lineno;
+     int node_id;
+     rb_code_location_t code_location;
+ } rb_iseq_location_t;
+
+ #define PATHOBJ_PATH     0
+ #define PATHOBJ_REALPATH 1
+
+ static inline VALUE
+ pathobj_path(VALUE pathobj)
+ {
+     if (RB_TYPE_P(pathobj, T_STRING)) {
+         return pathobj;
+     }
+     else {
+         VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
+         return RARRAY_AREF(pathobj, PATHOBJ_PATH);
+     }
+ }
+
+ static inline VALUE
+ pathobj_realpath(VALUE pathobj)
+ {
+     if (RB_TYPE_P(pathobj, T_STRING)) {
+         return pathobj;
+     }
+     else {
+         VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
+         return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
+     }
+ }
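+ /* Note (illustrative): a pathobj packs path and realpath into a single
+  * frozen VALUE -- a plain String when the two coincide, otherwise a
+  * 2-element Array indexed by PATHOBJ_PATH/PATHOBJ_REALPATH -- which is
+  * what the two accessors above discriminate on.
+  */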
+
+ /* Forward declarations */
+ struct rb_rjit_unit;
+
+ typedef uintptr_t iseq_bits_t;
+
+ #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
+
+ /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
+ #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
+
+ /* instruction sequence type */
+ enum rb_iseq_type {
+     ISEQ_TYPE_TOP,
+     ISEQ_TYPE_METHOD,
+     ISEQ_TYPE_BLOCK,
+     ISEQ_TYPE_CLASS,
+     ISEQ_TYPE_RESCUE,
+     ISEQ_TYPE_ENSURE,
+     ISEQ_TYPE_EVAL,
+     ISEQ_TYPE_MAIN,
+     ISEQ_TYPE_PLAIN
+ };
+
+ // Attributes specified by Primitive.attr!
+ enum rb_builtin_attr {
+     // The iseq does not call methods.
+     BUILTIN_ATTR_LEAF = 0x01,
+     // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
+     BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
+     // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
+     BUILTIN_ATTR_INLINE_BLOCK = 0x04,
+ };
+
+ typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
+
+ struct rb_iseq_constant_body {
+     enum rb_iseq_type type;
+
+     unsigned int iseq_size;
+     VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
+
+     /**
+      * parameter information
+      *
+      *  def m(a1, a2, ..., aM,                    # mandatory
+      *        b1=(...), b2=(...), ..., bN=(...),  # optional
+      *        *c,                                 # rest
+      *        d1, d2, ..., dO,                    # post
+      *        e1:(...), e2:(...), ..., eK:(...),  # keyword
+      *        **f,                                # keyword_rest
+      *        &g)                                 # block
+      * =>
+      *
+      *  lead_num     = M
+      *  opt_num      = N
+      *  rest_start   = M+N
+      *  post_start   = M+N+(*1)
+      *  post_num     = O
+      *  keyword_num  = K
+      *  block_start  = M+N+(*1)+O+K
+      *  keyword_bits = M+N+(*1)+O+K+(&1)
+      *  size         = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
+      */
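+     /* Worked example (illustrative): for
+      *
+      *     def m(a, b = 1, *r, c, k: 0, **kw, &blk)
+      *
+      * M=1, N=1, O=1, K=1, so lead_num = 1, opt_num = 1, rest_start = 2,
+      * post_start = 3, post_num = 1, keyword->num = 1, and the has_rest,
+      * has_kwrest and has_block flags are set for *r, **kw and &blk.
+      */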
+
+     struct {
+         struct {
+             unsigned int has_lead   : 1;
+             unsigned int has_opt    : 1;
+             unsigned int has_rest   : 1;
+             unsigned int has_post   : 1;
+             unsigned int has_kw     : 1;
+             unsigned int has_kwrest : 1;
+             unsigned int has_block  : 1;
+
+             unsigned int ambiguous_param0 : 1; /* {|a|} */
+             unsigned int accepts_no_kwarg : 1;
+             unsigned int ruby2_keywords: 1;
+             unsigned int anon_rest: 1;
+             unsigned int anon_kwrest: 1;
+             unsigned int use_block: 1;
+             unsigned int forwardable: 1;
+         } flags;
+
+         unsigned int size;
+
+         int lead_num;
+         int opt_num;
+         int rest_start;
+         int post_start;
+         int post_num;
+         int block_start;
+
+         const VALUE *opt_table; /* (opt_num + 1) entries. */
+         /* opt_num and opt_table:
+          *
+          * def foo o1=e1, o2=e2, ..., oN=eN
+          * #=>
+          *   # prologue code
+          *   A1: e1
+          *   A2: e2
+          *   ...
+          *   AN: eN
+          *   AL: body
+          * opt_num = N
+          * opt_table = [A1, A2, ..., AN, AL]
+          */
+
+         const struct rb_iseq_param_keyword {
+             int num;
+             int required_num;
+             int bits_start;
+             int rest_start;
+             const ID *table;
+             VALUE *default_values;
+         } *keyword;
+     } param;
+
+     rb_iseq_location_t location;
+
+     /* insn info, must be freed */
+     struct iseq_insn_info {
+         const struct iseq_insn_info_entry *body;
+         unsigned int *positions;
+         unsigned int size;
+ #if VM_INSN_INFO_TABLE_IMPL == 2
+         struct succ_index_table *succ_index_table;
+ #endif
+     } insns_info;
+
+     const ID *local_table; /* must free */
+
+     /* catch table */
+     struct iseq_catch_table *catch_table;
+
+     /* for child iseq */
+     const struct rb_iseq_struct *parent_iseq;
+     struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
+
+     union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
+     struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
+
+     struct {
+         rb_snum_t flip_count;
+         VALUE script_lines;
+         VALUE coverage;
+         VALUE pc2branchindex;
+         VALUE *original_iseq;
+     } variable;
+
+     unsigned int local_table_size;
+     unsigned int ic_size;     // Number of IC caches
+     unsigned int ise_size;    // Number of ISE caches
+     unsigned int ivc_size;    // Number of IVC caches
+     unsigned int icvarc_size; // Number of ICVARC caches
+     unsigned int ci_size;
+     unsigned int stack_max; /* for stack overflow check */
+
+     unsigned int builtin_attrs; // Union of rb_builtin_attr
+
+     bool prism; // ISEQ was generated from the prism compiler
+
+     union {
+         iseq_bits_t *list; /* Find references for GC */
+         iseq_bits_t single;
+     } mark_bits;
+
+     struct rb_id_table *outer_variables;
+
+     const rb_iseq_t *mandatory_only_iseq;
+
+ #if USE_RJIT || USE_YJIT
+     // Function pointer for JIT code on jit_exec()
+     rb_jit_func_t jit_entry;
+     // Number of calls on jit_exec()
+     long unsigned jit_entry_calls;
+ #endif
+
+ #if USE_YJIT
+     // Function pointer for JIT code on jit_exec_exception()
+     rb_jit_func_t jit_exception;
+     // Number of calls on jit_exec_exception()
+     long unsigned jit_exception_calls;
+ #endif
+
+ #if USE_RJIT
+     // RJIT stores some data on each iseq.
+     VALUE rjit_blocks;
+ #endif
+
+ #if USE_YJIT
+     // YJIT stores some data on each iseq.
+     void *yjit_payload;
+     // Used to estimate how frequently this ISEQ gets called
+     uint64_t yjit_calls_at_interv;
+ #endif
+ };
+
+ /* T_IMEMO/iseq */
+ /* typedef rb_iseq_t is in method.h */
+ struct rb_iseq_struct {
+     VALUE flags;   /* 1 */
+     VALUE wrapper; /* 2 */
+
+     struct rb_iseq_constant_body *body; /* 3 */
+
+     union { /* 4, 5 words */
+         struct iseq_compile_data *compile_data; /* used at compile time */
+
+         struct {
+             VALUE obj;
+             int index;
+         } loader;
+
+         struct {
+             struct rb_hook_list_struct *local_hooks;
+             rb_event_flag_t global_trace_events;
+         } exec;
+     } aux;
+ };
+
+ #define ISEQ_BODY(iseq) ((iseq)->body)
+
+ #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
+ #define USE_LAZY_LOAD 0
+ #endif
+
+ #if !USE_LAZY_LOAD
+ static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
+ #endif
+ const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
+
+ static inline const rb_iseq_t *
+ rb_iseq_check(const rb_iseq_t *iseq)
+ {
+     if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
+         rb_iseq_complete((rb_iseq_t *)iseq);
+     }
+     return iseq;
+ }
+
+ static inline const rb_iseq_t *
+ def_iseq_ptr(rb_method_definition_t *def)
+ {
+     // TODO: re-visit. To check the bug, enable this assertion.
+ #if VM_CHECK_MODE > 0
+     if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
+ #endif
+     return rb_iseq_check(def->body.iseq.iseqptr);
+ }
+
+ enum ruby_special_exceptions {
+     ruby_error_reenter,
+     ruby_error_nomemory,
+     ruby_error_sysstack,
+     ruby_error_stackfatal,
+     ruby_error_stream_closed,
+     ruby_special_error_count
+ };
+
+ #define GetVMPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_vm_t, (ptr))
+
+ struct rb_vm_struct;
+ typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
+
+ typedef struct rb_at_exit_list {
+     rb_vm_at_exit_func *func;
+     struct rb_at_exit_list *next;
+ } rb_at_exit_list;
+
+ void *rb_objspace_alloc(void);
+ void rb_objspace_free(void *objspace);
+ void rb_objspace_call_finalizer(void);
+
+ typedef struct rb_hook_list_struct {
+     struct rb_event_hook_struct *hooks;
+     rb_event_flag_t events;
+     unsigned int running;
+     bool need_clean;
+     bool is_local;
+ } rb_hook_list_t;
+
+
+ // see builtin.h for definition
+ typedef const struct rb_builtin_function *RB_BUILTIN;
+
+ struct global_object_list {
+     VALUE *varptr;
+     struct global_object_list *next;
+ };
+
+ typedef struct rb_vm_struct {
+     VALUE self;
+
+     struct {
+         struct ccan_list_head set;
+         unsigned int cnt;
+         unsigned int blocking_cnt;
+
+         struct rb_ractor_struct *main_ractor;
+         struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
+
+         struct {
+             // monitor
+             rb_nativethread_lock_t lock;
+             struct rb_ractor_struct *lock_owner;
+             unsigned int lock_rec;
+
+             // join at exit
+             rb_nativethread_cond_t terminate_cond;
+             bool terminate_waiting;
+
+ #ifndef RUBY_THREAD_PTHREAD_H
+             bool barrier_waiting;
+             unsigned int barrier_cnt;
+             rb_nativethread_cond_t barrier_cond;
+ #endif
+         } sync;
+
+         // ractor scheduling
+         struct {
+             rb_nativethread_lock_t lock;
+             struct rb_ractor_struct *lock_owner;
+             bool locked;
+
+             rb_nativethread_cond_t cond; // GRQ
+             unsigned int snt_cnt; // count of shared NTs
+             unsigned int dnt_cnt; // count of dedicated NTs
+
+             unsigned int running_cnt;
+
+             unsigned int max_cpu;
+             struct ccan_list_head grq; // Global Ready Queue
+             unsigned int grq_cnt;
+
+             // running threads
+             struct ccan_list_head running_threads;
+
+             // threads which switch context by timeslice
+             struct ccan_list_head timeslice_threads;
+
+             struct ccan_list_head zombie_threads;
+
+             // true if the timeslice timer is not enabled
+             bool timeslice_wait_inf;
+
+             // barrier
+             rb_nativethread_cond_t barrier_complete_cond;
+             rb_nativethread_cond_t barrier_release_cond;
+             bool barrier_waiting;
+             unsigned int barrier_waiting_cnt;
+             unsigned int barrier_serial;
+         } sched;
+     } ractor;
+
+ #ifdef USE_SIGALTSTACK
+     void *main_altstack;
+ #endif
+
+     rb_serial_t fork_gen;
+     struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
+
+     /* set in single-threaded processes only: */
+     volatile int ubf_async_safe;
+
+     unsigned int running: 1;
+     unsigned int thread_abort_on_exception: 1;
+     unsigned int thread_report_on_exception: 1;
+     unsigned int thread_ignore_deadlock: 1;
+
+     /* object management */
+     VALUE mark_object_ary;
+     struct global_object_list *global_object_list;
+     const VALUE special_exceptions[ruby_special_error_count];
+
+     /* load */
+     VALUE top_self;
+     VALUE load_path;
+     VALUE load_path_snapshot;
+     VALUE load_path_check_cache;
+     VALUE expanded_load_path;
+     VALUE loaded_features;
+     VALUE loaded_features_snapshot;
+     VALUE loaded_features_realpaths;
+     VALUE loaded_features_realpath_map;
+     struct st_table *loaded_features_index;
+     struct st_table *loading_table;
+     // For running the init function of statically linked
+     // extensions when they are loaded
+     struct st_table *static_ext_inits;
+
+     /* signal */
+     struct {
+         VALUE cmd[RUBY_NSIG];
+     } trap_list;
+
+     /* postponed_job (async-signal-safe, and thread-safe) */
+     struct rb_postponed_job_queue *postponed_job_queue;
+
+     int src_encoding_index;
+
+     /* workqueue (thread-safe, NOT async-signal-safe) */
+     struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
+     rb_nativethread_lock_t workqueue_lock;
+
+     VALUE orig_progname, progname;
+     VALUE coverages, me2counter;
+     int coverage_mode;
+
+     struct rb_objspace *objspace;
+
+     rb_at_exit_list *at_exit;
+
+     st_table *frozen_strings;
+
+     const struct rb_builtin_function *builtin_function_table;
+
+     st_table *ci_table;
+     struct rb_id_table *negative_cme_table;
+     st_table *overloaded_cme_table; // cme -> overloaded_cme
+     st_table *unused_block_warning_table;
+     bool unused_block_warning_strict;
+
+     // This id table contains a mapping from ID to ICs. It does this with ID
+     // keys and nested st_tables as values. The nested tables have ICs as keys
+     // and Qtrue as values. It is used when inline constant caches need to be
+     // invalidated or ISEQs are being freed.
+     struct rb_id_table *constant_cache;
+
+ #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
+ #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
+ #endif
+     const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
+
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
+     uint32_t clock;
+ #endif
+
+     /* params */
+     struct { /* size in byte */
+         size_t thread_vm_stack_size;
+         size_t thread_machine_stack_size;
+         size_t fiber_vm_stack_size;
+         size_t fiber_machine_stack_size;
+     } default_params;
+
+ } rb_vm_t;
+
+ /* default values */
+
+ #define RUBY_VM_SIZE_ALIGN 4096
+
+ #define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
+ #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*   8 KB or   16 KB */
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */
+
+ #define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */
+ #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*   8 KB or   16 KB */
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /* 256 KB or  512 KB */
+ #if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /* 128 KB or  256 KB */
+ #else
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */
+ #endif
+
+ #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
+ /* It seems sanitizers consume A LOT of machine stacks */
+ #undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
+ #undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
+ #undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
+ #undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
+ #endif
+
+ #ifndef VM_DEBUG_BP_CHECK
+ #define VM_DEBUG_BP_CHECK 0
+ #endif
+
+ #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
+ #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
+ #endif
+
+ struct rb_captured_block {
+     VALUE self;
+     const VALUE *ep;
+     union {
+         const rb_iseq_t *iseq;
+         const struct vm_ifunc *ifunc;
+         VALUE val;
+     } code;
+ };
+
+ enum rb_block_handler_type {
+     block_handler_type_iseq,
+     block_handler_type_ifunc,
+     block_handler_type_symbol,
+     block_handler_type_proc
+ };
+
+ enum rb_block_type {
+     block_type_iseq,
+     block_type_ifunc,
+     block_type_symbol,
+     block_type_proc
+ };
+
+ struct rb_block {
+     union {
+         struct rb_captured_block captured;
+         VALUE symbol;
+         VALUE proc;
+     } as;
+     enum rb_block_type type;
+ };
+
+ typedef struct rb_control_frame_struct {
+     const VALUE *pc;        // cfp[0]
+     VALUE *sp;              // cfp[1]
+     const rb_iseq_t *iseq;  // cfp[2]
+     VALUE self;             // cfp[3] / block[0]
+     const VALUE *ep;        // cfp[4] / block[1]
+     const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
+     void *jit_return;       // cfp[6] -- return address for JIT code
+ #if VM_DEBUG_BP_CHECK
+     VALUE *bp_check;        // cfp[7]
+ #endif
+ } rb_control_frame_t;
+
+ extern const rb_data_type_t ruby_threadptr_data_type;
+
+ static inline struct rb_thread_struct *
+ rb_thread_ptr(VALUE thval)
+ {
+     return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
+ }
+
+ enum rb_thread_status {
+     THREAD_RUNNABLE,
+     THREAD_STOPPED,
+     THREAD_STOPPED_FOREVER,
+     THREAD_KILLED
+ };
+
+ #ifdef RUBY_JMP_BUF
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
+ #else
+ typedef void *rb_jmpbuf_t[5];
+ #endif
+
+ /*
+   The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
+   long jump to a C frame associated with `rb_vm_tag`.
+
+   The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
+   following functions:
+   - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
+   - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
+
+   `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
+   `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
+ */
+ #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+ /*
+   A WebAssembly target with Asyncify-based SJLJ needs
+   to capture the execution context by unwind/rewind-ing
+   call frames into a jump buffer. The buffer space tends
+   to be considerably large, unlike other architectures'
+   register-based buffers.
+   Therefore, we allocate the buffer on the heap in such
+   environments.
+ */
+ typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
+
+ #define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
+
+ static inline void
+ rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+ {
+     *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
+ }
+
+ static inline void
+ rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+ {
+     ruby_xfree(*jmpbuf);
+ }
+ #else
+ typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
+
+ #define RB_VM_TAG_JMPBUF_GET(buf) (buf)
+
+ static inline void
+ rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
+ {
+     // no-op
+ }
+
+ static inline void
+ rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
+ {
+     // no-op
+ }
+ #endif
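+ /* Usage sketch (illustrative, not part of this header): a tag owner is
+  * expected to pair init/deinit around the setjmp, e.g.
+  *
+  *     struct rb_vm_tag tag;
+  *     rb_vm_tag_jmpbuf_init(&tag.buf);
+  *     if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) { ... }
+  *     rb_vm_tag_jmpbuf_deinit(&tag.buf);
+  *
+  * On the Wasm build this allocates/frees a heap buffer; elsewhere both
+  * calls are no-ops and the buffer lives inside struct rb_vm_tag itself.
+  */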
+
+ /*
+   The members which are written in EC_PUSH_TAG() should be placed at
+   the beginning and the end, so that the entire region is accessible.
+ */
+ struct rb_vm_tag {
+     VALUE tag;
+     VALUE retval;
+     rb_vm_tag_jmpbuf_t buf;
+     struct rb_vm_tag *prev;
+     enum ruby_tag_type state;
+     unsigned int lock_rec;
+ };
+
+ STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
+ STATIC_ASSERT(rb_vm_tag_buf_end,
+               offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
+               sizeof(struct rb_vm_tag));
+
+ struct rb_unblock_callback {
+     rb_unblock_function_t *func;
+     void *arg;
+ };
+
+ struct rb_mutex_struct;
+
+ typedef struct rb_fiber_struct rb_fiber_t;
+
+ struct rb_waiting_list {
+     struct rb_waiting_list *next;
+     struct rb_thread_struct *thread;
+     struct rb_fiber_struct *fiber;
+ };
+
+ struct rb_execution_context_struct {
+     /* execution information */
+     VALUE *vm_stack;      /* must free, must mark */
+     size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
+     rb_control_frame_t *cfp;
+
+     struct rb_vm_tag *tag;
+
+     /* interrupt flags */
+     rb_atomic_t interrupt_flag;
+     rb_atomic_t interrupt_mask; /* size should match flag */
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
+     uint32_t checked_clock;
+ #endif
+
+     rb_fiber_t *fiber_ptr;
+     struct rb_thread_struct *thread_ptr;
+
+     /* storage (ec (fiber) local) */
+     struct rb_id_table *local_storage;
+     VALUE local_storage_recursive_hash;
+     VALUE local_storage_recursive_hash_for_trace;
+
+     /* Inheritable fiber storage. */
+     VALUE storage;
+
+     /* eval env */
+     const VALUE *root_lep;
+     VALUE root_svar;
+
+     /* trace information */
+     struct rb_trace_arg_struct *trace_arg;
+
+     /* temporary places */
+     VALUE errinfo;
+     VALUE passed_block_handler; /* for rb_iterate */
+
+     uint8_t raised_flag; /* only 3 bits needed */
+
+     /* n.b. only 7 bits needed, really: */
+     BITFIELD(enum method_missing_reason, method_missing_reason, 8);
+
+     VALUE private_const_reference;
+
+     /* for GC */
+     struct {
+         VALUE *stack_start;
+         VALUE *stack_end;
+         size_t stack_maxsize;
+         RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
+
+ #ifdef RUBY_ASAN_ENABLED
+         void *asan_fake_stack_handle;
+ #endif
+     } machine;
+ };
+
+ #ifndef rb_execution_context_t
+ typedef struct rb_execution_context_struct rb_execution_context_t;
+ #define rb_execution_context_t rb_execution_context_t
+ #endif
+
+ // for builtin.h
+ #define VM_CORE_H_EC_DEFINED 1
+
+ // Set the vm_stack pointer in the execution context.
+ void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
+
+ // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
+ // @param ec the execution context to update.
+ // @param stack a pointer to the stack to use.
+ // @param size the size of the stack, as in `VALUE stack[size]`.
+ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
+
+ // Clear (set to `NULL`) the vm_stack pointer.
+ // @param ec the execution context to update.
+ void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
+
+ struct rb_ext_config {
+     bool ractor_safe;
+ };
+
+ typedef struct rb_ractor_struct rb_ractor_t;
+
+ struct rb_native_thread;
+
+ typedef struct rb_thread_struct {
+     struct ccan_list_node lt_node; // managed by a ractor
+     VALUE self;
+     rb_ractor_t *ractor;
+     rb_vm_t *vm;
+     struct rb_native_thread *nt;
+     rb_execution_context_t *ec;
+
+     struct rb_thread_sched_item sched;
+     bool mn_schedulable;
+     rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
+
+     VALUE last_status; /* $? */
+
+     /* for cfunc */
+     struct rb_calling_info *calling;
+
+     /* for load(true) */
+     VALUE top_self;
+     VALUE top_wrapper;
+
+     /* thread control */
+
+     BITFIELD(enum rb_thread_status, status, 2);
+     /* bit flags */
+     unsigned int has_dedicated_nt : 1;
+     unsigned int to_kill : 1;
+     unsigned int abort_on_exception: 1;
+     unsigned int report_on_exception: 1;
+     unsigned int pending_interrupt_queue_checked: 1;
+     int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
+     uint32_t running_time_us; /* 12500..800000 */
+
+     void *blocking_region_buffer;
+
+     VALUE thgroup;
+     VALUE value;
+
+     /* temporary place of retval on OPT_CALL_THREADED_CODE */
+ #if OPT_CALL_THREADED_CODE
+     VALUE retval;
+ #endif
+
+     /* async errinfo queue */
+     VALUE pending_interrupt_queue;
+     VALUE pending_interrupt_mask_stack;
+
+     /* interrupt management */
+     rb_nativethread_lock_t interrupt_lock;
+     struct rb_unblock_callback unblock;
+     VALUE locking_mutex;
+     struct rb_mutex_struct *keeping_mutexes;
+
+     struct rb_waiting_list *join_list;
+
+     union {
+         struct {
+             VALUE proc;
+             VALUE args;
+             int kw_splat;
+         } proc;
+         struct {
+             VALUE (*func)(void *);
+             void *arg;
+         } func;
+     } invoke_arg;
+
+     enum thread_invoke_type {
+         thread_invoke_type_none = 0,
+         thread_invoke_type_proc,
+         thread_invoke_type_ractor_proc,
+         thread_invoke_type_func
+     } invoke_type;
+
+     /* statistics data for profiler */
+     VALUE stat_insn_usage;
+
+     /* fiber */
+     rb_fiber_t *root_fiber;
+
+     VALUE scheduler;
+     unsigned int blocking;
+
+     /* misc */
+     VALUE name;
+     void **specific_storage;
+
+     struct rb_ext_config ext_config;
+ } rb_thread_t;
+
+ static inline unsigned int
+ rb_th_serial(const rb_thread_t *th)
+ {
+     return th ? (unsigned int)th->serial : 0;
+ }
+
+ typedef enum {
+     VM_DEFINECLASS_TYPE_CLASS           = 0x00,
+     VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
+     VM_DEFINECLASS_TYPE_MODULE          = 0x02,
+     /* 0x03..0x06 is reserved */
+     VM_DEFINECLASS_TYPE_MASK            = 0x07
+ } rb_vm_defineclass_type_t;
+
+ #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
+ #define VM_DEFINECLASS_FLAG_SCOPED         0x08
+ #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
+ #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
+ #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
+     ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
+
+ /* iseq.c */
+ RUBY_SYMBOL_EXPORT_BEGIN
+
+ /* node -> iseq */
+ rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
+ rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
+ rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
+ rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
+ rb_iseq_t *rb_iseq_new_with_opt(VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
+                                 enum rb_iseq_type, const rb_compile_option_t*,
+                                 VALUE script_lines);
+
+ struct iseq_link_anchor;
+ struct rb_iseq_new_with_callback_callback_func {
+     VALUE flags;
+     VALUE reserved;
+     void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
+     const void *data;
+ };
+ static inline struct rb_iseq_new_with_callback_callback_func *
+ rb_iseq_new_with_callback_new_callback(
+     void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
+ {
+     struct rb_iseq_new_with_callback_callback_func *memo =
+         IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
+     memo->func = func;
+     memo->data = ptr;
+
+     return memo;
+ }
+ rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
+     VALUE name, VALUE path, VALUE realpath, int first_lineno,
+     const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
+
+ VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
+ int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
+
+ VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
+
+ RUBY_EXTERN VALUE rb_cISeq;
+ RUBY_EXTERN VALUE rb_cRubyVM;
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+ RUBY_EXTERN VALUE rb_block_param_proxy;
+ RUBY_SYMBOL_EXPORT_END
+
+ #define GetProcPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_proc_t, (ptr))
+
+ typedef struct {
+     const struct rb_block block;
+     unsigned int is_from_method: 1; /* bool */
+     unsigned int is_lambda: 1;      /* bool */
+     unsigned int is_isolated: 1;    /* bool */
+ } rb_proc_t;
+
+ RUBY_SYMBOL_EXPORT_BEGIN
+ VALUE rb_proc_isolate(VALUE self);
+ VALUE rb_proc_isolate_bang(VALUE self);
+ VALUE rb_proc_ractor_make_shareable(VALUE self);
+ RUBY_SYMBOL_EXPORT_END
+
+ typedef struct {
+     VALUE flags; /* imemo header */
+     rb_iseq_t *iseq;
+     const VALUE *ep;
+     const VALUE *env;
+     unsigned int env_size;
+ } rb_env_t;
+
+ extern const rb_data_type_t ruby_binding_data_type;
+
+ #define GetBindingPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_binding_t, (ptr))
+
+ typedef struct {
+     const struct rb_block block;
+     const VALUE pathobj;
+     int first_lineno;
+ } rb_binding_t;
+
+ /* used by compile time and send insn */
+
+ enum vm_check_match_type {
+     VM_CHECKMATCH_TYPE_WHEN = 1,
+     VM_CHECKMATCH_TYPE_CASE = 2,
+     VM_CHECKMATCH_TYPE_RESCUE = 3
+ };
+
+ #define VM_CHECKMATCH_TYPE_MASK 0x03
+ #define VM_CHECKMATCH_ARRAY     0x04
+
+ enum vm_opt_newarray_send_type {
+     VM_OPT_NEWARRAY_SEND_MAX = 1,
+     VM_OPT_NEWARRAY_SEND_MIN = 2,
+     VM_OPT_NEWARRAY_SEND_HASH = 3,
+     VM_OPT_NEWARRAY_SEND_PACK = 4,
+     VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
+ };
+
+ enum vm_special_object_type {
+     VM_SPECIAL_OBJECT_VMCORE = 1,
+     VM_SPECIAL_OBJECT_CBASE,
+     VM_SPECIAL_OBJECT_CONST_BASE
+ };
+
+ enum vm_svar_index {
+     VM_SVAR_LASTLINE = 0, /* $_ */
+     VM_SVAR_BACKREF = 1,  /* $~ */
+
+     VM_SVAR_EXTRA_START = 2,
+     VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
+ };
+
+ /* inline cache */
+ typedef struct iseq_inline_constant_cache *IC;
+ typedef struct iseq_inline_iv_cache_entry *IVC;
+ typedef struct iseq_inline_cvar_cache_entry *ICVARC;
+ typedef union iseq_inline_storage_entry *ISE;
+ typedef const struct rb_callinfo *CALL_INFO;
+ typedef const struct rb_callcache *CALL_CACHE;
+ typedef struct rb_call_data *CALL_DATA;
+
+ typedef VALUE CDHASH;
+
+ #ifndef FUNC_FASTCALL
+ #define FUNC_FASTCALL(x) x
+ #endif
+
+ typedef rb_control_frame_t *
+   (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
+
+ #define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
+ #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
+
+ #define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
+ #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
+ #define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
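+ /* Note (illustrative): these macros implement low-bit pointer tagging.
+  * Captured-block pointers are word-aligned, so the two lowest bits are
+  * free: 0x01 tags an iseq block handler and 0x03 an ifunc block handler
+  * (see VM_BH_ISEQ_BLOCK_P/VM_BH_IFUNC_P below). Setting the 0x01 bit
+  * also makes the word look like a Fixnum, so the GC will not chase it
+  * as an object reference, which is what GC_GUARDED_PTR relies on.
+  */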
+
+ enum vm_frame_env_flags {
+     /* Frame/Environment flag bits:
+      *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
+      *
+      * X   : tag for GC marking (it appears to be a Fixnum)
+      * EEE : 4 bits Env flags
+      * FF..: 7 bits Frame flags
+      * MM..: 15 bits frame magic (to check frame corruption)
+      */
+
+     /* frame types */
+     VM_FRAME_MAGIC_METHOD = 0x11110001,
+     VM_FRAME_MAGIC_BLOCK  = 0x22220001,
+     VM_FRAME_MAGIC_CLASS  = 0x33330001,
+     VM_FRAME_MAGIC_TOP    = 0x44440001,
+     VM_FRAME_MAGIC_CFUNC  = 0x55550001,
+     VM_FRAME_MAGIC_IFUNC  = 0x66660001,
+     VM_FRAME_MAGIC_EVAL   = 0x77770001,
+     VM_FRAME_MAGIC_RESCUE = 0x78880001,
+     VM_FRAME_MAGIC_DUMMY  = 0x79990001,
+
+     VM_FRAME_MAGIC_MASK   = 0x7fff0001,
+
+     /* frame flag */
+     VM_FRAME_FLAG_FINISH               = 0x0020,
+     VM_FRAME_FLAG_BMETHOD              = 0x0040,
+     VM_FRAME_FLAG_CFRAME               = 0x0080,
+     VM_FRAME_FLAG_LAMBDA               = 0x0100,
+     VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
+     VM_FRAME_FLAG_CFRAME_KW            = 0x0400,
+     VM_FRAME_FLAG_PASSED               = 0x0800,
+
+     /* env flag */
+     VM_ENV_FLAG_LOCAL       = 0x0002,
+     VM_ENV_FLAG_ESCAPED     = 0x0004,
+     VM_ENV_FLAG_WB_REQUIRED = 0x0008,
+     VM_ENV_FLAG_ISOLATED    = 0x0010,
+ };
+
+ #define VM_ENV_DATA_SIZE          ( 3)
+
+ #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
+ #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
+ #define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
+ #define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */
+
+ #define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
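+ /* Layout sketch (illustrative): relative to an environment pointer ep,
+  *
+  *     ep[-2]  VM_ENV_DATA_INDEX_ME_CREF  method entry / cref
+  *     ep[-1]  VM_ENV_DATA_INDEX_SPECVAL  block handler or prev ep
+  *     ep[ 0]  VM_ENV_DATA_INDEX_FLAGS    the flag bits described above
+  *     ep[ 1]  VM_ENV_DATA_INDEX_ENV      escaped env object, if any
+  *
+  * with local variables below that, the last one at
+  * ep[VM_ENV_INDEX_LAST_LVAR].
+  */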
+
+ static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
+
+ static inline void
+ VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
+ {
+     VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+     VM_ASSERT(FIXNUM_P(flags));
+     VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
+ }
+
+ static inline void
+ VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
+ {
+     VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+     VM_ASSERT(FIXNUM_P(flags));
+     VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
+ }
+
+ static inline unsigned long
+ VM_ENV_FLAGS(const VALUE *ep, long flag)
+ {
+     VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+     VM_ASSERT(FIXNUM_P(flags));
+     return flags & flag;
+ }
+
+ static inline unsigned long
+ VM_FRAME_TYPE(const rb_control_frame_t *cfp)
+ {
+     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
+ }
+
+ static inline int
+ VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
+ {
+     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
+ }
+
+ static inline int
+ VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
+ {
+     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
+ }
+
+ static inline int
+ VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
+ {
+     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
+ }
+
+ static inline int
+ VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
+ {
+     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
+ }
+
+ static inline int
+ rb_obj_is_iseq(VALUE iseq)
+ {
+     return imemo_type_p(iseq, imemo_iseq);
+ }
+
+ #if VM_CHECK_MODE > 0
+ #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
+ #endif
+
+ static inline int
+ VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
+ {
+     int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
+     VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
+               (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
+     return cframe_p;
+ }
+
+ static inline int
+ VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
+ {
+     return !VM_FRAME_CFRAME_P(cfp);
+ }
+
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
+     (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+ #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
+ #define VM_BLOCK_HANDLER_NONE 0
+
+ static inline int
+ VM_ENV_LOCAL_P(const VALUE *ep)
+ {
+     return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
+ }
+
+ static inline const VALUE *
+ VM_ENV_PREV_EP(const VALUE *ep)
+ {
+     VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
+     return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+ }
+
+ static inline VALUE
+ VM_ENV_BLOCK_HANDLER(const VALUE *ep)
+ {
+     VM_ASSERT(VM_ENV_LOCAL_P(ep));
+     return ep[VM_ENV_DATA_INDEX_SPECVAL];
+ }
+
+ #if VM_CHECK_MODE > 0
+ int rb_vm_ep_in_heap_p(const VALUE *ep);
+ #endif
+
+ static inline int
+ VM_ENV_ESCAPED_P(const VALUE *ep)
+ {
+     VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
+     return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
+ }
+
+ #if VM_CHECK_MODE > 0
+ static inline int
+ vm_assert_env(VALUE obj)
+ {
+     VM_ASSERT(imemo_type_p(obj, imemo_env));
+     return 1;
+ }
+ #endif
+
+ RBIMPL_ATTR_NONNULL((1))
+ static inline VALUE
+ VM_ENV_ENVVAL(const VALUE *ep)
+ {
+     VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
+     VM_ASSERT(VM_ENV_ESCAPED_P(ep));
+     VM_ASSERT(vm_assert_env(envval));
+     return envval;
+ }
+
+ RBIMPL_ATTR_NONNULL((1))
+ static inline const rb_env_t *
+ VM_ENV_ENVVAL_PTR(const VALUE *ep)
+ {
+     return (const rb_env_t *)VM_ENV_ENVVAL(ep);
+ }
+
+ static inline const rb_env_t *
+ vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
+ {
+     rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
+     env->ep = env_ep;
+     env->env = env_body;
+     env->env_size = env_size;
+     env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
+     return env;
+ }
+
+ static inline void
+ VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
+ {
+     *((VALUE *)ptr) = v;
+ }
+
+ static inline void
+ VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
+ {
+     VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
+     VM_FORCE_WRITE(ptr, special_const_value);
+ }
+
+ static inline void
+ VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
+ {
+     VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
+     VM_FORCE_WRITE(&ep[index], v);
+ }
+
+ const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
+ const VALUE *rb_vm_proc_local_ep(VALUE proc);
+ void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
+ void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
+
+ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
+
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
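+ /* Note: control frames are stacked downward from the top of the VM
+  * stack, so the previous (older) frame is at the higher address (cfp)+1
+  * and the next (newer) frame at (cfp)-1; the overflow check below works
+  * by comparing addresses against the end of the stack.
+  */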
+
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+     ((void *)(ecfp) > (void *)(cfp))
+
+ static inline const rb_control_frame_t *
+ RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
+ {
+     return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
+ }
+
+ static inline int
+ RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
+ {
+     return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
+ }
+
+ static inline int
+ VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
+ {
+     if ((block_handler & 0x03) == 0x01) {
+ #if VM_CHECK_MODE > 0
+         struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+         VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
+ #endif
+         return 1;
+     }
+     else {
+         return 0;
+     }
+ }
+
+ static inline VALUE
+ VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
+ {
+     VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
+     VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
+     return block_handler;
+ }
+
+ static inline const struct rb_captured_block *
+ VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
+ {
+     struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+     VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
+     return captured;
+ }
+
+ static inline int
+ VM_BH_IFUNC_P(VALUE block_handler)
+ {
+     if ((block_handler & 0x03) == 0x03) {
+ #if VM_CHECK_MODE > 0
+         struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
+         VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
+ #endif
+         return 1;
+     }
+     else {
+         return 0;
+     }
+ }
+
+ static inline VALUE
+ VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
+ {
+     VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
+     VM_ASSERT(VM_BH_IFUNC_P(block_handler));
+     return block_handler;
+ }
+
+ static inline const struct rb_captured_block *
+ VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
+ {
+     struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+     VM_ASSERT(VM_BH_IFUNC_P(block_handler));
+     return captured;
+ }
+
+ static inline const struct rb_captured_block *
+ VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
+ {
+     struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
+     VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
+     return captured;
+ }
+
+ static inline enum rb_block_handler_type
+ vm_block_handler_type(VALUE block_handler)
+ {
+     if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
+         return block_handler_type_iseq;
+     }
+     else if (VM_BH_IFUNC_P(block_handler)) {
+         return block_handler_type_ifunc;
+     }
+     else if (SYMBOL_P(block_handler)) {
+         return block_handler_type_symbol;
+     }
+     else {
+         VM_ASSERT(rb_obj_is_proc(block_handler));
+         return block_handler_type_proc;
+     }
+ }
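+ /* Summary (illustrative): a block handler VALUE is discriminated purely
+  * by its shape -- low bits 0x01: tagged pointer to a captured iseq
+  * block; low bits 0x03: tagged pointer to a captured ifunc block; a
+  * Symbol: symbol-to-proc style block; anything else is asserted to be
+  * a Proc object.
+  */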
1652
+
1653
+ static inline void
1654
+ vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1655
+ {
1656
+ VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1657
+ (vm_block_handler_type(block_handler), 1));
1658
+ }
1659
+
+ static inline enum rb_block_type
+ vm_block_type(const struct rb_block *block)
+ {
+ #if VM_CHECK_MODE > 0
+     switch (block->type) {
+       case block_type_iseq:
+         VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
+         break;
+       case block_type_ifunc:
+         VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
+         break;
+       case block_type_symbol:
+         VM_ASSERT(SYMBOL_P(block->as.symbol));
+         break;
+       case block_type_proc:
+         VM_ASSERT(rb_obj_is_proc(block->as.proc));
+         break;
+     }
+ #endif
+     return block->type;
+ }
+
+ static inline void
+ vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
+ {
+     struct rb_block *mb = (struct rb_block *)block;
+     mb->type = type;
+ }
+
+ static inline const struct rb_block *
+ vm_proc_block(VALUE procval)
+ {
+     VM_ASSERT(rb_obj_is_proc(procval));
+     return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
+ }
+
+ static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
+ static inline const VALUE *vm_block_ep(const struct rb_block *block);
+
+ static inline const rb_iseq_t *
+ vm_proc_iseq(VALUE procval)
+ {
+     return vm_block_iseq(vm_proc_block(procval));
+ }
+
+ static inline const VALUE *
+ vm_proc_ep(VALUE procval)
+ {
+     return vm_block_ep(vm_proc_block(procval));
+ }
+
+ static inline const rb_iseq_t *
+ vm_block_iseq(const struct rb_block *block)
+ {
+     switch (vm_block_type(block)) {
+       case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
+       case block_type_proc: return vm_proc_iseq(block->as.proc);
+       case block_type_ifunc:
+       case block_type_symbol: return NULL;
+     }
+     VM_UNREACHABLE(vm_block_iseq);
+     return NULL;
+ }
+
+ static inline const VALUE *
+ vm_block_ep(const struct rb_block *block)
+ {
+     switch (vm_block_type(block)) {
+       case block_type_iseq:
+       case block_type_ifunc: return block->as.captured.ep;
+       case block_type_proc: return vm_proc_ep(block->as.proc);
+       case block_type_symbol: return NULL;
+     }
+     VM_UNREACHABLE(vm_block_ep);
+     return NULL;
+ }
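+
+ /* For proc-type blocks these accessors unwrap the Proc (via vm_proc_block)
+  * and recurse into the block it carries; symbol blocks have neither an iseq
+  * nor an environment pointer, so both accessors return NULL for them. */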
+
+ static inline VALUE
+ vm_block_self(const struct rb_block *block)
+ {
+     switch (vm_block_type(block)) {
+       case block_type_iseq:
+       case block_type_ifunc:
+         return block->as.captured.self;
+       case block_type_proc:
+         return vm_block_self(vm_proc_block(block->as.proc));
+       case block_type_symbol:
+         return Qundef;
+     }
+     VM_UNREACHABLE(vm_block_self);
+     return Qundef;
+ }
+
+ static inline VALUE
+ VM_BH_TO_SYMBOL(VALUE block_handler)
+ {
+     VM_ASSERT(SYMBOL_P(block_handler));
+     return block_handler;
+ }
+
+ static inline VALUE
+ VM_BH_FROM_SYMBOL(VALUE symbol)
+ {
+     VM_ASSERT(SYMBOL_P(symbol));
+     return symbol;
+ }
+
+ static inline VALUE
+ VM_BH_TO_PROC(VALUE block_handler)
+ {
+     VM_ASSERT(rb_obj_is_proc(block_handler));
+     return block_handler;
+ }
+
+ static inline VALUE
+ VM_BH_FROM_PROC(VALUE procval)
+ {
+     VM_ASSERT(rb_obj_is_proc(procval));
+     return procval;
+ }
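+
+ /* Symbols and Procs are ordinary VALUEs, so the four converters above are
+  * identity functions guarded by type assertions; only captured blocks use
+  * the low-bit pointer tagging seen earlier (e.g. 0x03 for ifunc blocks). */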
+
+ /* VM related object allocate functions */
+ VALUE rb_thread_alloc(VALUE klass);
+ VALUE rb_binding_alloc(VALUE klass);
+ VALUE rb_proc_alloc(VALUE klass);
+ VALUE rb_proc_dup(VALUE self);
+
+ /* for debug */
+ extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
+ extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
+ extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
+
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
+ bool rb_vm_bugreport(const void *, FILE *);
+ typedef void (*ruby_sighandler_t)(int);
+ RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
+ NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
+
+ /* functions about thread/vm execution */
+ RUBY_SYMBOL_EXPORT_BEGIN
+ VALUE rb_iseq_eval(const rb_iseq_t *iseq);
+ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
+ VALUE rb_iseq_path(const rb_iseq_t *iseq);
+ VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
+ RUBY_SYMBOL_EXPORT_END
+
+ VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
+ void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
+
+ int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
+ void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
+
+ VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
+
+ VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
+ static inline VALUE
+ rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
+ {
+     return rb_vm_make_proc_lambda(ec, captured, klass, 0);
+ }
+
+ static inline VALUE
+ rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
+ {
+     return rb_vm_make_proc_lambda(ec, captured, klass, 1);
+ }
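+
+ /* rb_vm_make_proc() and rb_vm_make_lambda() differ only in the is_lambda
+  * flag forwarded to rb_vm_make_proc_lambda(), which selects proc vs. lambda
+  * semantics for the resulting object (argument handling and `return`). */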
+
+ VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
+ VALUE rb_vm_env_local_variables(const rb_env_t *env);
+ const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
+ const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
+ void rb_vm_inc_const_missing_count(void);
+ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
+                     const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
+ void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
+ void rb_vm_pop_frame(rb_execution_context_t *ec);
+
+ void rb_thread_start_timer_thread(void);
+ void rb_thread_stop_timer_thread(void);
+ void rb_thread_reset_timer_thread(void);
+ void rb_thread_wakeup_timer_thread(int);
+
+ static inline void
+ rb_vm_living_threads_init(rb_vm_t *vm)
+ {
+     ccan_list_head_init(&vm->waiting_fds);
+     ccan_list_head_init(&vm->workqueue);
+     ccan_list_head_init(&vm->ractor.set);
+     ccan_list_head_init(&vm->ractor.sched.zombie_threads);
+ }
+
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
+ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
+ VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
+ void rb_vm_stack_to_heap(rb_execution_context_t *ec);
+ void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
+ rb_thread_t * ruby_thread_from_native(void);
+ int ruby_thread_set_native(rb_thread_t *th);
+ int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
+ void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
+ void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
+ VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
+
+ void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
+
+ #define rb_vm_register_special_exception(sp, e, m) \
+     rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
+
+ void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
+
+ void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
+
+ const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
+
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+ #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
+     STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
+     STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
+     const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
+     if (UNLIKELY((cfp) <= &bound[1])) { \
+         vm_stackoverflow(); \
+     } \
+ } while (0)
+
+ #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
+     CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
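+
+ /* The VALUE stack grows upward from the bottom of the VM stack area while
+  * control frames are pushed downward from the top, so the check fires when
+  * sp plus the requested margin would collide with the youngest control
+  * frame; the &bound[1] leaves headroom for one more frame. */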
+
+ VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
+
+ rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
+
+ /* for thread */
+
+ #if RUBY_VM_THREAD_MODEL == 2
+
+ RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
+ RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
+ RUBY_EXTERN unsigned int ruby_vm_event_local_num;
+
+ #define GET_VM() rb_current_vm()
+ #define GET_RACTOR() rb_current_ractor()
+ #define GET_THREAD() rb_current_thread()
+ #define GET_EC() rb_current_execution_context(true)
+
+ static inline rb_thread_t *
+ rb_ec_thread_ptr(const rb_execution_context_t *ec)
+ {
+     return ec->thread_ptr;
+ }
+
+ static inline rb_ractor_t *
+ rb_ec_ractor_ptr(const rb_execution_context_t *ec)
+ {
+     const rb_thread_t *th = rb_ec_thread_ptr(ec);
+     if (th) {
+         VM_ASSERT(th->ractor != NULL);
+         return th->ractor;
+     }
+     else {
+         return NULL;
+     }
+ }
+
+ static inline rb_vm_t *
+ rb_ec_vm_ptr(const rb_execution_context_t *ec)
+ {
+     const rb_thread_t *th = rb_ec_thread_ptr(ec);
+     if (th) {
+         return th->vm;
+     }
+     else {
+         return NULL;
+     }
+ }
+
+ static inline rb_execution_context_t *
+ rb_current_execution_context(bool expect_ec)
+ {
+ #ifdef RB_THREAD_LOCAL_SPECIFIER
+   #ifdef __APPLE__
+     rb_execution_context_t *ec = rb_current_ec();
+   #else
+     rb_execution_context_t *ec = ruby_current_ec;
+   #endif
+
+     /* On shared objects, `__tls_get_addr()` is used to access the TLS,
+      * and the address of `ruby_current_ec` can be stored in a function
+      * frame. However, that address can be misused after a coroutine
+      * migrates to another native thread:
+      *   1) Get `ptr = &ruby_current_ec` on NT1 and store it in the frame.
+      *   2) Context-switch and resume the coroutine on NT2.
+      *   3) `ptr` is used on NT2, but it still points into NT1's TLS.
+      * This assertion checks for such misuse.
+      *
+      * To avoid accidents, `GET_EC()` should be called once per frame.
+      * Note that inlining can cause this problem.
+      */
+     VM_ASSERT(ec == rb_current_ec_noinline());
+ #else
+     rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
+ #endif
+     VM_ASSERT(!expect_ec || ec != NULL);
+     return ec;
+ }
+
+ static inline rb_thread_t *
+ rb_current_thread(void)
+ {
+     const rb_execution_context_t *ec = GET_EC();
+     return rb_ec_thread_ptr(ec);
+ }
+
+ static inline rb_ractor_t *
+ rb_current_ractor_raw(bool expect)
+ {
+     if (ruby_single_main_ractor) {
+         return ruby_single_main_ractor;
+     }
+     else {
+         const rb_execution_context_t *ec = rb_current_execution_context(expect);
+         return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
+     }
+ }
+
+ static inline rb_ractor_t *
+ rb_current_ractor(void)
+ {
+     return rb_current_ractor_raw(true);
+ }
+
+ static inline rb_vm_t *
+ rb_current_vm(void)
+ {
+ #if 0 // TODO: reconsider the assertions
+     VM_ASSERT(ruby_current_vm_ptr == NULL ||
+               ruby_current_execution_context_ptr == NULL ||
+               rb_ec_thread_ptr(GET_EC()) == NULL ||
+               rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
+               rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
+ #endif
+
+     return ruby_current_vm_ptr;
+ }
+
+ void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
+                                unsigned int recorded_lock_rec,
+                                unsigned int current_lock_rec);
+
+ static inline unsigned int
+ rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
+ {
+     rb_vm_t *vm = rb_ec_vm_ptr(ec);
+
+     if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
+         return 0;
+     }
+     else {
+         return vm->ractor.sync.lock_rec;
+     }
+ }
+
+ #else
+ #error "unsupported thread model"
+ #endif
+
+ enum {
+     TIMER_INTERRUPT_MASK = 0x01,
+     PENDING_INTERRUPT_MASK = 0x02,
+     POSTPONED_JOB_INTERRUPT_MASK = 0x04,
+     TRAP_INTERRUPT_MASK = 0x08,
+     TERMINATE_INTERRUPT_MASK = 0x10,
+     VM_BARRIER_INTERRUPT_MASK = 0x20,
+ };
+
+ #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
+ #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
+ #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
+ #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
+ #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
+ #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
+ #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
+                                  (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
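+
+ /* Worked example with hypothetical values: if interrupt_flag is 0x0A
+  * (PENDING | TRAP) and interrupt_mask is 0x08 (trap handling deferred),
+  * RUBY_VM_INTERRUPTED yields 0x0A & ~0x08 & 0x0A == 0x02: only the pending
+  * interrupt is reported while the trap interrupt stays masked. */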
+
+ static inline bool
+ RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
+ {
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
+     uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
+
+     if (current_clock != ec->checked_clock) {
+         ec->checked_clock = current_clock;
+         RUBY_VM_SET_TIMER_INTERRUPT(ec);
+     }
+ #endif
+     return ec->interrupt_flag & ~(ec)->interrupt_mask;
+ }
+
+ VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
+ int rb_signal_buff_size(void);
+ int rb_signal_exec(rb_thread_t *th, int sig);
+ void rb_threadptr_check_signal(rb_thread_t *mth);
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+ void rb_threadptr_signal_exit(rb_thread_t *th);
+ int rb_threadptr_execute_interrupts(rb_thread_t *, int);
+ void rb_threadptr_interrupt(rb_thread_t *th);
+ void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
+ void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
+ void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
+ VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
+ void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
+ void rb_execution_context_update(rb_execution_context_t *ec);
+ void rb_execution_context_mark(const rb_execution_context_t *ec);
+ void rb_fiber_close(rb_fiber_t *fib);
+ void Init_native_thread(rb_thread_t *th);
+ int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
+
+ // vm_sync.h
+ void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
+ void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
+
+ #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
+ static inline void
+ rb_vm_check_ints(rb_execution_context_t *ec)
+ {
+ #ifdef RUBY_ASSERT_CRITICAL_SECTION
+     VM_ASSERT(ruby_assert_critical_section_entered == 0);
+ #endif
+
+     VM_ASSERT(ec == GET_EC());
+
+     if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
+         rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
+     }
+ }
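+
+ /* The common case is a single flag test; only when an unmasked interrupt
+  * bit is set does the slow path run, executing pending interrupts on the
+  * current thread via rb_threadptr_execute_interrupts(). */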
+
+ /* tracer */
+
+ struct rb_trace_arg_struct {
+     rb_event_flag_t event;
+     rb_execution_context_t *ec;
+     const rb_control_frame_t *cfp;
+     VALUE self;
+     ID id;
+     ID called_id;
+     VALUE klass;
+     VALUE data;
+
+     int klass_solved;
+
+     /* calc from cfp */
+     int lineno;
+     VALUE path;
+ };
+
+ void rb_hook_list_mark(rb_hook_list_t *hooks);
+ void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
+ void rb_hook_list_free(rb_hook_list_t *hooks);
+ void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
+ void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
+
+ void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
+
+ #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
+     const rb_event_flag_t flag_arg_ = (flag_); \
+     rb_hook_list_t *hooks_arg_ = (hooks_); \
+     if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
+         /* defer evaluating the other arguments */ \
+         rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
+     } \
+ } while (0)
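+
+ /* Because this is a macro, the self_/id_/klass_/data_ expressions are only
+  * evaluated after the bitmask test on hooks->events passes, keeping event
+  * dispatch nearly free when no hook is registered for the event. */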
+
+ static inline void
+ rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
+                         VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
+ {
+     struct rb_trace_arg_struct trace_arg;
+
+     VM_ASSERT((hooks->events & flag) != 0);
+
+     trace_arg.event = flag;
+     trace_arg.ec = ec;
+     trace_arg.cfp = ec->cfp;
+     trace_arg.self = self;
+     trace_arg.id = id;
+     trace_arg.called_id = called_id;
+     trace_arg.klass = klass;
+     trace_arg.data = data;
+     trace_arg.path = Qundef;
+     trace_arg.klass_solved = 0;
+
+     rb_exec_event_hooks(&trace_arg, hooks, pop_p);
+ }
+
+ struct rb_ractor_pub {
+     VALUE self;
+     uint32_t id;
+     rb_hook_list_t hooks;
+ };
+
+ static inline rb_hook_list_t *
+ rb_ec_ractor_hooks(const rb_execution_context_t *ec)
+ {
+     struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
+     return &cr_pub->hooks;
+ }
+
+ #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
+     EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
+
+ #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
+     EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
+
+ static inline void
+ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
+ {
+     EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
+                     NIL_P(eval_script) ? (VALUE)iseq :
+                     rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
+ }
+
+ void rb_vm_trap_exit(rb_vm_t *vm);
+ void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
+ void rb_vm_postponed_job_free(void); /* vm_trace.c */
+ size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
+ void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
+
+ RUBY_SYMBOL_EXPORT_BEGIN
+
+ int rb_thread_check_trap_pending(void);
+
+ /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
+ #define RUBY_EVENT_COVERAGE_LINE 0x010000
+ #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
+
+ extern VALUE rb_get_coverages(void);
+ extern void rb_set_coverages(VALUE, int, VALUE);
+ extern void rb_clear_coverages(void);
+ extern void rb_reset_coverages(void);
+ extern void rb_resume_coverages(void);
+ extern void rb_suspend_coverages(void);
+
+ void rb_postponed_job_flush(rb_vm_t *vm);
+
+ // ractor.c
+ RUBY_EXTERN VALUE rb_eRactorUnsafeError;
+ RUBY_EXTERN VALUE rb_eRactorIsolationError;
+
+ RUBY_SYMBOL_EXPORT_END
+
+ #endif /* RUBY_VM_CORE_H */