debase-ruby_core_source 0.10.10 → 0.10.11

Sign up to get free protection for your applications and to get access to all the features.
Files changed (115) hide show
  1. checksums.yaml +4 -4
  2. data/Rakefile +6 -3
  3. data/debase-ruby_core_source.gemspec +4 -4
  4. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/addr2line.h +20 -0
  5. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/builtin.h +83 -0
  6. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ccan/build_assert/build_assert.h +40 -0
  7. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ccan/check_type/check_type.h +63 -0
  8. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ccan/container_of/container_of.h +142 -0
  9. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ccan/list/list.h +788 -0
  10. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ccan/str/str.h +16 -0
  11. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/constant.h +55 -0
  12. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/debug_counter.h +423 -0
  13. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/dln.h +36 -0
  14. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/encindex.h +70 -0
  15. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/eval_intern.h +312 -0
  16. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/gc.h +140 -0
  17. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/hrtime.h +168 -0
  18. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/id.h +290 -0
  19. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/id_table.h +36 -0
  20. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/insns.inc +249 -0
  21. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/insns_info.inc +8979 -0
  22. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal.h +107 -0
  23. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/array.h +114 -0
  24. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/bignum.h +246 -0
  25. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/bits.h +566 -0
  26. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/class.h +168 -0
  27. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/compar.h +50 -0
  28. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/compile.h +32 -0
  29. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/compilers.h +108 -0
  30. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/complex.h +29 -0
  31. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/cont.h +25 -0
  32. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/dir.h +17 -0
  33. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/enc.h +20 -0
  34. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/encoding.h +28 -0
  35. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/enum.h +19 -0
  36. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/enumerator.h +22 -0
  37. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/error.h +131 -0
  38. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/eval.h +33 -0
  39. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/file.h +39 -0
  40. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/fixnum.h +185 -0
  41. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/gc.h +154 -0
  42. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/hash.h +240 -0
  43. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/imemo.h +243 -0
  44. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/inits.h +51 -0
  45. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/io.h +35 -0
  46. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/load.h +19 -0
  47. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/loadpath.h +17 -0
  48. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/math.h +24 -0
  49. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/missing.h +19 -0
  50. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/mjit.h +29 -0
  51. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/numeric.h +252 -0
  52. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/object.h +83 -0
  53. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/parse.h +23 -0
  54. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/proc.h +32 -0
  55. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/process.h +136 -0
  56. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/random.h +17 -0
  57. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/range.h +37 -0
  58. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/rational.h +69 -0
  59. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/re.h +29 -0
  60. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/sanitizers.h +191 -0
  61. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/scheduler.h +35 -0
  62. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/serial.h +24 -0
  63. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/signal.h +22 -0
  64. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/static_assert.h +17 -0
  65. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/string.h +135 -0
  66. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/struct.h +154 -0
  67. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/symbol.h +41 -0
  68. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/thread.h +60 -0
  69. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/time.h +35 -0
  70. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/transcode.h +21 -0
  71. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/util.h +31 -0
  72. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/variable.h +83 -0
  73. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/vm.h +135 -0
  74. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/internal/warnings.h +17 -0
  75. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/iseq.h +304 -0
  76. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/known_errors.inc +791 -0
  77. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/method.h +245 -0
  78. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/mjit.h +197 -0
  79. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/mjit_compile.inc +8082 -0
  80. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/node.h +484 -0
  81. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/node_name.inc +210 -0
  82. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/opt_sc.inc +109 -0
  83. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/optinsn.inc +128 -0
  84. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/optunifs.inc +43 -0
  85. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/parse.h +211 -0
  86. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/probes_helper.h +44 -0
  87. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ractor.h +271 -0
  88. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ractor_pub.h +50 -0
  89. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/regenc.h +254 -0
  90. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/regint.h +938 -0
  91. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/regparse.h +370 -0
  92. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/revision.h +2 -0
  93. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ruby_assert.h +15 -0
  94. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/ruby_atomic.h +236 -0
  95. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/siphash.h +48 -0
  96. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/symbol.h +119 -0
  97. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/thread_pthread.h +106 -0
  98. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/thread_win32.h +66 -0
  99. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/timev.h +57 -0
  100. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/transcode_data.h +138 -0
  101. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/transient_heap.h +61 -0
  102. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/variable.h +21 -0
  103. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/version.h +89 -0
  104. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm.inc +5360 -0
  105. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_call_iseq_optimized.inc +244 -0
  106. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_callinfo.h +457 -0
  107. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_core.h +1952 -0
  108. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_debug.h +116 -0
  109. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_exec.h +196 -0
  110. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_insnhelper.h +260 -0
  111. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_opts.h +73 -0
  112. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vm_sync.h +97 -0
  113. data/lib/debase/ruby_core_source/ruby-3.0.0-preview1/vmtc.inc +243 -0
  114. data/lib/debase/ruby_core_source/version.rb +1 -1
  115. metadata +123 -13
@@ -0,0 +1,1952 @@
1
+ #ifndef RUBY_VM_CORE_H
2
+ #define RUBY_VM_CORE_H
3
+ /**********************************************************************
4
+
5
+ vm_core.h -
6
+
7
+ $Author$
8
+ created at: 04/01/01 19:41:38 JST
9
+
10
+ Copyright (C) 2004-2007 Koichi Sasada
11
+
12
+ **********************************************************************/
13
+
14
/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif
25
+
26
+ /**
27
+ * VM Debug Level
28
+ *
29
+ * debug level:
30
+ * 0: no debug output
31
+ * 1: show instruction name
32
+ * 2: show stack frame when control stack frame is changed
33
+ * 3: show stack status
34
+ * 4: show register
35
+ * 5:
36
+ * 10: gc check
37
+ */
38
+
39
+ #ifndef VMDEBUG
40
+ #define VMDEBUG 0
41
+ #endif
42
+
43
+ #if 0
44
+ #undef VMDEBUG
45
+ #define VMDEBUG 3
46
+ #endif
47
+
48
+ #include "ruby/internal/config.h"
49
+
50
+ #include <stddef.h>
51
+ #include <signal.h>
52
+ #include <stdarg.h>
53
+
54
+ #include "ruby_assert.h"
55
+
56
+ #if VM_CHECK_MODE > 0
57
+ #define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
58
+ #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
59
+
60
+ #else
61
+ #define VM_ASSERT(expr) ((void)0)
62
+ #define VM_UNREACHABLE(func) UNREACHABLE
63
+ #endif
64
+
65
+ #include <setjmp.h>
66
+
67
+ #include "ruby/internal/stdbool.h"
68
+ #include "ccan/list/list.h"
69
+ #include "id.h"
70
+ #include "internal.h"
71
+ #include "internal/array.h"
72
+ #include "internal/serial.h"
73
+ #include "internal/vm.h"
74
+ #include "method.h"
75
+ #include "node.h"
76
+ #include "ruby/ruby.h"
77
+ #include "ruby/st.h"
78
+ #include "ruby_atomic.h"
79
+ #include "vm_opts.h"
80
+
81
+ #include "ruby/thread_native.h"
82
+ #if defined(_WIN32)
83
+ #include "thread_win32.h"
84
+ #elif defined(HAVE_PTHREAD_H)
85
+ #include "thread_pthread.h"
86
+ #endif
87
+
88
+ #define RUBY_VM_THREAD_MODEL 2
89
+
90
+ /*
91
+ * implementation selector of get_insn_info algorithm
92
+ * 0: linear search
93
+ * 1: binary search
94
+ * 2: succinct bitvector
95
+ */
96
+ #ifndef VM_INSN_INFO_TABLE_IMPL
97
+ # define VM_INSN_INFO_TABLE_IMPL 2
98
+ #endif
99
+
100
+ #if defined(NSIG_MAX) /* POSIX issue 8 */
101
+ # undef NSIG
102
+ # define NSIG NSIG_MAX
103
+ #elif defined(_SIG_MAXSIG) /* FreeBSD */
104
+ # undef NSIG
105
+ # define NSIG _SIG_MAXSIG
106
+ #elif defined(_SIGMAX) /* QNX */
107
+ # define NSIG (_SIGMAX + 1)
108
+ #elif defined(NSIG) /* 99% of everything else */
109
+ # /* take it */
110
+ #else /* Last resort */
111
+ # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
112
+ #endif
113
+
114
+ #define RUBY_NSIG NSIG
115
+
116
+ #if defined(SIGCLD)
117
+ # define RUBY_SIGCHLD (SIGCLD)
118
+ #elif defined(SIGCHLD)
119
+ # define RUBY_SIGCHLD (SIGCHLD)
120
+ #else
121
+ # define RUBY_SIGCHLD (0)
122
+ #endif
123
+
124
+ /* platforms with broken or non-existent SIGCHLD work by polling */
125
+ #if defined(__APPLE__)
126
+ # define SIGCHLD_LOSSY (1)
127
+ #else
128
+ # define SIGCHLD_LOSSY (0)
129
+ #endif
130
+
131
+ /* define to 0 to test old code path */
132
+ #define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
133
+
134
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
135
+ # define USE_SIGALTSTACK
136
+ void *rb_allocate_sigaltstack(void);
137
+ void *rb_register_sigaltstack(void *);
138
+ # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
139
+ # define RB_ALTSTACK_FREE(var) xfree(var)
140
+ # define RB_ALTSTACK(var) var
141
+ #else /* noop */
142
+ # define RB_ALTSTACK_INIT(var, altstack)
143
+ # define RB_ALTSTACK_FREE(var)
144
+ # define RB_ALTSTACK(var) (0)
145
+ #endif
146
+
147
+ /*****************/
148
+ /* configuration */
149
+ /*****************/
150
+
151
+ /* gcc ver. check */
152
+ #if defined(__GNUC__) && __GNUC__ >= 2
153
+
154
+ #if OPT_TOKEN_THREADED_CODE
155
+ #if OPT_DIRECT_THREADED_CODE
156
+ #undef OPT_DIRECT_THREADED_CODE
157
+ #endif
158
+ #endif
159
+
160
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
161
+
162
+ /* disable threaded code options */
163
+ #if OPT_DIRECT_THREADED_CODE
164
+ #undef OPT_DIRECT_THREADED_CODE
165
+ #endif
166
+ #if OPT_TOKEN_THREADED_CODE
167
+ #undef OPT_TOKEN_THREADED_CODE
168
+ #endif
169
+ #endif
170
+
171
+ /* call threaded code */
172
+ #if OPT_CALL_THREADED_CODE
173
+ #if OPT_DIRECT_THREADED_CODE
174
+ #undef OPT_DIRECT_THREADED_CODE
175
+ #endif /* OPT_DIRECT_THREADED_CODE */
176
+ #if OPT_STACK_CACHING
177
+ #undef OPT_STACK_CACHING
178
+ #endif /* OPT_STACK_CACHING */
179
+ #endif /* OPT_CALL_THREADED_CODE */
180
+
181
+ void rb_vm_encoded_insn_data_table_init(void);
182
+ typedef unsigned long rb_num_t;
183
+ typedef signed long rb_snum_t;
184
+
185
+ enum ruby_tag_type {
186
+ RUBY_TAG_NONE = 0x0,
187
+ RUBY_TAG_RETURN = 0x1,
188
+ RUBY_TAG_BREAK = 0x2,
189
+ RUBY_TAG_NEXT = 0x3,
190
+ RUBY_TAG_RETRY = 0x4,
191
+ RUBY_TAG_REDO = 0x5,
192
+ RUBY_TAG_RAISE = 0x6,
193
+ RUBY_TAG_THROW = 0x7,
194
+ RUBY_TAG_FATAL = 0x8,
195
+ RUBY_TAG_MASK = 0xf
196
+ };
197
+
198
+ #define TAG_NONE RUBY_TAG_NONE
199
+ #define TAG_RETURN RUBY_TAG_RETURN
200
+ #define TAG_BREAK RUBY_TAG_BREAK
201
+ #define TAG_NEXT RUBY_TAG_NEXT
202
+ #define TAG_RETRY RUBY_TAG_RETRY
203
+ #define TAG_REDO RUBY_TAG_REDO
204
+ #define TAG_RAISE RUBY_TAG_RAISE
205
+ #define TAG_THROW RUBY_TAG_THROW
206
+ #define TAG_FATAL RUBY_TAG_FATAL
207
+ #define TAG_MASK RUBY_TAG_MASK
208
+
209
+ enum ruby_vm_throw_flags {
210
+ VM_THROW_NO_ESCAPE_FLAG = 0x8000,
211
+ VM_THROW_STATE_MASK = 0xff
212
+ };
213
+
214
+ /* forward declarations */
215
+ struct rb_thread_struct;
216
+ struct rb_control_frame_struct;
217
+
218
+ /* iseq data type */
219
+ typedef struct rb_compile_option_struct rb_compile_option_t;
220
+
221
+ struct iseq_inline_cache_entry {
222
+ rb_serial_t ic_serial;
223
+ const rb_cref_t *ic_cref;
224
+ VALUE value;
225
+ };
226
+
227
+ struct iseq_inline_iv_cache_entry {
228
+ rb_serial_t ic_serial;
229
+ size_t index;
230
+ };
231
+
232
+ union iseq_inline_storage_entry {
233
+ struct {
234
+ struct rb_thread_struct *running_thread;
235
+ VALUE value;
236
+ } once;
237
+ struct iseq_inline_cache_entry cache;
238
+ struct iseq_inline_iv_cache_entry iv_cache;
239
+ };
240
+
241
+ struct rb_calling_info {
242
+ VALUE block_handler;
243
+ VALUE recv;
244
+ int argc;
245
+ int kw_splat;
246
+ };
247
+
248
+ struct rb_execution_context_struct;
249
+
250
+ #if 1
251
+ #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
252
+ #else
253
+ #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
254
+ #endif
255
+ #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
256
+
257
+ typedef struct rb_iseq_location_struct {
258
+ VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
259
+ VALUE base_label; /* String */
260
+ VALUE label; /* String */
261
+ VALUE first_lineno; /* TODO: may be unsigned short */
262
+ int node_id;
263
+ rb_code_location_t code_location;
264
+ } rb_iseq_location_t;
265
+
266
+ #define PATHOBJ_PATH 0
267
+ #define PATHOBJ_REALPATH 1
268
+
269
+ static inline VALUE
270
+ pathobj_path(VALUE pathobj)
271
+ {
272
+ if (RB_TYPE_P(pathobj, T_STRING)) {
273
+ return pathobj;
274
+ }
275
+ else {
276
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
277
+ return RARRAY_AREF(pathobj, PATHOBJ_PATH);
278
+ }
279
+ }
280
+
281
+ static inline VALUE
282
+ pathobj_realpath(VALUE pathobj)
283
+ {
284
+ if (RB_TYPE_P(pathobj, T_STRING)) {
285
+ return pathobj;
286
+ }
287
+ else {
288
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
289
+ return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
290
+ }
291
+ }
292
+
293
+ /* Forward declarations */
294
+ struct rb_mjit_unit;
295
+
296
+ struct rb_iseq_constant_body {
297
+ enum iseq_type {
298
+ ISEQ_TYPE_TOP,
299
+ ISEQ_TYPE_METHOD,
300
+ ISEQ_TYPE_BLOCK,
301
+ ISEQ_TYPE_CLASS,
302
+ ISEQ_TYPE_RESCUE,
303
+ ISEQ_TYPE_ENSURE,
304
+ ISEQ_TYPE_EVAL,
305
+ ISEQ_TYPE_MAIN,
306
+ ISEQ_TYPE_PLAIN
307
+ } type; /* instruction sequence type */
308
+
309
+ unsigned int iseq_size;
310
+ VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
311
+
312
+ /**
313
+ * parameter information
314
+ *
315
+ * def m(a1, a2, ..., aM, # mandatory
316
+ * b1=(...), b2=(...), ..., bN=(...), # optional
317
+ * *c, # rest
318
+ * d1, d2, ..., dO, # post
319
+ * e1:(...), e2:(...), ..., eK:(...), # keyword
320
+ * **f, # keyword_rest
321
+ * &g) # block
322
+ * =>
323
+ *
324
+ * lead_num = M
325
+ * opt_num = N
326
+ * rest_start = M+N
327
+ * post_start = M+N+(*1)
328
+ * post_num = O
329
+ * keyword_num = K
330
+ * block_start = M+N+(*1)+O+K
331
+ * keyword_bits = M+N+(*1)+O+K+(&1)
332
+ * size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
333
+ */
334
+
335
+ struct {
336
+ struct {
337
+ unsigned int has_lead : 1;
338
+ unsigned int has_opt : 1;
339
+ unsigned int has_rest : 1;
340
+ unsigned int has_post : 1;
341
+ unsigned int has_kw : 1;
342
+ unsigned int has_kwrest : 1;
343
+ unsigned int has_block : 1;
344
+
345
+ unsigned int ambiguous_param0 : 1; /* {|a|} */
346
+ unsigned int accepts_no_kwarg : 1;
347
+ unsigned int ruby2_keywords: 1;
348
+ } flags;
349
+
350
+ unsigned int size;
351
+
352
+ int lead_num;
353
+ int opt_num;
354
+ int rest_start;
355
+ int post_start;
356
+ int post_num;
357
+ int block_start;
358
+
359
+ const VALUE *opt_table; /* (opt_num + 1) entries. */
360
+ /* opt_num and opt_table:
361
+ *
362
+ * def foo o1=e1, o2=e2, ..., oN=eN
363
+ * #=>
364
+ * # prologue code
365
+ * A1: e1
366
+ * A2: e2
367
+ * ...
368
+ * AN: eN
369
+ * AL: body
370
+ * opt_num = N
371
+ * opt_table = [A1, A2, ..., AN, AL]
372
+ */
373
+
374
+ const struct rb_iseq_param_keyword {
375
+ int num;
376
+ int required_num;
377
+ int bits_start;
378
+ int rest_start;
379
+ const ID *table;
380
+ VALUE *default_values;
381
+ } *keyword;
382
+ } param;
383
+
384
+ rb_iseq_location_t location;
385
+
386
+ /* insn info, must be freed */
387
+ struct iseq_insn_info {
388
+ const struct iseq_insn_info_entry *body;
389
+ unsigned int *positions;
390
+ unsigned int size;
391
+ #if VM_INSN_INFO_TABLE_IMPL == 2
392
+ struct succ_index_table *succ_index_table;
393
+ #endif
394
+ } insns_info;
395
+
396
+ const ID *local_table; /* must free */
397
+
398
+ /* catch table */
399
+ struct iseq_catch_table *catch_table;
400
+
401
+ /* for child iseq */
402
+ const struct rb_iseq_struct *parent_iseq;
403
+ struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
404
+
405
+ union iseq_inline_storage_entry *is_entries;
406
+ struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
407
+
408
+ struct {
409
+ rb_snum_t flip_count;
410
+ VALUE coverage;
411
+ VALUE pc2branchindex;
412
+ VALUE *original_iseq;
413
+ } variable;
414
+
415
+ unsigned int local_table_size;
416
+ unsigned int is_size;
417
+ unsigned int ci_size;
418
+ unsigned int stack_max; /* for stack overflow check */
419
+
420
+ char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
421
+ bool builtin_inline_p; // This ISeq's builtin func is safe to be inlined by MJIT
422
+ char access_outer_variables;
423
+
424
+ #if USE_MJIT
425
+ /* The following fields are MJIT related info. */
426
+ VALUE (*jit_func)(struct rb_execution_context_struct *,
427
+ struct rb_control_frame_struct *); /* function pointer for loaded native code */
428
+ long unsigned total_calls; /* number of total calls with `mjit_exec()` */
429
+ struct rb_mjit_unit *jit_unit;
430
+ #endif
431
+ };
432
+
433
+ /* T_IMEMO/iseq */
434
+ /* typedef rb_iseq_t is in method.h */
435
+ struct rb_iseq_struct {
436
+ VALUE flags; /* 1 */
437
+ VALUE wrapper; /* 2 */
438
+
439
+ struct rb_iseq_constant_body *body; /* 3 */
440
+
441
+ union { /* 4, 5 words */
442
+ struct iseq_compile_data *compile_data; /* used at compile time */
443
+
444
+ struct {
445
+ VALUE obj;
446
+ int index;
447
+ } loader;
448
+
449
+ struct {
450
+ struct rb_hook_list_struct *local_hooks;
451
+ rb_event_flag_t global_trace_events;
452
+ } exec;
453
+ } aux;
454
+ };
455
+
456
+ #ifndef USE_LAZY_LOAD
457
+ #define USE_LAZY_LOAD 0
458
+ #endif
459
+
460
+ #if USE_LAZY_LOAD
461
+ const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
462
+ #endif
463
+
464
+ static inline const rb_iseq_t *
465
+ rb_iseq_check(const rb_iseq_t *iseq)
466
+ {
467
+ #if USE_LAZY_LOAD
468
+ if (iseq->body == NULL) {
469
+ rb_iseq_complete((rb_iseq_t *)iseq);
470
+ }
471
+ #endif
472
+ return iseq;
473
+ }
474
+
475
+ static inline const rb_iseq_t *
476
+ def_iseq_ptr(rb_method_definition_t *def)
477
+ {
478
+ //TODO: re-visit. to check the bug, enable this assertion.
479
+ #if VM_CHECK_MODE > 0
480
+ if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
481
+ #endif
482
+ return rb_iseq_check(def->body.iseq.iseqptr);
483
+ }
484
+
485
+ enum ruby_special_exceptions {
486
+ ruby_error_reenter,
487
+ ruby_error_nomemory,
488
+ ruby_error_sysstack,
489
+ ruby_error_stackfatal,
490
+ ruby_error_stream_closed,
491
+ ruby_special_error_count
492
+ };
493
+
494
+ enum ruby_basic_operators {
495
+ BOP_PLUS,
496
+ BOP_MINUS,
497
+ BOP_MULT,
498
+ BOP_DIV,
499
+ BOP_MOD,
500
+ BOP_EQ,
501
+ BOP_EQQ,
502
+ BOP_LT,
503
+ BOP_LE,
504
+ BOP_LTLT,
505
+ BOP_AREF,
506
+ BOP_ASET,
507
+ BOP_LENGTH,
508
+ BOP_SIZE,
509
+ BOP_EMPTY_P,
510
+ BOP_NIL_P,
511
+ BOP_SUCC,
512
+ BOP_GT,
513
+ BOP_GE,
514
+ BOP_NOT,
515
+ BOP_NEQ,
516
+ BOP_MATCH,
517
+ BOP_FREEZE,
518
+ BOP_UMINUS,
519
+ BOP_MAX,
520
+ BOP_MIN,
521
+ BOP_CALL,
522
+ BOP_AND,
523
+ BOP_OR,
524
+
525
+ BOP_LAST_
526
+ };
527
+
528
+ #define GetVMPtr(obj, ptr) \
529
+ GetCoreDataFromValue((obj), rb_vm_t, (ptr))
530
+
531
+ struct rb_vm_struct;
532
+ typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
533
+
534
+ typedef struct rb_at_exit_list {
535
+ rb_vm_at_exit_func *func;
536
+ struct rb_at_exit_list *next;
537
+ } rb_at_exit_list;
538
+
539
+ struct rb_objspace;
540
+ struct rb_objspace *rb_objspace_alloc(void);
541
+ void rb_objspace_free(struct rb_objspace *);
542
+ void rb_objspace_call_finalizer(struct rb_objspace *);
543
+
544
+ typedef struct rb_hook_list_struct {
545
+ struct rb_event_hook_struct *hooks;
546
+ rb_event_flag_t events;
547
+ unsigned int need_clean;
548
+ unsigned int running;
549
+ } rb_hook_list_t;
550
+
551
+
552
+ // see builtin.h for definition
553
+ typedef const struct rb_builtin_function *RB_BUILTIN;
554
+
555
+ typedef struct rb_vm_struct {
556
+ VALUE self;
557
+
558
+ struct {
559
+ struct list_head set;
560
+ unsigned int cnt;
561
+ unsigned int blocking_cnt;
562
+
563
+ struct rb_ractor_struct *main_ractor;
564
+ struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
565
+
566
+ struct {
567
+ // monitor
568
+ rb_nativethread_lock_t lock;
569
+ struct rb_ractor_struct *lock_owner;
570
+ unsigned int lock_rec;
571
+
572
+ // barrier
573
+ bool barrier_waiting;
574
+ unsigned int barrier_cnt;
575
+ rb_nativethread_cond_t barrier_cond;
576
+
577
+ // join at exit
578
+ rb_nativethread_cond_t terminate_cond;
579
+ bool terminate_waiting;
580
+ } sync;
581
+ } ractor;
582
+
583
+ #ifdef USE_SIGALTSTACK
584
+ void *main_altstack;
585
+ #endif
586
+
587
+ rb_serial_t fork_gen;
588
+ rb_nativethread_lock_t waitpid_lock;
589
+ struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
590
+ struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
591
+ struct list_head waiting_fds; /* <=> struct waiting_fd */
592
+
593
+ /* set in single-threaded processes only: */
594
+ volatile int ubf_async_safe;
595
+
596
+ unsigned int running: 1;
597
+ unsigned int thread_abort_on_exception: 1;
598
+ unsigned int thread_report_on_exception: 1;
599
+ unsigned int safe_level_: 1;
600
+
601
+ /* object management */
602
+ VALUE mark_object_ary;
603
+ const VALUE special_exceptions[ruby_special_error_count];
604
+
605
+ /* load */
606
+ VALUE top_self;
607
+ VALUE load_path;
608
+ VALUE load_path_snapshot;
609
+ VALUE load_path_check_cache;
610
+ VALUE expanded_load_path;
611
+ VALUE loaded_features;
612
+ VALUE loaded_features_snapshot;
613
+ struct st_table *loaded_features_index;
614
+ struct st_table *loading_table;
615
+
616
+ /* signal */
617
+ struct {
618
+ VALUE cmd[RUBY_NSIG];
619
+ } trap_list;
620
+
621
+ /* hook */
622
+ rb_hook_list_t global_hooks;
623
+
624
+ /* relation table of ensure - rollback for callcc */
625
+ struct st_table *ensure_rollback_table;
626
+
627
+ /* postponed_job (async-signal-safe, NOT thread-safe) */
628
+ struct rb_postponed_job_struct *postponed_job_buffer;
629
+ rb_atomic_t postponed_job_index;
630
+
631
+ int src_encoding_index;
632
+
633
+ /* workqueue (thread-safe, NOT async-signal-safe) */
634
+ struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
635
+ rb_nativethread_lock_t workqueue_lock;
636
+
637
+ VALUE verbose, debug, orig_progname, progname;
638
+ VALUE coverages;
639
+ int coverage_mode;
640
+
641
+ st_table * defined_module_hash;
642
+
643
+ struct rb_objspace *objspace;
644
+
645
+ rb_at_exit_list *at_exit;
646
+
647
+ VALUE *defined_strings;
648
+ st_table *frozen_strings;
649
+
650
+ const struct rb_builtin_function *builtin_function_table;
651
+ int builtin_inline_index;
652
+
653
+ /* params */
654
+ struct { /* size in byte */
655
+ size_t thread_vm_stack_size;
656
+ size_t thread_machine_stack_size;
657
+ size_t fiber_vm_stack_size;
658
+ size_t fiber_machine_stack_size;
659
+ } default_params;
660
+
661
+ short redefined_flag[BOP_LAST_];
662
+ } rb_vm_t;
663
+
664
+ /* default values */
665
+
666
+ #define RUBY_VM_SIZE_ALIGN 4096
667
+
668
+ #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
669
+ #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
670
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
671
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
672
+
673
+ #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
674
+ #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
675
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
676
+ #if defined(__powerpc64__)
677
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
678
+ #else
679
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
680
+ #endif
681
+
682
+ #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
683
+ /* It seems sanitizers consume A LOT of machine stacks */
684
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
685
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
686
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
687
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
688
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
689
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
690
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
691
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
692
+ #endif
693
+
694
+ /* optimize insn */
695
+ #define INTEGER_REDEFINED_OP_FLAG (1 << 0)
696
+ #define FLOAT_REDEFINED_OP_FLAG (1 << 1)
697
+ #define STRING_REDEFINED_OP_FLAG (1 << 2)
698
+ #define ARRAY_REDEFINED_OP_FLAG (1 << 3)
699
+ #define HASH_REDEFINED_OP_FLAG (1 << 4)
700
+ /* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
701
+ #define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
702
+ #define TIME_REDEFINED_OP_FLAG (1 << 7)
703
+ #define REGEXP_REDEFINED_OP_FLAG (1 << 8)
704
+ #define NIL_REDEFINED_OP_FLAG (1 << 9)
705
+ #define TRUE_REDEFINED_OP_FLAG (1 << 10)
706
+ #define FALSE_REDEFINED_OP_FLAG (1 << 11)
707
+ #define PROC_REDEFINED_OP_FLAG (1 << 12)
708
+
709
+ #define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
710
+
711
+ #ifndef VM_DEBUG_BP_CHECK
712
+ #define VM_DEBUG_BP_CHECK 0
713
+ #endif
714
+
715
+ #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
716
+ #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
717
+ #endif
718
+
719
+ struct rb_captured_block {
720
+ VALUE self;
721
+ const VALUE *ep;
722
+ union {
723
+ const rb_iseq_t *iseq;
724
+ const struct vm_ifunc *ifunc;
725
+ VALUE val;
726
+ } code;
727
+ };
728
+
729
+ enum rb_block_handler_type {
730
+ block_handler_type_iseq,
731
+ block_handler_type_ifunc,
732
+ block_handler_type_symbol,
733
+ block_handler_type_proc
734
+ };
735
+
736
+ enum rb_block_type {
737
+ block_type_iseq,
738
+ block_type_ifunc,
739
+ block_type_symbol,
740
+ block_type_proc
741
+ };
742
+
743
+ struct rb_block {
744
+ union {
745
+ struct rb_captured_block captured;
746
+ VALUE symbol;
747
+ VALUE proc;
748
+ } as;
749
+ enum rb_block_type type;
750
+ };
751
+
752
+ typedef struct rb_control_frame_struct {
753
+ const VALUE *pc; /* cfp[0] */
754
+ VALUE *sp; /* cfp[1] */
755
+ const rb_iseq_t *iseq; /* cfp[2] */
756
+ VALUE self; /* cfp[3] / block[0] */
757
+ const VALUE *ep; /* cfp[4] / block[1] */
758
+ const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
759
+ VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
760
+
761
+ #if VM_DEBUG_BP_CHECK
762
+ VALUE *bp_check; /* cfp[7] */
763
+ #endif
764
+ } rb_control_frame_t;
765
+
766
+ extern const rb_data_type_t ruby_threadptr_data_type;
767
+
768
+ static inline struct rb_thread_struct *
769
+ rb_thread_ptr(VALUE thval)
770
+ {
771
+ return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
772
+ }
773
+
774
+ enum rb_thread_status {
775
+ THREAD_RUNNABLE,
776
+ THREAD_STOPPED,
777
+ THREAD_STOPPED_FOREVER,
778
+ THREAD_KILLED
779
+ };
780
+
781
+ #ifdef RUBY_JMP_BUF
782
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
783
+ #else
784
+ typedef void *rb_jmpbuf_t[5];
785
+ #endif
786
+
787
+ /*
788
+ the members which are written in EC_PUSH_TAG() should be placed at
789
+ the beginning and the end, so that entire region is accessible.
790
+ */
791
+ struct rb_vm_tag {
792
+ VALUE tag;
793
+ VALUE retval;
794
+ rb_jmpbuf_t buf;
795
+ struct rb_vm_tag *prev;
796
+ enum ruby_tag_type state;
797
+ };
798
+
799
+ STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
800
+ STATIC_ASSERT(rb_vm_tag_buf_end,
801
+ offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
802
+ sizeof(struct rb_vm_tag));
803
+
804
+ struct rb_vm_protect_tag {
805
+ struct rb_vm_protect_tag *prev;
806
+ };
807
+
808
+ struct rb_unblock_callback {
809
+ rb_unblock_function_t *func;
810
+ void *arg;
811
+ };
812
+
813
+ struct rb_mutex_struct;
814
+
815
+ typedef struct rb_ensure_entry {
816
+ VALUE marker;
817
+ VALUE (*e_proc)(VALUE);
818
+ VALUE data2;
819
+ } rb_ensure_entry_t;
820
+
821
+ typedef struct rb_ensure_list {
822
+ struct rb_ensure_list *next;
823
+ struct rb_ensure_entry entry;
824
+ } rb_ensure_list_t;
825
+
826
+ typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
827
+
828
+ typedef struct rb_fiber_struct rb_fiber_t;
829
+
830
+ struct rb_waiting_list {
831
+ struct rb_waiting_list *next;
832
+ struct rb_thread_struct *thread;
833
+ struct rb_fiber_struct *fiber;
834
+ };
835
+
836
+ struct rb_execution_context_struct {
837
+ /* execution information */
838
+ VALUE *vm_stack; /* must free, must mark */
839
+ size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
840
+ rb_control_frame_t *cfp;
841
+
842
+ struct rb_vm_tag *tag;
843
+ struct rb_vm_protect_tag *protect_tag;
844
+
845
+ /* interrupt flags */
846
+ rb_atomic_t interrupt_flag;
847
+ rb_atomic_t interrupt_mask; /* size should match flag */
848
+
849
+ rb_fiber_t *fiber_ptr;
850
+ struct rb_thread_struct *thread_ptr;
851
+
852
+ /* storage (ec (fiber) local) */
853
+ struct rb_id_table *local_storage;
854
+ VALUE local_storage_recursive_hash;
855
+ VALUE local_storage_recursive_hash_for_trace;
856
+
857
+ /* eval env */
858
+ const VALUE *root_lep;
859
+ VALUE root_svar;
860
+
861
+ /* ensure & callcc */
862
+ rb_ensure_list_t *ensure_list;
863
+
864
+ /* trace information */
865
+ struct rb_trace_arg_struct *trace_arg;
866
+
867
+ /* temporary places */
868
+ VALUE errinfo;
869
+ VALUE passed_block_handler; /* for rb_iterate */
870
+
871
+ uint8_t raised_flag; /* only 3 bits needed */
872
+
873
+ /* n.b. only 7 bits needed, really: */
874
+ BITFIELD(enum method_missing_reason, method_missing_reason, 8);
875
+
876
+ VALUE private_const_reference;
877
+
878
+ /* for GC */
879
+ struct {
880
+ VALUE *stack_start;
881
+ VALUE *stack_end;
882
+ size_t stack_maxsize;
883
+ RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
884
+ } machine;
885
+ };
886
+
887
+ #ifndef rb_execution_context_t
888
+ typedef struct rb_execution_context_struct rb_execution_context_t;
889
+ #define rb_execution_context_t rb_execution_context_t
890
+ #endif
891
+
892
+ // for builtin.h
893
+ #define VM_CORE_H_EC_DEFINED 1
894
+
895
+ // Set the vm_stack pointer in the execution context.
896
+ void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
897
+
898
+ // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
899
+ // @param ec the execution context to update.
900
+ // @param stack a pointer to the stack to use.
901
+ // @param size the size of the stack, as in `VALUE stack[size]`.
902
+ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
903
+
904
+ // Clear (set to `NULL`) the vm_stack pointer.
905
+ // @param ec the execution context to update.
906
+ void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
907
+
908
+ typedef struct rb_ractor_struct rb_ractor_t;
909
+
910
+ typedef struct rb_thread_struct {
911
+ struct list_node lt_node; // managed by a ractor
912
+ VALUE self;
913
+ rb_ractor_t *ractor;
914
+ rb_vm_t *vm;
915
+
916
+ rb_execution_context_t *ec;
917
+
918
+ VALUE last_status; /* $? */
919
+
920
+ /* for cfunc */
921
+ struct rb_calling_info *calling;
922
+
923
+ /* for load(true) */
924
+ VALUE top_self;
925
+ VALUE top_wrapper;
926
+
927
+ /* thread control */
928
+ rb_nativethread_id_t thread_id;
929
+ #ifdef NON_SCALAR_THREAD_ID
930
+ rb_thread_id_string_t thread_id_string;
931
+ #endif
932
+ BITFIELD(enum rb_thread_status, status, 2);
933
+ /* bit flags */
934
+ unsigned int to_kill : 1;
935
+ unsigned int abort_on_exception: 1;
936
+ unsigned int report_on_exception: 1;
937
+ unsigned int pending_interrupt_queue_checked: 1;
938
+ int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
939
+ uint32_t running_time_us; /* 12500..800000 */
940
+
941
+ native_thread_data_t native_thread_data;
942
+ void *blocking_region_buffer;
943
+
944
+ VALUE thgroup;
945
+ VALUE value;
946
+
947
+ /* temporary place of retval on OPT_CALL_THREADED_CODE */
948
+ #if OPT_CALL_THREADED_CODE
949
+ VALUE retval;
950
+ #endif
951
+
952
+ /* async errinfo queue */
953
+ VALUE pending_interrupt_queue;
954
+ VALUE pending_interrupt_mask_stack;
955
+
956
+ /* interrupt management */
957
+ rb_nativethread_lock_t interrupt_lock;
958
+ struct rb_unblock_callback unblock;
959
+ VALUE locking_mutex;
960
+ struct rb_mutex_struct *keeping_mutexes;
961
+
962
+ struct rb_waiting_list *join_list;
963
+
964
+ union {
965
+ struct {
966
+ VALUE proc;
967
+ VALUE args;
968
+ int kw_splat;
969
+ } proc;
970
+ struct {
971
+ VALUE (*func)(void *);
972
+ void *arg;
973
+ } func;
974
+ } invoke_arg;
975
+
976
+ enum thread_invoke_type {
977
+ thread_invoke_type_none = 0,
978
+ thread_invoke_type_proc,
979
+ thread_invoke_type_ractor_proc,
980
+ thread_invoke_type_func
981
+ } invoke_type;
982
+
983
+ /* statistics data for profiler */
984
+ VALUE stat_insn_usage;
985
+
986
+ /* fiber */
987
+ rb_fiber_t *root_fiber;
988
+ rb_jmpbuf_t root_jmpbuf;
989
+
990
+ VALUE scheduler;
991
+ unsigned blocking;
992
+
993
+ /* misc */
994
+ VALUE name;
995
+
996
+ #ifdef USE_SIGALTSTACK
997
+ void *altstack;
998
+ #endif
999
+ } rb_thread_t;
1000
+
1001
+ typedef enum {
1002
+ VM_DEFINECLASS_TYPE_CLASS = 0x00,
1003
+ VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1004
+ VM_DEFINECLASS_TYPE_MODULE = 0x02,
1005
+ /* 0x03..0x06 is reserved */
1006
+ VM_DEFINECLASS_TYPE_MASK = 0x07
1007
+ } rb_vm_defineclass_type_t;
1008
+
1009
+ #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1010
+ #define VM_DEFINECLASS_FLAG_SCOPED 0x08
1011
+ #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1012
+ #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1013
+ #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1014
+ ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1015
+
1016
+ /* iseq.c */
1017
+ RUBY_SYMBOL_EXPORT_BEGIN
1018
+
1019
+ /* node -> iseq */
1020
+ rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
1021
+ rb_iseq_t *rb_iseq_new_top (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1022
+ rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1023
+ rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1024
+ const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1025
+ struct iseq_link_anchor;
1026
+ struct rb_iseq_new_with_callback_callback_func {
1027
+ VALUE flags;
1028
+ VALUE reserved;
1029
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1030
+ const void *data;
1031
+ };
1032
+ static inline struct rb_iseq_new_with_callback_callback_func *
1033
+ rb_iseq_new_with_callback_new_callback(
1034
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1035
+ {
1036
+ VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
1037
+ return (struct rb_iseq_new_with_callback_callback_func *)memo;
1038
+ }
1039
+ rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1040
+ VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1041
+ const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1042
+
1043
+ VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1044
+ int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1045
+
1046
+ VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1047
+
1048
+ RUBY_EXTERN VALUE rb_cISeq;
1049
+ RUBY_EXTERN VALUE rb_cRubyVM;
1050
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1051
+ RUBY_EXTERN VALUE rb_block_param_proxy;
1052
+ RUBY_SYMBOL_EXPORT_END
1053
+
1054
+ #define GetProcPtr(obj, ptr) \
1055
+ GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1056
+
1057
+ typedef struct {
1058
+ const struct rb_block block;
1059
+ unsigned int is_from_method: 1; /* bool */
1060
+ unsigned int is_lambda: 1; /* bool */
1061
+ unsigned int is_isolated: 1; /* bool */
1062
+ } rb_proc_t;
1063
+
1064
+ VALUE rb_proc_isolate(VALUE self);
1065
+ VALUE rb_proc_isolate_bang(VALUE self);
1066
+
1067
+ typedef struct {
1068
+ VALUE flags; /* imemo header */
1069
+ rb_iseq_t *iseq;
1070
+ const VALUE *ep;
1071
+ const VALUE *env;
1072
+ unsigned int env_size;
1073
+ } rb_env_t;
1074
+
1075
+ extern const rb_data_type_t ruby_binding_data_type;
1076
+
1077
+ #define GetBindingPtr(obj, ptr) \
1078
+ GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1079
+
1080
+ typedef struct {
1081
+ const struct rb_block block;
1082
+ const VALUE pathobj;
1083
+ unsigned short first_lineno;
1084
+ } rb_binding_t;
1085
+
1086
+ /* used by compile time and send insn */
1087
+
1088
+ enum vm_check_match_type {
1089
+ VM_CHECKMATCH_TYPE_WHEN = 1,
1090
+ VM_CHECKMATCH_TYPE_CASE = 2,
1091
+ VM_CHECKMATCH_TYPE_RESCUE = 3
1092
+ };
1093
+
1094
+ #define VM_CHECKMATCH_TYPE_MASK 0x03
1095
+ #define VM_CHECKMATCH_ARRAY 0x04
1096
+
1097
+ enum vm_special_object_type {
1098
+ VM_SPECIAL_OBJECT_VMCORE = 1,
1099
+ VM_SPECIAL_OBJECT_CBASE,
1100
+ VM_SPECIAL_OBJECT_CONST_BASE
1101
+ };
1102
+
1103
+ enum vm_svar_index {
1104
+ VM_SVAR_LASTLINE = 0, /* $_ */
1105
+ VM_SVAR_BACKREF = 1, /* $~ */
1106
+
1107
+ VM_SVAR_EXTRA_START = 2,
1108
+ VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1109
+ };
1110
+
1111
+ /* inline cache */
1112
+ typedef struct iseq_inline_cache_entry *IC;
1113
+ typedef struct iseq_inline_iv_cache_entry *IVC;
1114
+ typedef union iseq_inline_storage_entry *ISE;
1115
+ typedef const struct rb_callinfo *CALL_INFO;
1116
+ typedef const struct rb_callcache *CALL_CACHE;
1117
+ typedef struct rb_call_data *CALL_DATA;
1118
+
1119
+ typedef VALUE CDHASH;
1120
+
1121
+ #ifndef FUNC_FASTCALL
1122
+ #define FUNC_FASTCALL(x) x
1123
+ #endif
1124
+
1125
+ typedef rb_control_frame_t *
1126
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1127
+
1128
+ #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1129
+ #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1130
+
1131
+ #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1132
+ #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1133
+ #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1134
+
1135
+ enum {
1136
+ /* Frame/Environment flag bits:
1137
+ * MMMM MMMM MMMM MMMM ____ _FFF FFFF EEEX (LSB)
1138
+ *
1139
+ * X : tag for GC marking (It seems as Fixnum)
1140
+ * EEE : 3 bits Env flags
1141
+ * FF..: 7 bits Frame flags
1142
+ * MM..: 15 bits frame magic (to check frame corruption)
1143
+ */
1144
+
1145
+ /* frame types */
1146
+ VM_FRAME_MAGIC_METHOD = 0x11110001,
1147
+ VM_FRAME_MAGIC_BLOCK = 0x22220001,
1148
+ VM_FRAME_MAGIC_CLASS = 0x33330001,
1149
+ VM_FRAME_MAGIC_TOP = 0x44440001,
1150
+ VM_FRAME_MAGIC_CFUNC = 0x55550001,
1151
+ VM_FRAME_MAGIC_IFUNC = 0x66660001,
1152
+ VM_FRAME_MAGIC_EVAL = 0x77770001,
1153
+ VM_FRAME_MAGIC_RESCUE = 0x78880001,
1154
+ VM_FRAME_MAGIC_DUMMY = 0x79990001,
1155
+
1156
+ VM_FRAME_MAGIC_MASK = 0x7fff0001,
1157
+
1158
+ /* frame flag */
1159
+ VM_FRAME_FLAG_PASSED = 0x0010,
1160
+ VM_FRAME_FLAG_FINISH = 0x0020,
1161
+ VM_FRAME_FLAG_BMETHOD = 0x0040,
1162
+ VM_FRAME_FLAG_CFRAME = 0x0080,
1163
+ VM_FRAME_FLAG_LAMBDA = 0x0100,
1164
+ VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1165
+ VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1166
+
1167
+ /* env flag */
1168
+ VM_ENV_FLAG_LOCAL = 0x0002,
1169
+ VM_ENV_FLAG_ESCAPED = 0x0004,
1170
+ VM_ENV_FLAG_WB_REQUIRED = 0x0008
1171
+ };
1172
+
1173
+ #define VM_ENV_DATA_SIZE ( 3)
1174
+
1175
+ #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1176
+ #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1177
+ #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1178
+ #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1179
+
1180
+ #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1181
+
1182
+ static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1183
+
1184
+ static inline void
1185
+ VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1186
+ {
1187
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1188
+ VM_ASSERT(FIXNUM_P(flags));
1189
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1190
+ }
1191
+
1192
+ static inline void
1193
+ VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1194
+ {
1195
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1196
+ VM_ASSERT(FIXNUM_P(flags));
1197
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1198
+ }
1199
+
1200
+ static inline unsigned long
1201
+ VM_ENV_FLAGS(const VALUE *ep, long flag)
1202
+ {
1203
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1204
+ VM_ASSERT(FIXNUM_P(flags));
1205
+ return flags & flag;
1206
+ }
1207
+
1208
+ static inline unsigned long
1209
+ VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1210
+ {
1211
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1212
+ }
1213
+
1214
+ static inline int
1215
+ VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1216
+ {
1217
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1218
+ }
1219
+
1220
+ static inline int
1221
+ VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1222
+ {
1223
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1224
+ }
1225
+
1226
+ static inline int
1227
+ VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1228
+ {
1229
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1230
+ }
1231
+
1232
+ static inline int
1233
+ VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1234
+ {
1235
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1236
+ }
1237
+
1238
+ static inline int
1239
+ rb_obj_is_iseq(VALUE iseq)
1240
+ {
1241
+ return imemo_type_p(iseq, imemo_iseq);
1242
+ }
1243
+
1244
+ #if VM_CHECK_MODE > 0
1245
+ #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1246
+ #endif
1247
+
1248
+ static inline int
1249
+ VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1250
+ {
1251
+ int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1252
+ VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
1253
+ return cframe_p;
1254
+ }
1255
+
1256
+ static inline int
1257
+ VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1258
+ {
1259
+ return !VM_FRAME_CFRAME_P(cfp);
1260
+ }
1261
+
1262
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
1263
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1264
+
1265
+ #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1266
+ #define VM_BLOCK_HANDLER_NONE 0
1267
+
1268
+ static inline int
1269
+ VM_ENV_LOCAL_P(const VALUE *ep)
1270
+ {
1271
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1272
+ }
1273
+
1274
+ static inline const VALUE *
1275
+ VM_ENV_PREV_EP(const VALUE *ep)
1276
+ {
1277
+ VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1278
+ return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1279
+ }
1280
+
1281
+ static inline VALUE
1282
+ VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1283
+ {
1284
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
1285
+ return ep[VM_ENV_DATA_INDEX_SPECVAL];
1286
+ }
1287
+
1288
+ #if VM_CHECK_MODE > 0
1289
+ int rb_vm_ep_in_heap_p(const VALUE *ep);
1290
+ #endif
1291
+
1292
+ static inline int
1293
+ VM_ENV_ESCAPED_P(const VALUE *ep)
1294
+ {
1295
+ VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1296
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1297
+ }
1298
+
1299
+ #if VM_CHECK_MODE > 0
1300
+ static inline int
1301
+ vm_assert_env(VALUE obj)
1302
+ {
1303
+ VM_ASSERT(imemo_type_p(obj, imemo_env));
1304
+ return 1;
1305
+ }
1306
+ #endif
1307
+
1308
+ static inline VALUE
1309
+ VM_ENV_ENVVAL(const VALUE *ep)
1310
+ {
1311
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1312
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1313
+ VM_ASSERT(vm_assert_env(envval));
1314
+ return envval;
1315
+ }
1316
+
1317
+ static inline const rb_env_t *
1318
+ VM_ENV_ENVVAL_PTR(const VALUE *ep)
1319
+ {
1320
+ return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1321
+ }
1322
+
1323
+ static inline const rb_env_t *
1324
+ vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1325
+ {
1326
+ rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
1327
+ env->env_size = env_size;
1328
+ env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1329
+ return env;
1330
+ }
1331
+
1332
+ static inline void
1333
+ VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1334
+ {
1335
+ *((VALUE *)ptr) = v;
1336
+ }
1337
+
1338
+ static inline void
1339
+ VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1340
+ {
1341
+ VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1342
+ VM_FORCE_WRITE(ptr, special_const_value);
1343
+ }
1344
+
1345
+ static inline void
1346
+ VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1347
+ {
1348
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1349
+ VM_FORCE_WRITE(&ep[index], v);
1350
+ }
1351
+
1352
+ const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1353
+ const VALUE *rb_vm_proc_local_ep(VALUE proc);
1354
+ void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1355
+ void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1356
+
1357
+ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1358
+
1359
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1360
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1361
+
1362
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1363
+ ((void *)(ecfp) > (void *)(cfp))
1364
+
1365
+ static inline const rb_control_frame_t *
1366
+ RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1367
+ {
1368
+ return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1369
+ }
1370
+
1371
+ static inline int
1372
+ RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1373
+ {
1374
+ return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1375
+ }
1376
+
1377
+ static inline int
1378
+ VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1379
+ {
1380
+ if ((block_handler & 0x03) == 0x01) {
1381
+ #if VM_CHECK_MODE > 0
1382
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1383
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1384
+ #endif
1385
+ return 1;
1386
+ }
1387
+ else {
1388
+ return 0;
1389
+ }
1390
+ }
1391
+
1392
+ static inline VALUE
1393
+ VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1394
+ {
1395
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1396
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1397
+ return block_handler;
1398
+ }
1399
+
1400
+ static inline const struct rb_captured_block *
1401
+ VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1402
+ {
1403
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1404
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1405
+ return captured;
1406
+ }
1407
+
1408
+ static inline int
1409
+ VM_BH_IFUNC_P(VALUE block_handler)
1410
+ {
1411
+ if ((block_handler & 0x03) == 0x03) {
1412
+ #if VM_CHECK_MODE > 0
1413
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1414
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1415
+ #endif
1416
+ return 1;
1417
+ }
1418
+ else {
1419
+ return 0;
1420
+ }
1421
+ }
1422
+
1423
+ static inline VALUE
1424
+ VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1425
+ {
1426
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1427
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1428
+ return block_handler;
1429
+ }
1430
+
1431
+ static inline const struct rb_captured_block *
1432
+ VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1433
+ {
1434
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1435
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1436
+ return captured;
1437
+ }
1438
+
1439
+ static inline const struct rb_captured_block *
1440
+ VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1441
+ {
1442
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1443
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1444
+ return captured;
1445
+ }
1446
+
1447
+ static inline enum rb_block_handler_type
1448
+ vm_block_handler_type(VALUE block_handler)
1449
+ {
1450
+ if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1451
+ return block_handler_type_iseq;
1452
+ }
1453
+ else if (VM_BH_IFUNC_P(block_handler)) {
1454
+ return block_handler_type_ifunc;
1455
+ }
1456
+ else if (SYMBOL_P(block_handler)) {
1457
+ return block_handler_type_symbol;
1458
+ }
1459
+ else {
1460
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1461
+ return block_handler_type_proc;
1462
+ }
1463
+ }
1464
+
1465
+ static inline void
1466
+ vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1467
+ {
1468
+ VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1469
+ (vm_block_handler_type(block_handler), 1));
1470
+ }
1471
+
1472
+ static inline int
1473
+ vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
1474
+ {
1475
+ return ((VALUE) cfp->block_code) == block_handler;
1476
+ }
1477
+
1478
+ static inline enum rb_block_type
1479
+ vm_block_type(const struct rb_block *block)
1480
+ {
1481
+ #if VM_CHECK_MODE > 0
1482
+ switch (block->type) {
1483
+ case block_type_iseq:
1484
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1485
+ break;
1486
+ case block_type_ifunc:
1487
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1488
+ break;
1489
+ case block_type_symbol:
1490
+ VM_ASSERT(SYMBOL_P(block->as.symbol));
1491
+ break;
1492
+ case block_type_proc:
1493
+ VM_ASSERT(rb_obj_is_proc(block->as.proc));
1494
+ break;
1495
+ }
1496
+ #endif
1497
+ return block->type;
1498
+ }
1499
+
1500
+ static inline void
1501
+ vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1502
+ {
1503
+ struct rb_block *mb = (struct rb_block *)block;
1504
+ mb->type = type;
1505
+ }
1506
+
1507
+ static inline const struct rb_block *
1508
+ vm_proc_block(VALUE procval)
1509
+ {
1510
+ VM_ASSERT(rb_obj_is_proc(procval));
1511
+ return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1512
+ }
1513
+
1514
+ static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1515
+ static inline const VALUE *vm_block_ep(const struct rb_block *block);
1516
+
1517
+ static inline const rb_iseq_t *
1518
+ vm_proc_iseq(VALUE procval)
1519
+ {
1520
+ return vm_block_iseq(vm_proc_block(procval));
1521
+ }
1522
+
1523
+ static inline const VALUE *
1524
+ vm_proc_ep(VALUE procval)
1525
+ {
1526
+ return vm_block_ep(vm_proc_block(procval));
1527
+ }
1528
+
1529
+ static inline const rb_iseq_t *
1530
+ vm_block_iseq(const struct rb_block *block)
1531
+ {
1532
+ switch (vm_block_type(block)) {
1533
+ case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1534
+ case block_type_proc: return vm_proc_iseq(block->as.proc);
1535
+ case block_type_ifunc:
1536
+ case block_type_symbol: return NULL;
1537
+ }
1538
+ VM_UNREACHABLE(vm_block_iseq);
1539
+ return NULL;
1540
+ }
1541
+
1542
+ static inline const VALUE *
1543
+ vm_block_ep(const struct rb_block *block)
1544
+ {
1545
+ switch (vm_block_type(block)) {
1546
+ case block_type_iseq:
1547
+ case block_type_ifunc: return block->as.captured.ep;
1548
+ case block_type_proc: return vm_proc_ep(block->as.proc);
1549
+ case block_type_symbol: return NULL;
1550
+ }
1551
+ VM_UNREACHABLE(vm_block_ep);
1552
+ return NULL;
1553
+ }
1554
+
1555
+ static inline VALUE
1556
+ vm_block_self(const struct rb_block *block)
1557
+ {
1558
+ switch (vm_block_type(block)) {
1559
+ case block_type_iseq:
1560
+ case block_type_ifunc:
1561
+ return block->as.captured.self;
1562
+ case block_type_proc:
1563
+ return vm_block_self(vm_proc_block(block->as.proc));
1564
+ case block_type_symbol:
1565
+ return Qundef;
1566
+ }
1567
+ VM_UNREACHABLE(vm_block_self);
1568
+ return Qundef;
1569
+ }
1570
+
1571
+ static inline VALUE
1572
+ VM_BH_TO_SYMBOL(VALUE block_handler)
1573
+ {
1574
+ VM_ASSERT(SYMBOL_P(block_handler));
1575
+ return block_handler;
1576
+ }
1577
+
1578
+ static inline VALUE
1579
+ VM_BH_FROM_SYMBOL(VALUE symbol)
1580
+ {
1581
+ VM_ASSERT(SYMBOL_P(symbol));
1582
+ return symbol;
1583
+ }
1584
+
1585
+ static inline VALUE
1586
+ VM_BH_TO_PROC(VALUE block_handler)
1587
+ {
1588
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1589
+ return block_handler;
1590
+ }
1591
+
1592
+ static inline VALUE
1593
+ VM_BH_FROM_PROC(VALUE procval)
1594
+ {
1595
+ VM_ASSERT(rb_obj_is_proc(procval));
1596
+ return procval;
1597
+ }
1598
+
1599
+ /* VM related object allocate functions */
1600
+ VALUE rb_thread_alloc(VALUE klass);
1601
+ VALUE rb_binding_alloc(VALUE klass);
1602
+ VALUE rb_proc_alloc(VALUE klass);
1603
+ VALUE rb_proc_dup(VALUE self);
1604
+
1605
+ /* for debug */
1606
+ extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1607
+ extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1608
+ extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
1609
+ #if OPT_STACK_CACHING
1610
+ , VALUE reg_a, VALUE reg_b
1611
+ #endif
1612
+ );
1613
+
1614
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1615
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1616
+ void rb_vm_bugreport(const void *);
1617
+ typedef RETSIGTYPE (*ruby_sighandler_t)(int);
1618
+ NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1619
+
1620
+ /* functions about thread/vm execution */
1621
+ RUBY_SYMBOL_EXPORT_BEGIN
1622
+ VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1623
+ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1624
+ VALUE rb_iseq_path(const rb_iseq_t *iseq);
1625
+ VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1626
+ RUBY_SYMBOL_EXPORT_END
1627
+
1628
+ VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1629
+ void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1630
+
1631
+ int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1632
+ void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1633
+
1634
+ VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1635
+
1636
+ VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1637
+ static inline VALUE
1638
+ rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1639
+ {
1640
+ return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1641
+ }
1642
+
1643
+ static inline VALUE
1644
+ rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1645
+ {
1646
+ return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1647
+ }
1648
+
1649
+ VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1650
+ VALUE rb_vm_env_local_variables(const rb_env_t *env);
1651
+ const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1652
+ const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1653
+ void rb_vm_inc_const_missing_count(void);
1654
+ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1655
+ const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1656
+ MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
1657
+
1658
+ void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
1659
+
1660
+ void rb_thread_start_timer_thread(void);
1661
+ void rb_thread_stop_timer_thread(void);
1662
+ void rb_thread_reset_timer_thread(void);
1663
+ void rb_thread_wakeup_timer_thread(int);
1664
+
1665
+ static inline void
1666
+ rb_vm_living_threads_init(rb_vm_t *vm)
1667
+ {
1668
+ list_head_init(&vm->waiting_fds);
1669
+ list_head_init(&vm->waiting_pids);
1670
+ list_head_init(&vm->workqueue);
1671
+ list_head_init(&vm->waiting_grps);
1672
+ list_head_init(&vm->ractor.set);
1673
+ }
1674
+
1675
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1676
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1677
+ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1678
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
1679
+ void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1680
+ void ruby_thread_init_stack(rb_thread_t *th);
1681
+ int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1682
+ void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1683
+ MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1684
+
1685
+ void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1686
+
1687
+ #define rb_vm_register_special_exception(sp, e, m) \
1688
+ rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1689
+
1690
+ void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
1691
+
1692
+ void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1693
+
1694
+ MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1695
+
1696
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1697
+
1698
+ #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1699
+ STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1700
+ STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1701
+ const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1702
+ if (UNLIKELY((cfp) <= &bound[1])) { \
1703
+ vm_stackoverflow(); \
1704
+ } \
1705
+ } while (0)
1706
+
1707
+ #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1708
+ CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1709
+
1710
+ VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1711
+
1712
+ rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1713
+
1714
+ /* for thread */
1715
+
1716
+ #if RUBY_VM_THREAD_MODEL == 2
1717
+ RUBY_SYMBOL_EXPORT_BEGIN
1718
+
1719
+ RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1720
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1721
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1722
+ RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1723
+
1724
+ RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
1725
+
1726
+ RUBY_SYMBOL_EXPORT_END
1727
+
1728
+ #define GET_VM() rb_current_vm()
1729
+ #define GET_RACTOR() rb_current_ractor()
1730
+ #define GET_THREAD() rb_current_thread()
1731
+ #define GET_EC() rb_current_execution_context()
1732
+
1733
+ static inline rb_thread_t *
1734
+ rb_ec_thread_ptr(const rb_execution_context_t *ec)
1735
+ {
1736
+ return ec->thread_ptr;
1737
+ }
1738
+
1739
+ static inline rb_ractor_t *
1740
+ rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1741
+ {
1742
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
1743
+ if (th) {
1744
+ VM_ASSERT(th->ractor != NULL);
1745
+ return th->ractor;
1746
+ }
1747
+ else {
1748
+ return NULL;
1749
+ }
1750
+ }
1751
+
1752
+ static inline rb_vm_t *
1753
+ rb_ec_vm_ptr(const rb_execution_context_t *ec)
1754
+ {
1755
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
1756
+ if (th) {
1757
+ return th->vm;
1758
+ }
1759
+ else {
1760
+ return NULL;
1761
+ }
1762
+ }
1763
+
1764
+ static inline rb_execution_context_t *
1765
+ rb_current_execution_context(void)
1766
+ {
1767
+ rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1768
+ VM_ASSERT(ec != NULL);
1769
+ return ec;
1770
+ }
1771
+
1772
+ static inline rb_thread_t *
1773
+ rb_current_thread(void)
1774
+ {
1775
+ const rb_execution_context_t *ec = GET_EC();
1776
+ return rb_ec_thread_ptr(ec);
1777
+ }
1778
+
1779
+ static inline rb_ractor_t *
1780
+ rb_current_ractor(void)
1781
+ {
1782
+ const rb_execution_context_t *ec = GET_EC();
1783
+ return rb_ec_ractor_ptr(ec);
1784
+ }
1785
+
1786
/* Return the process-wide VM pointer. */
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}
1799
+
1800
#else
#error "unsupported thread model"
#endif

/* Bit flags stored in rb_execution_context_t::interrupt_flag. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

/* Atomically raise a single interrupt bit on an execution context. */
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
/* Nonzero when a pending or trap interrupt is raised and not blocked by
 * the context's interrupt_mask. */
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
/* Nonzero when any unmasked interrupt bit is raised. */
#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
1822
+
1823
/* Signal delivery and per-thread interrupt plumbing. */
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
/* Execution-context lifecycle helpers (error info, GC update/mark, fibers). */
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(const rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
1845
+
1846
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
/* Execute any raised, unmasked interrupts on the current execution context.
 * The common no-interrupt case is hinted cold via UNLIKELY. */
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
1855
+
1856
/* tracer */

/* Argument bundle handed to event hooks via rb_exec_event_hooks(). */
struct rb_trace_arg_struct {
    rb_event_flag_t event;          /* event flag being dispatched */
    rb_execution_context_t *ec;     /* context the event fired on */
    const rb_control_frame_t *cfp;  /* control frame at the time of the event */
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;               /* set once klass has been determined */

    /* calc from cfp */
    int lineno;
    VALUE path;                     /* Qundef until computed from cfp */
};
1874
+
1875
/* Hook-list maintenance (GC marking, freeing, tracepoint (dis)connection). */
void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

/* Dispatch an event only when some hook in hooks_ subscribes to flag_;
 * the cheap events-bitmask test guards the expensive call. */
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
1890
+
1891
+ static inline void
1892
+ rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
1893
+ VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
1894
+ {
1895
+ struct rb_trace_arg_struct trace_arg;
1896
+
1897
+ VM_ASSERT((hooks->events & flag) != 0);
1898
+
1899
+ trace_arg.event = flag;
1900
+ trace_arg.ec = ec;
1901
+ trace_arg.cfp = ec->cfp;
1902
+ trace_arg.self = self;
1903
+ trace_arg.id = id;
1904
+ trace_arg.called_id = called_id;
1905
+ trace_arg.klass = klass;
1906
+ trace_arg.data = data;
1907
+ trace_arg.path = Qundef;
1908
+ trace_arg.klass_solved = 0;
1909
+
1910
+ rb_exec_event_hooks(&trace_arg, hooks, pop_p);
1911
+ }
1912
+
1913
+ static inline rb_hook_list_t *
1914
+ rb_vm_global_hooks(const rb_execution_context_t *ec)
1915
+ {
1916
+ return &rb_ec_vm_ptr(ec)->global_hooks;
1917
+ }
1918
+
1919
/* Fire an event against the global hook list, keeping the current frame. */
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

/* Same, but with pop_p set so the frame is popped during dispatch. */
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
1924
+
1925
/* Fire RUBY_EVENT_SCRIPT_COMPILED. The event data is the iseq alone when
 * there is no eval source string, otherwise [eval_script, iseq]. */
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
1932
+
1933
void rb_vm_trap_exit(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
/* Internal event bits used by the coverage measurement facility. */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

/* Coverage bookkeeping (exported for ext/coverage). */
extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */