debase-ruby_core_source 0.10.6 → 0.10.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -0
  3. data/lib/debase/ruby_core_source.rb +2 -1
  4. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/addr2line.h +21 -0
  5. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ccan/build_assert/build_assert.h +40 -0
  6. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ccan/check_type/check_type.h +63 -0
  7. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ccan/container_of/container_of.h +142 -0
  8. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ccan/list/list.h +788 -0
  9. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ccan/str/str.h +16 -0
  10. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/constant.h +54 -0
  11. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/debug_counter.h +378 -0
  12. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/dln.h +51 -0
  13. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/encindex.h +69 -0
  14. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/eval_intern.h +314 -0
  15. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/gc.h +129 -0
  16. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/hrtime.h +168 -0
  17. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/id.h +292 -0
  18. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/id_table.h +34 -0
  19. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/insns.inc +249 -0
  20. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/insns_info.inc +9614 -0
  21. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/internal.h +2682 -0
  22. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/iseq.h +312 -0
  23. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/known_errors.inc +791 -0
  24. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/method.h +232 -0
  25. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/mjit.h +179 -0
  26. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/mjit_compile.inc +7883 -0
  27. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/node.h +483 -0
  28. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/node_name.inc +210 -0
  29. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/opt_sc.inc +109 -0
  30. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/optinsn.inc +128 -0
  31. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/optunifs.inc +43 -0
  32. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/parse.h +210 -0
  33. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/probes_helper.h +42 -0
  34. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/regenc.h +254 -0
  35. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/regint.h +938 -0
  36. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/regparse.h +370 -0
  37. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/revision.h +2 -0
  38. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ruby_assert.h +15 -0
  39. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/ruby_atomic.h +244 -0
  40. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/siphash.h +48 -0
  41. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/symbol.h +119 -0
  42. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/thread_pthread.h +75 -0
  43. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/thread_win32.h +36 -0
  44. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/timev.h +56 -0
  45. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/transcode_data.h +139 -0
  46. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/transient_heap.h +62 -0
  47. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/version.h +77 -0
  48. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm.inc +5269 -0
  49. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_call_iseq_optimized.inc +237 -0
  50. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_core.h +1955 -0
  51. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_debug.h +34 -0
  52. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_exec.h +193 -0
  53. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_insnhelper.h +255 -0
  54. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vm_opts.h +70 -0
  55. data/lib/debase/ruby_core_source/ruby-2.7.0-preview2/vmtc.inc +243 -0
  56. data/lib/debase/ruby_core_source/version.rb +1 -1
  57. metadata +55 -3
@@ -0,0 +1,237 @@
1
+ /* -*- c -*- */
2
+ #if 1 /* enable or disable this optimization */
3
+
4
+ /* DO NOT EDIT THIS FILE DIRECTLY
5
+ *
6
+ * This file is generated by tool/mk_call_iseq_optimized.rb
7
+ */
8
+
9
+ static VALUE
10
+ vm_call_iseq_setup_normal_0start_0params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
11
+ {
12
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
13
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 0);
14
+ }
15
+
16
+ static VALUE
17
+ vm_call_iseq_setup_normal_0start_0params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
18
+ {
19
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
20
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 1);
21
+ }
22
+
23
+ static VALUE
24
+ vm_call_iseq_setup_normal_0start_0params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
25
+ {
26
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
27
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 2);
28
+ }
29
+
30
+ static VALUE
31
+ vm_call_iseq_setup_normal_0start_0params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
32
+ {
33
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
34
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 3);
35
+ }
36
+
37
+ static VALUE
38
+ vm_call_iseq_setup_normal_0start_0params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
39
+ {
40
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
41
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 4);
42
+ }
43
+
44
+ static VALUE
45
+ vm_call_iseq_setup_normal_0start_0params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
46
+ {
47
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
48
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 0, 5);
49
+ }
50
+
51
+ static VALUE
52
+ vm_call_iseq_setup_normal_0start_1params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
53
+ {
54
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
55
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 0);
56
+ }
57
+
58
+ static VALUE
59
+ vm_call_iseq_setup_normal_0start_1params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
60
+ {
61
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
62
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 1);
63
+ }
64
+
65
+ static VALUE
66
+ vm_call_iseq_setup_normal_0start_1params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
67
+ {
68
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
69
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 2);
70
+ }
71
+
72
+ static VALUE
73
+ vm_call_iseq_setup_normal_0start_1params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
74
+ {
75
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
76
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 3);
77
+ }
78
+
79
+ static VALUE
80
+ vm_call_iseq_setup_normal_0start_1params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
81
+ {
82
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
83
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 4);
84
+ }
85
+
86
+ static VALUE
87
+ vm_call_iseq_setup_normal_0start_1params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
88
+ {
89
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
90
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 1, 5);
91
+ }
92
+
93
+ static VALUE
94
+ vm_call_iseq_setup_normal_0start_2params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
95
+ {
96
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
97
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 0);
98
+ }
99
+
100
+ static VALUE
101
+ vm_call_iseq_setup_normal_0start_2params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
102
+ {
103
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
104
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 1);
105
+ }
106
+
107
+ static VALUE
108
+ vm_call_iseq_setup_normal_0start_2params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
109
+ {
110
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
111
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 2);
112
+ }
113
+
114
+ static VALUE
115
+ vm_call_iseq_setup_normal_0start_2params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
116
+ {
117
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
118
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 3);
119
+ }
120
+
121
+ static VALUE
122
+ vm_call_iseq_setup_normal_0start_2params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
123
+ {
124
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
125
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 4);
126
+ }
127
+
128
+ static VALUE
129
+ vm_call_iseq_setup_normal_0start_2params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
130
+ {
131
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
132
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 2, 5);
133
+ }
134
+
135
+ static VALUE
136
+ vm_call_iseq_setup_normal_0start_3params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
137
+ {
138
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
139
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 0);
140
+ }
141
+
142
+ static VALUE
143
+ vm_call_iseq_setup_normal_0start_3params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
144
+ {
145
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
146
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 1);
147
+ }
148
+
149
+ static VALUE
150
+ vm_call_iseq_setup_normal_0start_3params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
151
+ {
152
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
153
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 2);
154
+ }
155
+
156
+ static VALUE
157
+ vm_call_iseq_setup_normal_0start_3params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
158
+ {
159
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
160
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 3);
161
+ }
162
+
163
+ static VALUE
164
+ vm_call_iseq_setup_normal_0start_3params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
165
+ {
166
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
167
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 4);
168
+ }
169
+
170
+ static VALUE
171
+ vm_call_iseq_setup_normal_0start_3params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
172
+ {
173
+ RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
174
+ return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, 3, 5);
175
+ }
176
+
177
+ /* vm_call_iseq_handlers[param][local] */
178
+ static const vm_call_handler vm_call_iseq_handlers[][6] = {
179
+ {vm_call_iseq_setup_normal_0start_0params_0locals,
180
+ vm_call_iseq_setup_normal_0start_0params_1locals,
181
+ vm_call_iseq_setup_normal_0start_0params_2locals,
182
+ vm_call_iseq_setup_normal_0start_0params_3locals,
183
+ vm_call_iseq_setup_normal_0start_0params_4locals,
184
+ vm_call_iseq_setup_normal_0start_0params_5locals},
185
+ {vm_call_iseq_setup_normal_0start_1params_0locals,
186
+ vm_call_iseq_setup_normal_0start_1params_1locals,
187
+ vm_call_iseq_setup_normal_0start_1params_2locals,
188
+ vm_call_iseq_setup_normal_0start_1params_3locals,
189
+ vm_call_iseq_setup_normal_0start_1params_4locals,
190
+ vm_call_iseq_setup_normal_0start_1params_5locals},
191
+ {vm_call_iseq_setup_normal_0start_2params_0locals,
192
+ vm_call_iseq_setup_normal_0start_2params_1locals,
193
+ vm_call_iseq_setup_normal_0start_2params_2locals,
194
+ vm_call_iseq_setup_normal_0start_2params_3locals,
195
+ vm_call_iseq_setup_normal_0start_2params_4locals,
196
+ vm_call_iseq_setup_normal_0start_2params_5locals},
197
+ {vm_call_iseq_setup_normal_0start_3params_0locals,
198
+ vm_call_iseq_setup_normal_0start_3params_1locals,
199
+ vm_call_iseq_setup_normal_0start_3params_2locals,
200
+ vm_call_iseq_setup_normal_0start_3params_3locals,
201
+ vm_call_iseq_setup_normal_0start_3params_4locals,
202
+ vm_call_iseq_setup_normal_0start_3params_5locals}
203
+ };
204
+
205
+ static inline vm_call_handler
206
+ vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size)
207
+ {
208
+ if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
209
+ return &vm_call_iseq_setup_tailcall_0start;
210
+ }
211
+ else if (0) { /* to disable optimize */
212
+ return &vm_call_iseq_setup_normal_0start;
213
+ }
214
+ else {
215
+ if (param_size <= 3 &&
216
+ local_size <= 5) {
217
+ VM_ASSERT(local_size >= 0);
218
+ return vm_call_iseq_handlers[param_size][local_size];
219
+ }
220
+ return &vm_call_iseq_setup_normal_0start;
221
+ }
222
+ }
223
+
224
+ #else
225
+
226
+
227
+ static inline vm_call_handler
228
+ vm_call_iseq_setup_func(const struct rb_call_info *ci, struct rb_call_cache *cc)
229
+ {
230
+ if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
231
+ return &vm_call_iseq_setup_tailcall_0start;
232
+ }
233
+ else {
234
+ return &vm_call_iseq_setup_normal_0start;
235
+ }
236
+ }
237
+ #endif
@@ -0,0 +1,1955 @@
1
+ /**********************************************************************
2
+
3
+ vm_core.h -
4
+
5
+ $Author$
6
+ created at: 04/01/01 19:41:38 JST
7
+
8
+ Copyright (C) 2004-2007 Koichi Sasada
9
+
10
+ **********************************************************************/
11
+
12
+ #ifndef RUBY_VM_CORE_H
13
+ #define RUBY_VM_CORE_H
14
+
15
+ /*
16
+ * Enable check mode.
17
+ * 1: enable local assertions.
18
+ */
19
+ #ifndef VM_CHECK_MODE
20
+
21
+ // respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
22
+ #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
23
+
24
+ #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
25
+ #endif
26
+
27
+ /**
28
+ * VM Debug Level
29
+ *
30
+ * debug level:
31
+ * 0: no debug output
32
+ * 1: show instruction name
33
+ * 2: show stack frame when control stack frame is changed
34
+ * 3: show stack status
35
+ * 4: show register
36
+ * 5:
37
+ * 10: gc check
38
+ */
39
+
40
+ #ifndef VMDEBUG
41
+ #define VMDEBUG 0
42
+ #endif
43
+
44
+ #if 0
45
+ #undef VMDEBUG
46
+ #define VMDEBUG 3
47
+ #endif
48
+
49
+ #include "ruby_assert.h"
50
+
51
+ #if VM_CHECK_MODE > 0
52
+ #define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
53
+ #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
54
+
55
+ #else
56
+ #define VM_ASSERT(expr) ((void)0)
57
+ #define VM_UNREACHABLE(func) UNREACHABLE
58
+ #endif
59
+
60
+ #define RUBY_VM_THREAD_MODEL 2
61
+
62
+ /*
63
+ * implementation selector of get_insn_info algorithm
64
+ * 0: linear search
65
+ * 1: binary search
66
+ * 2: succinct bitvector
67
+ */
68
+ #ifndef VM_INSN_INFO_TABLE_IMPL
69
+ # define VM_INSN_INFO_TABLE_IMPL 2
70
+ #endif
71
+
72
+ #include "ruby/ruby.h"
73
+ #include "ruby/st.h"
74
+
75
+ #include "node.h"
76
+ #include "vm_opts.h"
77
+ #include "id.h"
78
+ #include "method.h"
79
+ #include "ruby_atomic.h"
80
+ #include "ccan/list/list.h"
81
+
82
+ #include "ruby/thread_native.h"
83
+ #if defined(_WIN32)
84
+ #include "thread_win32.h"
85
+ #elif defined(HAVE_PTHREAD_H)
86
+ #include "thread_pthread.h"
87
+ #endif
88
+
89
+ #include <setjmp.h>
90
+ #include <signal.h>
91
+
92
+ #if defined(NSIG_MAX) /* POSIX issue 8 */
93
+ # undef NSIG
94
+ # define NSIG NSIG_MAX
95
+ #elif defined(_SIG_MAXSIG) /* FreeBSD */
96
+ # undef NSIG
97
+ # define NSIG _SIG_MAXSIG
98
+ #elif defined(_SIGMAX) /* QNX */
99
+ # define NSIG (_SIGMAX + 1)
100
+ #elif defined(NSIG) /* 99% of everything else */
101
+ # /* take it */
102
+ #else /* Last resort */
103
+ # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
104
+ #endif
105
+
106
+ #define RUBY_NSIG NSIG
107
+
108
+ #if defined(SIGCLD)
109
+ # define RUBY_SIGCHLD (SIGCLD)
110
+ #elif defined(SIGCHLD)
111
+ # define RUBY_SIGCHLD (SIGCHLD)
112
+ #else
113
+ # define RUBY_SIGCHLD (0)
114
+ #endif
115
+
116
+ /* platforms with broken or non-existent SIGCHLD work by polling */
117
+ #if defined(__APPLE__)
118
+ # define SIGCHLD_LOSSY (1)
119
+ #else
120
+ # define SIGCHLD_LOSSY (0)
121
+ #endif
122
+
123
+ /* define to 0 to test old code path */
124
+ #define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
125
+
126
+ #ifdef HAVE_STDARG_PROTOTYPES
127
+ #include <stdarg.h>
128
+ #define va_init_list(a,b) va_start((a),(b))
129
+ #else
130
+ #include <varargs.h>
131
+ #define va_init_list(a,b) va_start((a))
132
+ #endif
133
+
134
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
135
+ # define USE_SIGALTSTACK
136
+ void *rb_register_sigaltstack(void);
137
+ # define RB_ALTSTACK_INIT(var) var = rb_register_sigaltstack()
138
+ # define RB_ALTSTACK_FREE(var) xfree(var)
139
+ # define RB_ALTSTACK(var) var
140
+ #else /* noop */
141
+ # define RB_ALTSTACK_INIT(var)
142
+ # define RB_ALTSTACK_FREE(var)
143
+ # define RB_ALTSTACK(var) (0)
144
+ #endif
145
+
146
+ /*****************/
147
+ /* configuration */
148
+ /*****************/
149
+
150
+ /* gcc ver. check */
151
+ #if defined(__GNUC__) && __GNUC__ >= 2
152
+
153
+ #if OPT_TOKEN_THREADED_CODE
154
+ #if OPT_DIRECT_THREADED_CODE
155
+ #undef OPT_DIRECT_THREADED_CODE
156
+ #endif
157
+ #endif
158
+
159
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
160
+
161
+ /* disable threaded code options */
162
+ #if OPT_DIRECT_THREADED_CODE
163
+ #undef OPT_DIRECT_THREADED_CODE
164
+ #endif
165
+ #if OPT_TOKEN_THREADED_CODE
166
+ #undef OPT_TOKEN_THREADED_CODE
167
+ #endif
168
+ #endif
169
+
170
+ /* call threaded code */
171
+ #if OPT_CALL_THREADED_CODE
172
+ #if OPT_DIRECT_THREADED_CODE
173
+ #undef OPT_DIRECT_THREADED_CODE
174
+ #endif /* OPT_DIRECT_THREADED_CODE */
175
+ #if OPT_STACK_CACHING
176
+ #undef OPT_STACK_CACHING
177
+ #endif /* OPT_STACK_CACHING */
178
+ #endif /* OPT_CALL_THREADED_CODE */
179
+
180
+ void rb_vm_encoded_insn_data_table_init(void);
181
+ typedef unsigned long rb_num_t;
182
+ typedef signed long rb_snum_t;
183
+
184
+ enum ruby_tag_type {
185
+ RUBY_TAG_NONE = 0x0,
186
+ RUBY_TAG_RETURN = 0x1,
187
+ RUBY_TAG_BREAK = 0x2,
188
+ RUBY_TAG_NEXT = 0x3,
189
+ RUBY_TAG_RETRY = 0x4,
190
+ RUBY_TAG_REDO = 0x5,
191
+ RUBY_TAG_RAISE = 0x6,
192
+ RUBY_TAG_THROW = 0x7,
193
+ RUBY_TAG_FATAL = 0x8,
194
+ RUBY_TAG_MASK = 0xf
195
+ };
196
+
197
+ #define TAG_NONE RUBY_TAG_NONE
198
+ #define TAG_RETURN RUBY_TAG_RETURN
199
+ #define TAG_BREAK RUBY_TAG_BREAK
200
+ #define TAG_NEXT RUBY_TAG_NEXT
201
+ #define TAG_RETRY RUBY_TAG_RETRY
202
+ #define TAG_REDO RUBY_TAG_REDO
203
+ #define TAG_RAISE RUBY_TAG_RAISE
204
+ #define TAG_THROW RUBY_TAG_THROW
205
+ #define TAG_FATAL RUBY_TAG_FATAL
206
+ #define TAG_MASK RUBY_TAG_MASK
207
+
208
+ enum ruby_vm_throw_flags {
209
+ VM_THROW_NO_ESCAPE_FLAG = 0x8000,
210
+ VM_THROW_STATE_MASK = 0xff
211
+ };
212
+
213
+ /* forward declarations */
214
+ struct rb_thread_struct;
215
+ struct rb_control_frame_struct;
216
+
217
+ /* iseq data type */
218
+ typedef struct rb_compile_option_struct rb_compile_option_t;
219
+
220
+ struct iseq_inline_cache_entry {
221
+ rb_serial_t ic_serial;
222
+ const rb_cref_t *ic_cref;
223
+ union {
224
+ size_t index;
225
+ VALUE value;
226
+ } ic_value;
227
+ };
228
+
229
+ union iseq_inline_storage_entry {
230
+ struct {
231
+ struct rb_thread_struct *running_thread;
232
+ VALUE value;
233
+ } once;
234
+ struct iseq_inline_cache_entry cache;
235
+ };
236
+
237
+ struct rb_call_info {
238
+ /* fixed at compile time */
239
+ ID mid;
240
+ unsigned int flag;
241
+ int orig_argc;
242
+ };
243
+
244
+ struct rb_call_info_kw_arg {
245
+ int keyword_len;
246
+ VALUE keywords[1];
247
+ };
248
+
249
+ struct rb_call_info_with_kwarg {
250
+ struct rb_call_info ci;
251
+ struct rb_call_info_kw_arg *kw_arg;
252
+ };
253
+
254
+ struct rb_calling_info {
255
+ VALUE block_handler;
256
+ VALUE recv;
257
+ int argc;
258
+ int kw_splat;
259
+ };
260
+
261
+ struct rb_execution_context_struct;
262
+ typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
263
+
264
+ #if 1
265
+ #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
266
+ #else
267
+ #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
268
+ #endif
269
+ #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
270
+
271
+ typedef struct rb_iseq_location_struct {
272
+ VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
273
+ VALUE base_label; /* String */
274
+ VALUE label; /* String */
275
+ VALUE first_lineno; /* TODO: may be unsigned short */
276
+ int node_id;
277
+ rb_code_location_t code_location;
278
+ } rb_iseq_location_t;
279
+
280
+ #define PATHOBJ_PATH 0
281
+ #define PATHOBJ_REALPATH 1
282
+
283
+ static inline VALUE
284
+ pathobj_path(VALUE pathobj)
285
+ {
286
+ if (RB_TYPE_P(pathobj, T_STRING)) {
287
+ return pathobj;
288
+ }
289
+ else {
290
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
291
+ return RARRAY_AREF(pathobj, PATHOBJ_PATH);
292
+ }
293
+ }
294
+
295
+ static inline VALUE
296
+ pathobj_realpath(VALUE pathobj)
297
+ {
298
+ if (RB_TYPE_P(pathobj, T_STRING)) {
299
+ return pathobj;
300
+ }
301
+ else {
302
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
303
+ return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
304
+ }
305
+ }
306
+
307
+ /* Forward declarations */
308
+ struct rb_mjit_unit;
309
+
310
+ struct rb_iseq_constant_body {
311
+ enum iseq_type {
312
+ ISEQ_TYPE_TOP,
313
+ ISEQ_TYPE_METHOD,
314
+ ISEQ_TYPE_BLOCK,
315
+ ISEQ_TYPE_CLASS,
316
+ ISEQ_TYPE_RESCUE,
317
+ ISEQ_TYPE_ENSURE,
318
+ ISEQ_TYPE_EVAL,
319
+ ISEQ_TYPE_MAIN,
320
+ ISEQ_TYPE_PLAIN
321
+ } type; /* instruction sequence type */
322
+
323
+ unsigned int iseq_size;
324
+ VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
325
+
326
+ /**
327
+ * parameter information
328
+ *
329
+ * def m(a1, a2, ..., aM, # mandatory
330
+ * b1=(...), b2=(...), ..., bN=(...), # optional
331
+ * *c, # rest
332
+ * d1, d2, ..., dO, # post
333
+ * e1:(...), e2:(...), ..., eK:(...), # keyword
334
+ * **f, # keyword_rest
335
+ * &g) # block
336
+ * =>
337
+ *
338
+ * lead_num = M
339
+ * opt_num = N
340
+ * rest_start = M+N
341
+ * post_start = M+N+(*1)
342
+ * post_num = O
343
+ * keyword_num = K
344
+ * block_start = M+N+(*1)+O+K
345
+ * keyword_bits = M+N+(*1)+O+K+(&1)
346
+ * size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
347
+ */
348
+
349
+ struct {
350
+ struct {
351
+ unsigned int has_lead : 1;
352
+ unsigned int has_opt : 1;
353
+ unsigned int has_rest : 1;
354
+ unsigned int has_post : 1;
355
+ unsigned int has_kw : 1;
356
+ unsigned int has_kwrest : 1;
357
+ unsigned int has_block : 1;
358
+
359
+ unsigned int ambiguous_param0 : 1; /* {|a|} */
360
+ unsigned int accepts_no_kwarg : 1;
361
+ unsigned int ruby2_keywords: 1;
362
+ } flags;
363
+
364
+ unsigned int size;
365
+
366
+ int lead_num;
367
+ int opt_num;
368
+ int rest_start;
369
+ int post_start;
370
+ int post_num;
371
+ int block_start;
372
+
373
+ const VALUE *opt_table; /* (opt_num + 1) entries. */
374
+ /* opt_num and opt_table:
375
+ *
376
+ * def foo o1=e1, o2=e2, ..., oN=eN
377
+ * #=>
378
+ * # prologue code
379
+ * A1: e1
380
+ * A2: e2
381
+ * ...
382
+ * AN: eN
383
+ * AL: body
384
+ * opt_num = N
385
+ * opt_table = [A1, A2, ..., AN, AL]
386
+ */
387
+
388
+ const struct rb_iseq_param_keyword {
389
+ int num;
390
+ int required_num;
391
+ int bits_start;
392
+ int rest_start;
393
+ const ID *table;
394
+ VALUE *default_values;
395
+ } *keyword;
396
+ } param;
397
+
398
+ rb_iseq_location_t location;
399
+
400
+ /* insn info, must be freed */
401
+ struct iseq_insn_info {
402
+ const struct iseq_insn_info_entry *body;
403
+ unsigned int *positions;
404
+ unsigned int size;
405
+ #if VM_INSN_INFO_TABLE_IMPL == 2
406
+ struct succ_index_table *succ_index_table;
407
+ #endif
408
+ } insns_info;
409
+
410
+ const ID *local_table; /* must free */
411
+
412
+ /* catch table */
413
+ struct iseq_catch_table *catch_table;
414
+
415
+ /* for child iseq */
416
+ const struct rb_iseq_struct *parent_iseq;
417
+ struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
418
+
419
+ union iseq_inline_storage_entry *is_entries;
420
+ struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
421
+ * struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
422
+ * So that:
423
+ * struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
424
+ */
425
+ struct rb_call_cache *cc_entries; /* size is ci_size + ci_kw_size */
426
+
427
+ struct {
428
+ rb_snum_t flip_count;
429
+ VALUE coverage;
430
+ VALUE pc2branchindex;
431
+ VALUE *original_iseq;
432
+ } variable;
433
+
434
+ unsigned int local_table_size;
435
+ unsigned int is_size;
436
+ unsigned int ci_size;
437
+ unsigned int ci_kw_size;
438
+ unsigned int stack_max; /* for stack overflow check */
439
+
440
+ char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
441
+
442
+ #if USE_MJIT
443
+ /* The following fields are MJIT related info. */
444
+ VALUE (*jit_func)(struct rb_execution_context_struct *,
445
+ struct rb_control_frame_struct *); /* function pointer for loaded native code */
446
+ long unsigned total_calls; /* number of total calls with `mjit_exec()` */
447
+ struct rb_mjit_unit *jit_unit;
448
+ #endif
449
+ };
450
+
451
+ /* T_IMEMO/iseq */
452
+ /* typedef rb_iseq_t is in method.h */
453
+ struct rb_iseq_struct {
454
+ VALUE flags; /* 1 */
455
+ VALUE wrapper; /* 2 */
456
+
457
+ struct rb_iseq_constant_body *body; /* 3 */
458
+
459
+ union { /* 4, 5 words */
460
+ struct iseq_compile_data *compile_data; /* used at compile time */
461
+
462
+ struct {
463
+ VALUE obj;
464
+ int index;
465
+ } loader;
466
+
467
+ struct {
468
+ struct rb_hook_list_struct *local_hooks;
469
+ rb_event_flag_t global_trace_events;
470
+ } exec;
471
+ } aux;
472
+ };
473
+
474
+ #ifndef USE_LAZY_LOAD
475
+ #define USE_LAZY_LOAD 0
476
+ #endif
477
+
478
+ #if USE_LAZY_LOAD
479
+ const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
480
+ #endif
481
+
482
+ static inline const rb_iseq_t *
483
+ rb_iseq_check(const rb_iseq_t *iseq)
484
+ {
485
+ #if USE_LAZY_LOAD
486
+ if (iseq->body == NULL) {
487
+ rb_iseq_complete((rb_iseq_t *)iseq);
488
+ }
489
+ #endif
490
+ return iseq;
491
+ }
492
+
493
+ static inline const rb_iseq_t *
494
+ def_iseq_ptr(rb_method_definition_t *def)
495
+ {
496
+ #if VM_CHECK_MODE > 0
497
+ if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
498
+ #endif
499
+ return rb_iseq_check(def->body.iseq.iseqptr);
500
+ }
501
+
502
+ enum ruby_special_exceptions {
503
+ ruby_error_reenter,
504
+ ruby_error_nomemory,
505
+ ruby_error_sysstack,
506
+ ruby_error_stackfatal,
507
+ ruby_error_stream_closed,
508
+ ruby_special_error_count
509
+ };
510
+
511
+ enum ruby_basic_operators {
512
+ BOP_PLUS,
513
+ BOP_MINUS,
514
+ BOP_MULT,
515
+ BOP_DIV,
516
+ BOP_MOD,
517
+ BOP_EQ,
518
+ BOP_EQQ,
519
+ BOP_LT,
520
+ BOP_LE,
521
+ BOP_LTLT,
522
+ BOP_AREF,
523
+ BOP_ASET,
524
+ BOP_LENGTH,
525
+ BOP_SIZE,
526
+ BOP_EMPTY_P,
527
+ BOP_NIL_P,
528
+ BOP_SUCC,
529
+ BOP_GT,
530
+ BOP_GE,
531
+ BOP_NOT,
532
+ BOP_NEQ,
533
+ BOP_MATCH,
534
+ BOP_FREEZE,
535
+ BOP_UMINUS,
536
+ BOP_MAX,
537
+ BOP_MIN,
538
+ BOP_CALL,
539
+ BOP_AND,
540
+ BOP_OR,
541
+
542
+ BOP_LAST_
543
+ };
544
+
545
+ #define GetVMPtr(obj, ptr) \
546
+ GetCoreDataFromValue((obj), rb_vm_t, (ptr))
547
+
548
+ struct rb_vm_struct;
549
+ typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
550
+
551
+ typedef struct rb_at_exit_list {
552
+ rb_vm_at_exit_func *func;
553
+ struct rb_at_exit_list *next;
554
+ } rb_at_exit_list;
555
+
556
+ struct rb_objspace;
557
+ struct rb_objspace *rb_objspace_alloc(void);
558
+ void rb_objspace_free(struct rb_objspace *);
559
+ void rb_objspace_call_finalizer(struct rb_objspace *);
560
+
561
+ typedef struct rb_hook_list_struct {
562
+ struct rb_event_hook_struct *hooks;
563
+ rb_event_flag_t events;
564
+ unsigned int need_clean;
565
+ unsigned int running;
566
+ } rb_hook_list_t;
567
+
568
+ typedef struct rb_vm_struct {
569
+ VALUE self;
570
+
571
+ rb_global_vm_lock_t gvl;
572
+
573
+ struct rb_thread_struct *main_thread;
574
+
575
+ /* persists across uncontended GVL release/acquire for time slice */
576
+ const struct rb_thread_struct *running_thread;
577
+
578
+ #ifdef USE_SIGALTSTACK
579
+ void *main_altstack;
580
+ #endif
581
+
582
+ rb_serial_t fork_gen;
583
+ rb_nativethread_lock_t waitpid_lock;
584
+ struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
585
+ struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
586
+ struct list_head waiting_fds; /* <=> struct waiting_fd */
587
+ struct list_head living_threads;
588
+ VALUE thgroup_default;
589
+ int living_thread_num;
590
+
591
+ /* set in single-threaded processes only: */
592
+ volatile int ubf_async_safe;
593
+
594
+ unsigned int running: 1;
595
+ unsigned int thread_abort_on_exception: 1;
596
+ unsigned int thread_report_on_exception: 1;
597
+
598
+ unsigned int safe_level_: 1;
599
+ int sleeper;
600
+
601
+ /* object management */
602
+ VALUE mark_object_ary;
603
+ const VALUE special_exceptions[ruby_special_error_count];
604
+
605
+ /* load */
606
+ VALUE top_self;
607
+ VALUE load_path;
608
+ VALUE load_path_snapshot;
609
+ VALUE load_path_check_cache;
610
+ VALUE expanded_load_path;
611
+ VALUE loaded_features;
612
+ VALUE loaded_features_snapshot;
613
+ struct st_table *loaded_features_index;
614
+ struct st_table *loading_table;
615
+
616
+ /* signal */
617
+ struct {
618
+ VALUE cmd[RUBY_NSIG];
619
+ unsigned char safe[RUBY_NSIG];
620
+ } trap_list;
621
+
622
+ /* hook */
623
+ rb_hook_list_t global_hooks;
624
+
625
+ /* relation table of ensure - rollback for callcc */
626
+ struct st_table *ensure_rollback_table;
627
+
628
+ /* postponed_job (async-signal-safe, NOT thread-safe) */
629
+ struct rb_postponed_job_struct *postponed_job_buffer;
630
+ int postponed_job_index;
631
+
632
+ int src_encoding_index;
633
+
634
+ /* workqueue (thread-safe, NOT async-signal-safe) */
635
+ struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
636
+ rb_nativethread_lock_t workqueue_lock;
637
+
638
+ VALUE verbose, debug, orig_progname, progname;
639
+ VALUE coverages;
640
+ int coverage_mode;
641
+
642
+ st_table * defined_module_hash;
643
+
644
+ struct rb_objspace *objspace;
645
+
646
+ rb_at_exit_list *at_exit;
647
+
648
+ VALUE *defined_strings;
649
+ st_table *frozen_strings;
650
+
651
+ /* params */
652
+ struct { /* size in byte */
653
+ size_t thread_vm_stack_size;
654
+ size_t thread_machine_stack_size;
655
+ size_t fiber_vm_stack_size;
656
+ size_t fiber_machine_stack_size;
657
+ } default_params;
658
+
659
+ short redefined_flag[BOP_LAST_];
660
+ } rb_vm_t;
661
+
662
+ /* default values */
663
+
664
+ #define RUBY_VM_SIZE_ALIGN 4096
665
+
666
+ #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
667
+ #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
668
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
669
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
670
+
671
+ #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
672
+ #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
673
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
674
+ #if defined(__powerpc64__)
675
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
676
+ #else
677
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
678
+ #endif
679
+
680
+ #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
681
+ /* It seems sanitizers consume A LOT of machine stacks */
682
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
683
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
684
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
685
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
686
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
687
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
688
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
689
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
690
+ #endif
691
+
692
+ /* optimize insn */
693
+ #define INTEGER_REDEFINED_OP_FLAG (1 << 0)
694
+ #define FLOAT_REDEFINED_OP_FLAG (1 << 1)
695
+ #define STRING_REDEFINED_OP_FLAG (1 << 2)
696
+ #define ARRAY_REDEFINED_OP_FLAG (1 << 3)
697
+ #define HASH_REDEFINED_OP_FLAG (1 << 4)
698
+ /* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
699
+ #define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
700
+ #define TIME_REDEFINED_OP_FLAG (1 << 7)
701
+ #define REGEXP_REDEFINED_OP_FLAG (1 << 8)
702
+ #define NIL_REDEFINED_OP_FLAG (1 << 9)
703
+ #define TRUE_REDEFINED_OP_FLAG (1 << 10)
704
+ #define FALSE_REDEFINED_OP_FLAG (1 << 11)
705
+ #define PROC_REDEFINED_OP_FLAG (1 << 12)
706
+
707
+ #define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
708
+
709
+ #ifndef VM_DEBUG_BP_CHECK
710
+ #define VM_DEBUG_BP_CHECK 0
711
+ #endif
712
+
713
+ #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
714
+ #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
715
+ #endif
716
+
717
+ struct rb_captured_block {
718
+ VALUE self;
719
+ const VALUE *ep;
720
+ union {
721
+ const rb_iseq_t *iseq;
722
+ const struct vm_ifunc *ifunc;
723
+ VALUE val;
724
+ } code;
725
+ };
726
+
727
+ enum rb_block_handler_type {
728
+ block_handler_type_iseq,
729
+ block_handler_type_ifunc,
730
+ block_handler_type_symbol,
731
+ block_handler_type_proc
732
+ };
733
+
734
+ enum rb_block_type {
735
+ block_type_iseq,
736
+ block_type_ifunc,
737
+ block_type_symbol,
738
+ block_type_proc
739
+ };
740
+
741
+ struct rb_block {
742
+ union {
743
+ struct rb_captured_block captured;
744
+ VALUE symbol;
745
+ VALUE proc;
746
+ } as;
747
+ enum rb_block_type type;
748
+ };
749
+
750
+ typedef struct rb_control_frame_struct {
751
+ const VALUE *pc; /* cfp[0] */
752
+ VALUE *sp; /* cfp[1] */
753
+ const rb_iseq_t *iseq; /* cfp[2] */
754
+ VALUE self; /* cfp[3] / block[0] */
755
+ const VALUE *ep; /* cfp[4] / block[1] */
756
+ const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */
757
+ VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
758
+
759
+ #if VM_DEBUG_BP_CHECK
760
+ VALUE *bp_check; /* cfp[7] */
761
+ #endif
762
+ } rb_control_frame_t;
763
+
764
+ extern const rb_data_type_t ruby_threadptr_data_type;
765
+
766
+ static inline struct rb_thread_struct *
767
+ rb_thread_ptr(VALUE thval)
768
+ {
769
+ return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
770
+ }
771
+
772
+ enum rb_thread_status {
773
+ THREAD_RUNNABLE,
774
+ THREAD_STOPPED,
775
+ THREAD_STOPPED_FOREVER,
776
+ THREAD_KILLED
777
+ };
778
+
779
+ #ifdef RUBY_JMP_BUF
780
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
781
+ #else
782
+ typedef void *rb_jmpbuf_t[5];
783
+ #endif
784
+
785
+ /*
786
+ the members which are written in EC_PUSH_TAG() should be placed at
787
+ the beginning and the end, so that entire region is accessible.
788
+ */
789
+ struct rb_vm_tag {
790
+ VALUE tag;
791
+ VALUE retval;
792
+ rb_jmpbuf_t buf;
793
+ struct rb_vm_tag *prev;
794
+ enum ruby_tag_type state;
795
+ };
796
+
797
+ STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
798
+ STATIC_ASSERT(rb_vm_tag_buf_end,
799
+ offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
800
+ sizeof(struct rb_vm_tag));
801
+
802
+ struct rb_vm_protect_tag {
803
+ struct rb_vm_protect_tag *prev;
804
+ };
805
+
806
+ struct rb_unblock_callback {
807
+ rb_unblock_function_t *func;
808
+ void *arg;
809
+ };
810
+
811
+ struct rb_mutex_struct;
812
+
813
+ typedef struct rb_thread_list_struct{
814
+ struct rb_thread_list_struct *next;
815
+ struct rb_thread_struct *th;
816
+ } rb_thread_list_t;
817
+
818
+ typedef struct rb_ensure_entry {
819
+ VALUE marker;
820
+ VALUE (*e_proc)(VALUE);
821
+ VALUE data2;
822
+ } rb_ensure_entry_t;
823
+
824
+ typedef struct rb_ensure_list {
825
+ struct rb_ensure_list *next;
826
+ struct rb_ensure_entry entry;
827
+ } rb_ensure_list_t;
828
+
829
+ typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
830
+
831
+ typedef struct rb_fiber_struct rb_fiber_t;
832
+
833
+ typedef struct rb_execution_context_struct {
834
+ /* execution information */
835
+ VALUE *vm_stack; /* must free, must mark */
836
+ size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
837
+ rb_control_frame_t *cfp;
838
+
839
+ struct rb_vm_tag *tag;
840
+ struct rb_vm_protect_tag *protect_tag;
841
+
842
+ /* interrupt flags */
843
+ rb_atomic_t interrupt_flag;
844
+ rb_atomic_t interrupt_mask; /* size should match flag */
845
+
846
+ rb_fiber_t *fiber_ptr;
847
+ struct rb_thread_struct *thread_ptr;
848
+
849
+ /* storage (ec (fiber) local) */
850
+ st_table *local_storage;
851
+ VALUE local_storage_recursive_hash;
852
+ VALUE local_storage_recursive_hash_for_trace;
853
+
854
+ /* eval env */
855
+ const VALUE *root_lep;
856
+ VALUE root_svar;
857
+
858
+ /* ensure & callcc */
859
+ rb_ensure_list_t *ensure_list;
860
+
861
+ /* trace information */
862
+ struct rb_trace_arg_struct *trace_arg;
863
+
864
+ /* temporary places */
865
+ VALUE errinfo;
866
+ VALUE passed_block_handler; /* for rb_iterate */
867
+
868
+ uint8_t raised_flag; /* only 3 bits needed */
869
+
870
+ /* n.b. only 7 bits needed, really: */
871
+ BITFIELD(enum method_missing_reason, method_missing_reason, 8);
872
+
873
+ VALUE private_const_reference;
874
+
875
+ /* for GC */
876
+ struct {
877
+ VALUE *stack_start;
878
+ VALUE *stack_end;
879
+ size_t stack_maxsize;
880
+ RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
881
+ } machine;
882
+ } rb_execution_context_t;
883
+
884
+ // Set the vm_stack pointer in the execution context.
885
+ void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
886
+
887
+ // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
888
+ // @param ec the execution context to update.
889
+ // @param stack a pointer to the stack to use.
890
+ // @param size the size of the stack, as in `VALUE stack[size]`.
891
+ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
892
+
893
+ // Clear (set to `NULL`) the vm_stack pointer.
894
+ // @param ec the execution context to update.
895
+ void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
896
+
897
+ typedef struct rb_thread_struct {
898
+ struct list_node vmlt_node;
899
+ VALUE self;
900
+ rb_vm_t *vm;
901
+
902
+ rb_execution_context_t *ec;
903
+
904
+ VALUE last_status; /* $? */
905
+
906
+ /* for cfunc */
907
+ struct rb_calling_info *calling;
908
+
909
+ /* for load(true) */
910
+ VALUE top_self;
911
+ VALUE top_wrapper;
912
+
913
+ /* thread control */
914
+ rb_nativethread_id_t thread_id;
915
+ #ifdef NON_SCALAR_THREAD_ID
916
+ rb_thread_id_string_t thread_id_string;
917
+ #endif
918
+ BITFIELD(enum rb_thread_status, status, 2);
919
+ /* bit flags */
920
+ unsigned int to_kill : 1;
921
+ unsigned int abort_on_exception: 1;
922
+ unsigned int report_on_exception: 1;
923
+ unsigned int pending_interrupt_queue_checked: 1;
924
+ int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
925
+ uint32_t running_time_us; /* 12500..800000 */
926
+
927
+ native_thread_data_t native_thread_data;
928
+ void *blocking_region_buffer;
929
+
930
+ VALUE thgroup;
931
+ VALUE value;
932
+
933
+ /* temporary place of retval on OPT_CALL_THREADED_CODE */
934
+ #if OPT_CALL_THREADED_CODE
935
+ VALUE retval;
936
+ #endif
937
+
938
+ /* async errinfo queue */
939
+ VALUE pending_interrupt_queue;
940
+ VALUE pending_interrupt_mask_stack;
941
+
942
+ /* interrupt management */
943
+ rb_nativethread_lock_t interrupt_lock;
944
+ struct rb_unblock_callback unblock;
945
+ VALUE locking_mutex;
946
+ struct rb_mutex_struct *keeping_mutexes;
947
+
948
+ rb_thread_list_t *join_list;
949
+
950
+ union {
951
+ struct {
952
+ VALUE proc;
953
+ VALUE args;
954
+ int kw_splat;
955
+ } proc;
956
+ struct {
957
+ VALUE (*func)(void *);
958
+ void *arg;
959
+ } func;
960
+ } invoke_arg;
961
+
962
+ enum {
963
+ thread_invoke_type_none = 0,
964
+ thread_invoke_type_proc,
965
+ thread_invoke_type_func
966
+ } invoke_type;
967
+
968
+ /* statistics data for profiler */
969
+ VALUE stat_insn_usage;
970
+
971
+ /* fiber */
972
+ rb_fiber_t *root_fiber;
973
+ rb_jmpbuf_t root_jmpbuf;
974
+
975
+ /* misc */
976
+ VALUE name;
977
+
978
+ } rb_thread_t;
979
+
980
+ typedef enum {
981
+ VM_DEFINECLASS_TYPE_CLASS = 0x00,
982
+ VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
983
+ VM_DEFINECLASS_TYPE_MODULE = 0x02,
984
+ /* 0x03..0x06 is reserved */
985
+ VM_DEFINECLASS_TYPE_MASK = 0x07
986
+ } rb_vm_defineclass_type_t;
987
+
988
+ #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
989
+ #define VM_DEFINECLASS_FLAG_SCOPED 0x08
990
+ #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
991
+ #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
992
+ #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
993
+ ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
994
+
995
+ /* iseq.c */
996
+ RUBY_SYMBOL_EXPORT_BEGIN
997
+
998
+ /* node -> iseq */
999
+ rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
1000
+ rb_iseq_t *rb_iseq_new_top (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1001
+ rb_iseq_t *rb_iseq_new_main (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1002
+ rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1003
+ const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1004
+ struct iseq_link_anchor;
1005
+ struct rb_iseq_new_with_callback_callback_func {
1006
+ VALUE flags;
1007
+ VALUE reserved;
1008
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1009
+ const void *data;
1010
+ };
1011
+ static inline struct rb_iseq_new_with_callback_callback_func *
1012
+ rb_iseq_new_with_callback_new_callback(
1013
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1014
+ {
1015
+ VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
1016
+ return (struct rb_iseq_new_with_callback_callback_func *)memo;
1017
+ }
1018
+ rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1019
+ VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1020
+ const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1021
+
1022
+ /* src -> iseq */
1023
+ rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
1024
+ rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, VALUE opt);
1025
+
1026
+ VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1027
+ int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1028
+
1029
+ VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1030
+
1031
+ RUBY_EXTERN VALUE rb_cISeq;
1032
+ RUBY_EXTERN VALUE rb_cRubyVM;
1033
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1034
+ RUBY_EXTERN VALUE rb_block_param_proxy;
1035
+ RUBY_SYMBOL_EXPORT_END
1036
+
1037
+ #define GetProcPtr(obj, ptr) \
1038
+ GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1039
+
1040
+ typedef struct {
1041
+ const struct rb_block block;
1042
+ unsigned int is_from_method: 1; /* bool */
1043
+ unsigned int is_lambda: 1; /* bool */
1044
+ } rb_proc_t;
1045
+
1046
+ typedef struct {
1047
+ VALUE flags; /* imemo header */
1048
+ rb_iseq_t *iseq;
1049
+ const VALUE *ep;
1050
+ const VALUE *env;
1051
+ unsigned int env_size;
1052
+ } rb_env_t;
1053
+
1054
+ extern const rb_data_type_t ruby_binding_data_type;
1055
+
1056
+ #define GetBindingPtr(obj, ptr) \
1057
+ GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1058
+
1059
+ typedef struct {
1060
+ const struct rb_block block;
1061
+ const VALUE pathobj;
1062
+ unsigned short first_lineno;
1063
+ } rb_binding_t;
1064
+
1065
+ /* used by compile time and send insn */
1066
+
1067
+ enum vm_check_match_type {
1068
+ VM_CHECKMATCH_TYPE_WHEN = 1,
1069
+ VM_CHECKMATCH_TYPE_CASE = 2,
1070
+ VM_CHECKMATCH_TYPE_RESCUE = 3
1071
+ };
1072
+
1073
+ #define VM_CHECKMATCH_TYPE_MASK 0x03
1074
+ #define VM_CHECKMATCH_ARRAY 0x04
1075
+
1076
+ enum vm_call_flag_bits {
1077
+ VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
1078
+ VM_CALL_ARGS_BLOCKARG_bit, /* m(&block) */
1079
+ VM_CALL_FCALL_bit, /* m(...) */
1080
+ VM_CALL_VCALL_bit, /* m */
1081
+ VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
1082
+ VM_CALL_BLOCKISEQ_bit, /* has blockiseq */
1083
+ VM_CALL_KWARG_bit, /* has kwarg */
1084
+ VM_CALL_KW_SPLAT_bit, /* m(**opts) */
1085
+ VM_CALL_TAILCALL_bit, /* located at tail position */
1086
+ VM_CALL_SUPER_bit, /* super */
1087
+ VM_CALL_ZSUPER_bit, /* zsuper */
1088
+ VM_CALL_OPT_SEND_bit, /* internal flag */
1089
+ VM_CALL__END
1090
+ };
1091
+
1092
+ #define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit)
1093
+ #define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
1094
+ #define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
1095
+ #define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
1096
+ #define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
1097
+ #define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit)
1098
+ #define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
1099
+ #define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
1100
+ #define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
1101
+ #define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit)
1102
+ #define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit)
1103
+ #define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
1104
+
1105
+ enum vm_special_object_type {
1106
+ VM_SPECIAL_OBJECT_VMCORE = 1,
1107
+ VM_SPECIAL_OBJECT_CBASE,
1108
+ VM_SPECIAL_OBJECT_CONST_BASE
1109
+ };
1110
+
1111
+ enum vm_svar_index {
1112
+ VM_SVAR_LASTLINE = 0, /* $_ */
1113
+ VM_SVAR_BACKREF = 1, /* $~ */
1114
+
1115
+ VM_SVAR_EXTRA_START = 2,
1116
+ VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1117
+ };
1118
+
1119
+ /* inline cache */
1120
+ typedef struct iseq_inline_cache_entry *IC;
1121
+ typedef union iseq_inline_storage_entry *ISE;
1122
+ typedef struct rb_call_info *CALL_INFO;
1123
+ typedef struct rb_call_cache *CALL_CACHE;
1124
+
1125
+ void rb_vm_change_state(void);
1126
+
1127
+ typedef VALUE CDHASH;
1128
+
1129
+ #ifndef FUNC_FASTCALL
1130
+ #define FUNC_FASTCALL(x) x
1131
+ #endif
1132
+
1133
+ typedef rb_control_frame_t *
1134
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1135
+
1136
+ #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1137
+ #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1138
+
1139
+ #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1140
+ #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1141
+ #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1142
+
1143
+ enum {
1144
+ /* Frame/Environment flag bits:
1145
+ * MMMM MMMM MMMM MMMM ____ FFFF FFFF EEEX (LSB)
1146
+ *
1147
+ * X : tag for GC marking (It seems as Fixnum)
1148
+ * EEE : 3 bits Env flags
1149
+ * FF..: 8 bits Frame flags
1150
+ * MM..: 15 bits frame magic (to check frame corruption)
1151
+ */
1152
+
1153
+ /* frame types */
1154
+ VM_FRAME_MAGIC_METHOD = 0x11110001,
1155
+ VM_FRAME_MAGIC_BLOCK = 0x22220001,
1156
+ VM_FRAME_MAGIC_CLASS = 0x33330001,
1157
+ VM_FRAME_MAGIC_TOP = 0x44440001,
1158
+ VM_FRAME_MAGIC_CFUNC = 0x55550001,
1159
+ VM_FRAME_MAGIC_IFUNC = 0x66660001,
1160
+ VM_FRAME_MAGIC_EVAL = 0x77770001,
1161
+ VM_FRAME_MAGIC_RESCUE = 0x78880001,
1162
+ VM_FRAME_MAGIC_DUMMY = 0x79990001,
1163
+
1164
+ VM_FRAME_MAGIC_MASK = 0x7fff0001,
1165
+
1166
+ /* frame flag */
1167
+ VM_FRAME_FLAG_PASSED = 0x0010,
1168
+ VM_FRAME_FLAG_FINISH = 0x0020,
1169
+ VM_FRAME_FLAG_BMETHOD = 0x0040,
1170
+ VM_FRAME_FLAG_CFRAME = 0x0080,
1171
+ VM_FRAME_FLAG_LAMBDA = 0x0100,
1172
+ VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1173
+ VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1174
+ VM_FRAME_FLAG_CFRAME_EMPTY_KW = 0x0800, /* -- Remove In 3.0 -- */
1175
+
1176
+ /* env flag */
1177
+ VM_ENV_FLAG_LOCAL = 0x0002,
1178
+ VM_ENV_FLAG_ESCAPED = 0x0004,
1179
+ VM_ENV_FLAG_WB_REQUIRED = 0x0008
1180
+ };
1181
+
1182
+ #define VM_ENV_DATA_SIZE ( 3)
1183
+
1184
+ #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1185
+ #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1186
+ #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1187
+ #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1188
+
1189
+ #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1190
+
1191
+ static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1192
+
1193
+ static inline void
1194
+ VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1195
+ {
1196
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1197
+ VM_ASSERT(FIXNUM_P(flags));
1198
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1199
+ }
1200
+
1201
+ static inline void
1202
+ VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1203
+ {
1204
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1205
+ VM_ASSERT(FIXNUM_P(flags));
1206
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1207
+ }
1208
+
1209
+ static inline unsigned long
1210
+ VM_ENV_FLAGS(const VALUE *ep, long flag)
1211
+ {
1212
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1213
+ VM_ASSERT(FIXNUM_P(flags));
1214
+ return flags & flag;
1215
+ }
1216
+
1217
+ static inline unsigned long
1218
+ VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1219
+ {
1220
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1221
+ }
1222
+
1223
+ static inline int
1224
+ VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1225
+ {
1226
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1227
+ }
1228
+
1229
+ static inline int
1230
+ VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1231
+ {
1232
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1233
+ }
1234
+
1235
+ /* -- Remove In 3.0 -- */
1236
+ static inline int
1237
+ VM_FRAME_CFRAME_EMPTY_KW_P(const rb_control_frame_t *cfp)
1238
+ {
1239
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_EMPTY_KW) != 0;
1240
+ }
1241
+
1242
+ static inline int
1243
+ VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1244
+ {
1245
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1246
+ }
1247
+
1248
+ static inline int
1249
+ VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1250
+ {
1251
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1252
+ }
1253
+
1254
+ static inline int
1255
+ rb_obj_is_iseq(VALUE iseq)
1256
+ {
1257
+ return imemo_type_p(iseq, imemo_iseq);
1258
+ }
1259
+
1260
+ #if VM_CHECK_MODE > 0
1261
+ #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1262
+ #endif
1263
+
1264
+ static inline int
1265
+ VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1266
+ {
1267
+ int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1268
+ VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
1269
+ return cframe_p;
1270
+ }
1271
+
1272
+ static inline int
1273
+ VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1274
+ {
1275
+ return !VM_FRAME_CFRAME_P(cfp);
1276
+ }
1277
+
1278
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
1279
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1280
+
1281
+ #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1282
+ #define VM_BLOCK_HANDLER_NONE 0
1283
+
1284
+ static inline int
1285
+ VM_ENV_LOCAL_P(const VALUE *ep)
1286
+ {
1287
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1288
+ }
1289
+
1290
+ static inline const VALUE *
1291
+ VM_ENV_PREV_EP(const VALUE *ep)
1292
+ {
1293
+ VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1294
+ return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1295
+ }
1296
+
1297
+ static inline VALUE
1298
+ VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1299
+ {
1300
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
1301
+ return ep[VM_ENV_DATA_INDEX_SPECVAL];
1302
+ }
1303
+
1304
+ #if VM_CHECK_MODE > 0
1305
+ int rb_vm_ep_in_heap_p(const VALUE *ep);
1306
+ #endif
1307
+
1308
+ static inline int
1309
+ VM_ENV_ESCAPED_P(const VALUE *ep)
1310
+ {
1311
+ VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1312
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1313
+ }
1314
+
1315
+ #if VM_CHECK_MODE > 0
1316
+ static inline int
1317
+ vm_assert_env(VALUE obj)
1318
+ {
1319
+ VM_ASSERT(imemo_type_p(obj, imemo_env));
1320
+ return 1;
1321
+ }
1322
+ #endif
1323
+
1324
+ static inline VALUE
1325
+ VM_ENV_ENVVAL(const VALUE *ep)
1326
+ {
1327
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1328
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1329
+ VM_ASSERT(vm_assert_env(envval));
1330
+ return envval;
1331
+ }
1332
+
1333
+ static inline const rb_env_t *
1334
+ VM_ENV_ENVVAL_PTR(const VALUE *ep)
1335
+ {
1336
+ return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1337
+ }
1338
+
1339
+ static inline const rb_env_t *
1340
+ vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1341
+ {
1342
+ rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
1343
+ env->env_size = env_size;
1344
+ env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1345
+ return env;
1346
+ }
1347
+
1348
+ static inline void
1349
+ VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1350
+ {
1351
+ *((VALUE *)ptr) = v;
1352
+ }
1353
+
1354
+ static inline void
1355
+ VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1356
+ {
1357
+ VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1358
+ VM_FORCE_WRITE(ptr, special_const_value);
1359
+ }
1360
+
1361
+ static inline void
1362
+ VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1363
+ {
1364
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1365
+ VM_FORCE_WRITE(&ep[index], v);
1366
+ }
1367
+
1368
+ const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1369
+ const VALUE *rb_vm_proc_local_ep(VALUE proc);
1370
+ void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1371
+ void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1372
+
1373
+ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1374
+
1375
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1376
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1377
+
1378
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1379
+ ((void *)(ecfp) > (void *)(cfp))
1380
+
1381
+ static inline const rb_control_frame_t *
1382
+ RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1383
+ {
1384
+ return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1385
+ }
1386
+
1387
+ static inline int
1388
+ RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1389
+ {
1390
+ return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1391
+ }
1392
+
1393
+ static inline int
1394
+ VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1395
+ {
1396
+ if ((block_handler & 0x03) == 0x01) {
1397
+ #if VM_CHECK_MODE > 0
1398
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1399
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1400
+ #endif
1401
+ return 1;
1402
+ }
1403
+ else {
1404
+ return 0;
1405
+ }
1406
+ }
1407
+
1408
+ static inline VALUE
1409
+ VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1410
+ {
1411
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1412
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1413
+ return block_handler;
1414
+ }
1415
+
1416
+ static inline const struct rb_captured_block *
1417
+ VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1418
+ {
1419
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1420
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1421
+ return captured;
1422
+ }
1423
+
1424
+ static inline int
1425
+ VM_BH_IFUNC_P(VALUE block_handler)
1426
+ {
1427
+ if ((block_handler & 0x03) == 0x03) {
1428
+ #if VM_CHECK_MODE > 0
1429
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1430
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1431
+ #endif
1432
+ return 1;
1433
+ }
1434
+ else {
1435
+ return 0;
1436
+ }
1437
+ }
1438
+
1439
+ static inline VALUE
1440
+ VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1441
+ {
1442
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1443
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1444
+ return block_handler;
1445
+ }
1446
+
1447
+ static inline const struct rb_captured_block *
1448
+ VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1449
+ {
1450
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1451
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1452
+ return captured;
1453
+ }
1454
+
1455
+ static inline const struct rb_captured_block *
1456
+ VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1457
+ {
1458
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1459
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1460
+ return captured;
1461
+ }
1462
+
1463
+ static inline enum rb_block_handler_type
1464
+ vm_block_handler_type(VALUE block_handler)
1465
+ {
1466
+ if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1467
+ return block_handler_type_iseq;
1468
+ }
1469
+ else if (VM_BH_IFUNC_P(block_handler)) {
1470
+ return block_handler_type_ifunc;
1471
+ }
1472
+ else if (SYMBOL_P(block_handler)) {
1473
+ return block_handler_type_symbol;
1474
+ }
1475
+ else {
1476
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1477
+ return block_handler_type_proc;
1478
+ }
1479
+ }
1480
+
1481
+ static inline void
1482
+ vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1483
+ {
1484
+ VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1485
+ (vm_block_handler_type(block_handler), 1));
1486
+ }
1487
+
1488
+ static inline enum rb_block_type
1489
+ vm_block_type(const struct rb_block *block)
1490
+ {
1491
+ #if VM_CHECK_MODE > 0
1492
+ switch (block->type) {
1493
+ case block_type_iseq:
1494
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1495
+ break;
1496
+ case block_type_ifunc:
1497
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1498
+ break;
1499
+ case block_type_symbol:
1500
+ VM_ASSERT(SYMBOL_P(block->as.symbol));
1501
+ break;
1502
+ case block_type_proc:
1503
+ VM_ASSERT(rb_obj_is_proc(block->as.proc));
1504
+ break;
1505
+ }
1506
+ #endif
1507
+ return block->type;
1508
+ }
1509
+
1510
+ static inline void
1511
+ vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1512
+ {
1513
+ struct rb_block *mb = (struct rb_block *)block;
1514
+ mb->type = type;
1515
+ }
1516
+
1517
+ static inline const struct rb_block *
1518
+ vm_proc_block(VALUE procval)
1519
+ {
1520
+ VM_ASSERT(rb_obj_is_proc(procval));
1521
+ return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1522
+ }
1523
+
1524
+ static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1525
+ static inline const VALUE *vm_block_ep(const struct rb_block *block);
1526
+
1527
+ static inline const rb_iseq_t *
1528
+ vm_proc_iseq(VALUE procval)
1529
+ {
1530
+ return vm_block_iseq(vm_proc_block(procval));
1531
+ }
1532
+
1533
+ static inline const VALUE *
1534
+ vm_proc_ep(VALUE procval)
1535
+ {
1536
+ return vm_block_ep(vm_proc_block(procval));
1537
+ }
1538
+
1539
+ static inline const rb_iseq_t *
1540
+ vm_block_iseq(const struct rb_block *block)
1541
+ {
1542
+ switch (vm_block_type(block)) {
1543
+ case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1544
+ case block_type_proc: return vm_proc_iseq(block->as.proc);
1545
+ case block_type_ifunc:
1546
+ case block_type_symbol: return NULL;
1547
+ }
1548
+ VM_UNREACHABLE(vm_block_iseq);
1549
+ return NULL;
1550
+ }
1551
+
1552
+ static inline const VALUE *
1553
+ vm_block_ep(const struct rb_block *block)
1554
+ {
1555
+ switch (vm_block_type(block)) {
1556
+ case block_type_iseq:
1557
+ case block_type_ifunc: return block->as.captured.ep;
1558
+ case block_type_proc: return vm_proc_ep(block->as.proc);
1559
+ case block_type_symbol: return NULL;
1560
+ }
1561
+ VM_UNREACHABLE(vm_block_ep);
1562
+ return NULL;
1563
+ }
1564
+
1565
+ static inline VALUE
1566
+ vm_block_self(const struct rb_block *block)
1567
+ {
1568
+ switch (vm_block_type(block)) {
1569
+ case block_type_iseq:
1570
+ case block_type_ifunc:
1571
+ return block->as.captured.self;
1572
+ case block_type_proc:
1573
+ return vm_block_self(vm_proc_block(block->as.proc));
1574
+ case block_type_symbol:
1575
+ return Qundef;
1576
+ }
1577
+ VM_UNREACHABLE(vm_block_self);
1578
+ return Qundef;
1579
+ }
1580
+
1581
+ static inline VALUE
1582
+ VM_BH_TO_SYMBOL(VALUE block_handler)
1583
+ {
1584
+ VM_ASSERT(SYMBOL_P(block_handler));
1585
+ return block_handler;
1586
+ }
1587
+
1588
+ static inline VALUE
1589
+ VM_BH_FROM_SYMBOL(VALUE symbol)
1590
+ {
1591
+ VM_ASSERT(SYMBOL_P(symbol));
1592
+ return symbol;
1593
+ }
1594
+
1595
+ static inline VALUE
1596
+ VM_BH_TO_PROC(VALUE block_handler)
1597
+ {
1598
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1599
+ return block_handler;
1600
+ }
1601
+
1602
+ static inline VALUE
1603
+ VM_BH_FROM_PROC(VALUE procval)
1604
+ {
1605
+ VM_ASSERT(rb_obj_is_proc(procval));
1606
+ return procval;
1607
+ }
1608
+
1609
+ /* VM related object allocate functions */
1610
+ VALUE rb_thread_alloc(VALUE klass);
1611
+ VALUE rb_binding_alloc(VALUE klass);
1612
+ VALUE rb_proc_alloc(VALUE klass);
1613
+ VALUE rb_proc_dup(VALUE self);
1614
+
1615
+ /* for debug */
1616
+ extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1617
+ extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1618
+ extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
1619
+ #if OPT_STACK_CACHING
1620
+ , VALUE reg_a, VALUE reg_b
1621
+ #endif
1622
+ );
1623
+
1624
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1625
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1626
+ void rb_vm_bugreport(const void *);
1627
+ typedef RETSIGTYPE (*ruby_sighandler_t)(int);
1628
+ NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1629
+
1630
+ /* functions about thread/vm execution */
1631
+ RUBY_SYMBOL_EXPORT_BEGIN
1632
+ VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1633
+ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1634
+ VALUE rb_iseq_path(const rb_iseq_t *iseq);
1635
+ VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1636
+ RUBY_SYMBOL_EXPORT_END
1637
+
1638
+ VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1639
+ void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1640
+
1641
+ int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1642
+ void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1643
+
1644
+ VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1645
+
1646
+ VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1647
+ static inline VALUE
1648
+ rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1649
+ {
1650
+ return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1651
+ }
1652
+
1653
+ static inline VALUE
1654
+ rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1655
+ {
1656
+ return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1657
+ }
1658
+
1659
+ VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1660
+ VALUE rb_vm_env_local_variables(const rb_env_t *env);
1661
+ const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1662
+ const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1663
+ void rb_vm_inc_const_missing_count(void);
1664
+ void rb_vm_gvl_destroy(rb_vm_t *vm);
1665
+ VALUE rb_vm_call(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1666
+ const VALUE *argv, const rb_callable_method_entry_t *me);
1667
+ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1668
+ const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1669
+ MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
1670
+
1671
+ void rb_thread_start_timer_thread(void);
1672
+ void rb_thread_stop_timer_thread(void);
1673
+ void rb_thread_reset_timer_thread(void);
1674
+ void rb_thread_wakeup_timer_thread(int);
1675
+
1676
+ static inline void
1677
+ rb_vm_living_threads_init(rb_vm_t *vm)
1678
+ {
1679
+ list_head_init(&vm->waiting_fds);
1680
+ list_head_init(&vm->waiting_pids);
1681
+ list_head_init(&vm->workqueue);
1682
+ list_head_init(&vm->waiting_grps);
1683
+ list_head_init(&vm->living_threads);
1684
+ vm->living_thread_num = 0;
1685
+ }
1686
+
1687
+ static inline void
1688
+ rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
1689
+ {
1690
+ list_add_tail(&vm->living_threads, &th->vmlt_node);
1691
+ vm->living_thread_num++;
1692
+ }
1693
+
1694
+ static inline void
1695
+ rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
1696
+ {
1697
+ list_del(&th->vmlt_node);
1698
+ vm->living_thread_num--;
1699
+ }
1700
+
1701
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1702
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1703
+ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1704
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
1705
+ VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method);
1706
+ void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1707
+ void ruby_thread_init_stack(rb_thread_t *th);
1708
+ int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1709
+ void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1710
+ MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1711
+
1712
+ void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1713
+
1714
+ #define rb_vm_register_special_exception(sp, e, m) \
1715
+ rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1716
+
1717
+ void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
1718
+
1719
+ void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1720
+
1721
+ MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1722
+
1723
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1724
+
1725
+ #define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
1726
+ #define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
1727
+ (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
1728
+ !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
1729
+ ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
1730
+ #define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
1731
+ if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
1732
+ #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
1733
+ WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
1734
+ #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1735
+ WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
1736
+
1737
+ VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1738
+
1739
+ /* for thread */
1740
+
1741
+ #if RUBY_VM_THREAD_MODEL == 2
1742
+ RUBY_SYMBOL_EXPORT_BEGIN
1743
+
1744
+ RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1745
+ RUBY_EXTERN rb_execution_context_t *ruby_current_execution_context_ptr;
1746
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1747
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1748
+ RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1749
+
1750
+ RUBY_SYMBOL_EXPORT_END
1751
+
1752
+ #define GET_VM() rb_current_vm()
1753
+ #define GET_THREAD() rb_current_thread()
1754
+ #define GET_EC() rb_current_execution_context()
1755
+
1756
+ static inline rb_thread_t *
1757
+ rb_ec_thread_ptr(const rb_execution_context_t *ec)
1758
+ {
1759
+ return ec->thread_ptr;
1760
+ }
1761
+
1762
+ static inline rb_vm_t *
1763
+ rb_ec_vm_ptr(const rb_execution_context_t *ec)
1764
+ {
1765
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
1766
+ if (th) {
1767
+ return th->vm;
1768
+ }
1769
+ else {
1770
+ return NULL;
1771
+ }
1772
+ }
1773
+
1774
+ static inline rb_execution_context_t *
1775
+ rb_current_execution_context(void)
1776
+ {
1777
+ return ruby_current_execution_context_ptr;
1778
+ }
1779
+
1780
+ static inline rb_thread_t *
1781
+ rb_current_thread(void)
1782
+ {
1783
+ const rb_execution_context_t *ec = GET_EC();
1784
+ return rb_ec_thread_ptr(ec);
1785
+ }
1786
+
1787
+ static inline rb_vm_t *
1788
+ rb_current_vm(void)
1789
+ {
1790
+ VM_ASSERT(ruby_current_vm_ptr == NULL ||
1791
+ ruby_current_execution_context_ptr == NULL ||
1792
+ rb_ec_thread_ptr(GET_EC()) == NULL ||
1793
+ rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
1794
+ return ruby_current_vm_ptr;
1795
+ }
1796
+
1797
+ static inline void
1798
+ rb_thread_set_current_raw(const rb_thread_t *th)
1799
+ {
1800
+ ruby_current_execution_context_ptr = th->ec;
1801
+ }
1802
+
1803
+ static inline void
1804
+ rb_thread_set_current(rb_thread_t *th)
1805
+ {
1806
+ if (th->vm->running_thread != th) {
1807
+ th->running_time_us = 0;
1808
+ }
1809
+ rb_thread_set_current_raw(th);
1810
+ th->vm->running_thread = th;
1811
+ }
1812
+
1813
+ #else
1814
+ #error "unsupported thread model"
1815
+ #endif
1816
+
1817
+ enum {
1818
+ TIMER_INTERRUPT_MASK = 0x01,
1819
+ PENDING_INTERRUPT_MASK = 0x02,
1820
+ POSTPONED_JOB_INTERRUPT_MASK = 0x04,
1821
+ TRAP_INTERRUPT_MASK = 0x08
1822
+ };
1823
+
1824
+ #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
1825
+ #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
1826
+ #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1827
+ #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
1828
+ #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
1829
+ (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
1830
+ #define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
1831
+
1832
+ VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
1833
+ int rb_signal_buff_size(void);
1834
+ int rb_signal_exec(rb_thread_t *th, int sig);
1835
+ void rb_threadptr_check_signal(rb_thread_t *mth);
1836
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1837
+ void rb_threadptr_signal_exit(rb_thread_t *th);
1838
+ int rb_threadptr_execute_interrupts(rb_thread_t *, int);
1839
+ void rb_threadptr_interrupt(rb_thread_t *th);
1840
+ void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
1841
+ void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
1842
+ void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
1843
+ void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1844
+ void rb_execution_context_update(const rb_execution_context_t *ec);
1845
+ void rb_execution_context_mark(const rb_execution_context_t *ec);
1846
+ void rb_fiber_close(rb_fiber_t *fib);
1847
+ void Init_native_thread(rb_thread_t *th);
1848
+
1849
+ #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
1850
+ static inline void
1851
+ rb_vm_check_ints(rb_execution_context_t *ec)
1852
+ {
1853
+ VM_ASSERT(ec == GET_EC());
1854
+ if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
1855
+ rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
1856
+ }
1857
+ }
1858
+
1859
+ /* tracer */
1860
+
1861
+ struct rb_trace_arg_struct {
1862
+ rb_event_flag_t event;
1863
+ rb_execution_context_t *ec;
1864
+ const rb_control_frame_t *cfp;
1865
+ VALUE self;
1866
+ ID id;
1867
+ ID called_id;
1868
+ VALUE klass;
1869
+ VALUE data;
1870
+
1871
+ int klass_solved;
1872
+
1873
+ /* calc from cfp */
1874
+ int lineno;
1875
+ VALUE path;
1876
+ };
1877
+
1878
+ void rb_hook_list_mark(rb_hook_list_t *hooks);
1879
+ void rb_hook_list_free(rb_hook_list_t *hooks);
1880
+ void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
1881
+ void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
1882
+
1883
+ void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
1884
+
1885
+ #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
1886
+ const rb_event_flag_t flag_arg_ = (flag_); \
1887
+ rb_hook_list_t *hooks_arg_ = (hooks_); \
1888
+ if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
1889
+ /* defer evaluating the other arguments */ \
1890
+ rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
1891
+ } \
1892
+ } while (0)
1893
+
1894
+ static inline void
1895
+ rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
1896
+ VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
1897
+ {
1898
+ struct rb_trace_arg_struct trace_arg;
1899
+
1900
+ VM_ASSERT((hooks->events & flag) != 0);
1901
+
1902
+ trace_arg.event = flag;
1903
+ trace_arg.ec = ec;
1904
+ trace_arg.cfp = ec->cfp;
1905
+ trace_arg.self = self;
1906
+ trace_arg.id = id;
1907
+ trace_arg.called_id = called_id;
1908
+ trace_arg.klass = klass;
1909
+ trace_arg.data = data;
1910
+ trace_arg.path = Qundef;
1911
+ trace_arg.klass_solved = 0;
1912
+
1913
+ rb_exec_event_hooks(&trace_arg, hooks, pop_p);
1914
+ }
1915
+
1916
+ static inline rb_hook_list_t *
1917
+ rb_vm_global_hooks(const rb_execution_context_t *ec)
1918
+ {
1919
+ return &rb_ec_vm_ptr(ec)->global_hooks;
1920
+ }
1921
+
1922
+ #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
1923
+ EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
1924
+
1925
+ #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
1926
+ EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
1927
+
1928
+ static inline void
1929
+ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
1930
+ {
1931
+ EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
1932
+ NIL_P(eval_script) ? (VALUE)iseq :
1933
+ rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
1934
+ }
1935
+
1936
+ void rb_vm_trap_exit(rb_vm_t *vm);
1937
+
1938
+ RUBY_SYMBOL_EXPORT_BEGIN
1939
+
1940
+ int rb_thread_check_trap_pending(void);
1941
+
1942
+ /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
1943
+ #define RUBY_EVENT_COVERAGE_LINE 0x010000
1944
+ #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
1945
+
1946
+ extern VALUE rb_get_coverages(void);
1947
+ extern void rb_set_coverages(VALUE, int, VALUE);
1948
+ extern void rb_clear_coverages(void);
1949
+ extern void rb_reset_coverages(void);
1950
+
1951
+ void rb_postponed_job_flush(rb_vm_t *vm);
1952
+
1953
+ RUBY_SYMBOL_EXPORT_END
1954
+
1955
+ #endif /* RUBY_VM_CORE_H */