debase-ruby_core_source 3.4.1 → 4.0.0.preview2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4 -0
  3. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/addr2line.h +22 -0
  4. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/builtin.h +125 -0
  5. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ccan/build_assert/build_assert.h +40 -0
  6. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ccan/check_type/check_type.h +63 -0
  7. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ccan/container_of/container_of.h +142 -0
  8. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ccan/list/list.h +791 -0
  9. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ccan/str/str.h +17 -0
  10. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/constant.h +53 -0
  11. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/darray.h +278 -0
  12. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/debug_counter.h +425 -0
  13. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/dln.h +33 -0
  14. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/encindex.h +70 -0
  15. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/eval_intern.h +331 -0
  16. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/hrtime.h +237 -0
  17. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/id.h +354 -0
  18. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/id_table.h +54 -0
  19. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/insns.inc +302 -0
  20. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/insns_info.inc +11241 -0
  21. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/array.h +154 -0
  22. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/basic_operators.h +65 -0
  23. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/bignum.h +245 -0
  24. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/bits.h +650 -0
  25. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/box.h +81 -0
  26. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/class.h +805 -0
  27. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/cmdlineopt.h +64 -0
  28. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/compar.h +29 -0
  29. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/compile.h +34 -0
  30. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/compilers.h +107 -0
  31. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/complex.h +29 -0
  32. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/concurrent_set.h +21 -0
  33. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/cont.h +35 -0
  34. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/dir.h +16 -0
  35. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/enc.h +19 -0
  36. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/encoding.h +39 -0
  37. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/enum.h +18 -0
  38. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/enumerator.h +21 -0
  39. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/error.h +251 -0
  40. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/eval.h +41 -0
  41. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/file.h +38 -0
  42. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/fixnum.h +185 -0
  43. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/gc.h +358 -0
  44. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/hash.h +194 -0
  45. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/imemo.h +322 -0
  46. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/inits.h +51 -0
  47. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/io.h +163 -0
  48. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/load.h +20 -0
  49. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/loadpath.h +16 -0
  50. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/math.h +23 -0
  51. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/missing.h +19 -0
  52. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/numeric.h +275 -0
  53. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/object.h +63 -0
  54. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/parse.h +131 -0
  55. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/proc.h +30 -0
  56. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/process.h +124 -0
  57. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/ractor.h +10 -0
  58. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/random.h +17 -0
  59. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/range.h +40 -0
  60. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/rational.h +71 -0
  61. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/re.h +33 -0
  62. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/ruby_parser.h +102 -0
  63. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/sanitizers.h +346 -0
  64. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/serial.h +23 -0
  65. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/set_table.h +70 -0
  66. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/signal.h +25 -0
  67. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/st.h +11 -0
  68. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/static_assert.h +16 -0
  69. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/string.h +203 -0
  70. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/struct.h +160 -0
  71. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/symbol.h +46 -0
  72. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/thread.h +112 -0
  73. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/time.h +37 -0
  74. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/transcode.h +23 -0
  75. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/util.h +27 -0
  76. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/variable.h +74 -0
  77. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/vm.h +136 -0
  78. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal/warnings.h +16 -0
  79. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/internal.h +105 -0
  80. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/iseq.h +357 -0
  81. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/known_errors.inc +1419 -0
  82. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/method.h +271 -0
  83. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/node.h +122 -0
  84. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/node_name.inc +224 -0
  85. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/optinsn.inc +128 -0
  86. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/optunifs.inc +41 -0
  87. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/parse.h +244 -0
  88. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/parser_bits.h +564 -0
  89. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/parser_node.h +32 -0
  90. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/parser_st.h +162 -0
  91. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/parser_value.h +106 -0
  92. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/ast.h +8233 -0
  93. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/defines.h +260 -0
  94. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/diagnostic.h +458 -0
  95. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/encoding.h +283 -0
  96. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/extension.h +19 -0
  97. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/node.h +129 -0
  98. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/options.h +485 -0
  99. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/pack.h +163 -0
  100. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/parser.h +936 -0
  101. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/prettyprint.h +34 -0
  102. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/prism.h +408 -0
  103. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/regexp.h +43 -0
  104. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/static_literals.h +121 -0
  105. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_buffer.h +236 -0
  106. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_char.h +204 -0
  107. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_constant_pool.h +218 -0
  108. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_integer.h +130 -0
  109. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_list.h +103 -0
  110. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_memchr.h +29 -0
  111. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_newline_list.h +113 -0
  112. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_string.h +200 -0
  113. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_strncasecmp.h +32 -0
  114. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/util/pm_strpbrk.h +46 -0
  115. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism/version.h +29 -0
  116. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/prism_compile.h +106 -0
  117. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/probes_helper.h +42 -0
  118. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ractor_core.h +306 -0
  119. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/regenc.h +259 -0
  120. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/regint.h +1005 -0
  121. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/regparse.h +371 -0
  122. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/revision.h +5 -0
  123. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ruby_assert.h +14 -0
  124. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/ruby_atomic.h +66 -0
  125. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/rubyparser.h +1394 -0
  126. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/shape.h +444 -0
  127. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/siphash.h +48 -0
  128. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/symbol.h +116 -0
  129. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/thread_none.h +21 -0
  130. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/thread_pthread.h +175 -0
  131. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/thread_win32.h +58 -0
  132. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/timev.h +58 -0
  133. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/transcode_data.h +138 -0
  134. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/variable.h +27 -0
  135. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/version.h +69 -0
  136. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm.inc +6183 -0
  137. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_call_iseq_optimized.inc +244 -0
  138. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_callinfo.h +640 -0
  139. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_core.h +2350 -0
  140. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_debug.h +124 -0
  141. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_exec.h +210 -0
  142. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_insnhelper.h +277 -0
  143. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_opts.h +67 -0
  144. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vm_sync.h +156 -0
  145. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/vmtc.inc +289 -0
  146. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/yjit.h +81 -0
  147. data/lib/debase/ruby_core_source/ruby-4.0.0-preview2/zjit.h +47 -0
  148. data/lib/debase/ruby_core_source/version.rb +1 -1
  149. metadata +148 -3
@@ -0,0 +1,2350 @@
1
+ #ifndef RUBY_VM_CORE_H
2
+ #define RUBY_VM_CORE_H
3
+ /**********************************************************************
4
+
5
+ vm_core.h -
6
+
7
+ $Author$
8
+ created at: 04/01/01 19:41:38 JST
9
+
10
+ Copyright (C) 2004-2007 Koichi Sasada
11
+
12
+ **********************************************************************/
13
+
14
+ /*
15
+ * Enable check mode.
16
+ * 1: enable local assertions.
17
+ */
18
+ #ifndef VM_CHECK_MODE
19
+
20
+ // respect RUBY_DUBUG: if given n is 0, then use RUBY_DEBUG
21
+ #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
+
23
+ #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24
+ #endif
25
+
26
+ /**
27
+ * VM Debug Level
28
+ *
29
+ * debug level:
30
+ * 0: no debug output
31
+ * 1: show instruction name
32
+ * 2: show stack frame when control stack frame is changed
33
+ * 3: show stack status
34
+ * 4: show register
35
+ * 5:
36
+ * 10: gc check
37
+ */
38
+
39
+ #ifndef VMDEBUG
40
+ #define VMDEBUG 0
41
+ #endif
42
+
43
+ #if 0
44
+ #undef VMDEBUG
45
+ #define VMDEBUG 3
46
+ #endif
47
+
48
+ #include "ruby/internal/config.h"
49
+
50
+ #include <stddef.h>
51
+ #include <signal.h>
52
+ #include <stdarg.h>
53
+
54
+ #include "ruby_assert.h"
55
+
56
+ #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
+
58
+ #if VM_CHECK_MODE > 0
59
+ #define VM_ASSERT(expr, ...) \
60
+ RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61
+ #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62
+ #define RUBY_ASSERT_CRITICAL_SECTION
63
+ #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64
+ #else
65
+ #define VM_ASSERT(/*expr, */...) ((void)0)
66
+ #define VM_UNREACHABLE(func) UNREACHABLE
67
+ #define RUBY_DEBUG_THREAD_SCHEDULE()
68
+ #endif
69
+
70
+ #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
+
72
+ #if defined(RUBY_ASSERT_CRITICAL_SECTION)
73
+ /*
74
+ # Critical Section Assertions
75
+
76
+ These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
77
+ such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
78
+
79
+ The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
80
+ may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
81
+ held by someone else.
82
+
83
+ These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84
+ is set.
85
+
86
+ ## Example Usage
87
+
88
+ ```c
89
+ RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90
+ // ... some code which does not invoke rb_vm_check_ints() ...
91
+ RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92
+ ```
93
+
94
+ If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95
+ `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96
+ */
97
+ extern int ruby_assert_critical_section_entered;
98
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100
+ #else
101
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103
+ #endif
104
+
105
+ #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106
+ # include "wasm/setjmp.h"
107
+ #else
108
+ # include <setjmp.h>
109
+ #endif
110
+
111
+ #if defined(__linux__) || defined(__FreeBSD__)
112
+ # define RB_THREAD_T_HAS_NATIVE_ID
113
+ #endif
114
+
115
+ #include "ruby/internal/stdbool.h"
116
+ #include "ccan/list/list.h"
117
+ #include "id.h"
118
+ #include "internal.h"
119
+ #include "internal/array.h"
120
+ #include "internal/basic_operators.h"
121
+ #include "internal/box.h"
122
+ #include "internal/sanitizers.h"
123
+ #include "internal/serial.h"
124
+ #include "internal/set_table.h"
125
+ #include "internal/vm.h"
126
+ #include "method.h"
127
+ #include "node.h"
128
+ #include "ruby/ruby.h"
129
+ #include "ruby/st.h"
130
+ #include "ruby_atomic.h"
131
+ #include "vm_opts.h"
132
+
133
+ #include "ruby/thread_native.h"
134
+ /*
135
+ * implementation selector of get_insn_info algorithm
136
+ * 0: linear search
137
+ * 1: binary search
138
+ * 2: succinct bitvector
139
+ */
140
+ #ifndef VM_INSN_INFO_TABLE_IMPL
141
+ # define VM_INSN_INFO_TABLE_IMPL 2
142
+ #endif
143
+
144
+ #if defined(NSIG_MAX) /* POSIX issue 8 */
145
+ # undef NSIG
146
+ # define NSIG NSIG_MAX
147
+ #elif defined(_SIG_MAXSIG) /* FreeBSD */
148
+ # undef NSIG
149
+ # define NSIG _SIG_MAXSIG
150
+ #elif defined(_SIGMAX) /* QNX */
151
+ # define NSIG (_SIGMAX + 1)
152
+ #elif defined(NSIG) /* 99% of everything else */
153
+ # /* take it */
154
+ #else /* Last resort */
155
+ # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156
+ #endif
157
+
158
+ #define RUBY_NSIG NSIG
159
+
160
+ #if defined(SIGCLD)
161
+ # define RUBY_SIGCHLD (SIGCLD)
162
+ #elif defined(SIGCHLD)
163
+ # define RUBY_SIGCHLD (SIGCHLD)
164
+ #endif
165
+
166
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167
+ # define USE_SIGALTSTACK
168
+ void *rb_allocate_sigaltstack(void);
169
+ void *rb_register_sigaltstack(void *);
170
+ # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171
+ # define RB_ALTSTACK_FREE(var) free(var)
172
+ # define RB_ALTSTACK(var) var
173
+ #else /* noop */
174
+ # define RB_ALTSTACK_INIT(var, altstack)
175
+ # define RB_ALTSTACK_FREE(var)
176
+ # define RB_ALTSTACK(var) (0)
177
+ #endif
178
+
179
+ #include THREAD_IMPL_H
180
+ #define RUBY_VM_THREAD_MODEL 2
181
+
182
+ /*****************/
183
+ /* configuration */
184
+ /*****************/
185
+
186
+ /* gcc ver. check */
187
+ #if defined(__GNUC__) && __GNUC__ >= 2
188
+
189
+ #if OPT_TOKEN_THREADED_CODE
190
+ #if OPT_DIRECT_THREADED_CODE
191
+ #undef OPT_DIRECT_THREADED_CODE
192
+ #endif
193
+ #endif
194
+
195
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
+
197
+ /* disable threaded code options */
198
+ #if OPT_DIRECT_THREADED_CODE
199
+ #undef OPT_DIRECT_THREADED_CODE
200
+ #endif
201
+ #if OPT_TOKEN_THREADED_CODE
202
+ #undef OPT_TOKEN_THREADED_CODE
203
+ #endif
204
+ #endif
205
+
206
+ /* call threaded code */
207
+ #if OPT_CALL_THREADED_CODE
208
+ #if OPT_DIRECT_THREADED_CODE
209
+ #undef OPT_DIRECT_THREADED_CODE
210
+ #endif /* OPT_DIRECT_THREADED_CODE */
211
+ #endif /* OPT_CALL_THREADED_CODE */
212
+
213
+ void rb_vm_encoded_insn_data_table_init(void);
214
+ typedef unsigned long rb_num_t;
215
+ typedef signed long rb_snum_t;
216
+
217
+ enum ruby_tag_type {
218
+ RUBY_TAG_NONE = 0x0,
219
+ RUBY_TAG_RETURN = 0x1,
220
+ RUBY_TAG_BREAK = 0x2,
221
+ RUBY_TAG_NEXT = 0x3,
222
+ RUBY_TAG_RETRY = 0x4,
223
+ RUBY_TAG_REDO = 0x5,
224
+ RUBY_TAG_RAISE = 0x6,
225
+ RUBY_TAG_THROW = 0x7,
226
+ RUBY_TAG_FATAL = 0x8,
227
+ RUBY_TAG_MASK = 0xf
228
+ };
229
+
230
+ #define TAG_NONE RUBY_TAG_NONE
231
+ #define TAG_RETURN RUBY_TAG_RETURN
232
+ #define TAG_BREAK RUBY_TAG_BREAK
233
+ #define TAG_NEXT RUBY_TAG_NEXT
234
+ #define TAG_RETRY RUBY_TAG_RETRY
235
+ #define TAG_REDO RUBY_TAG_REDO
236
+ #define TAG_RAISE RUBY_TAG_RAISE
237
+ #define TAG_THROW RUBY_TAG_THROW
238
+ #define TAG_FATAL RUBY_TAG_FATAL
239
+ #define TAG_MASK RUBY_TAG_MASK
240
+
241
+ enum ruby_vm_throw_flags {
242
+ VM_THROW_NO_ESCAPE_FLAG = 0x8000,
243
+ VM_THROW_STATE_MASK = 0xff
244
+ };
245
+
246
+ /* forward declarations */
247
+ struct rb_thread_struct;
248
+ struct rb_control_frame_struct;
249
+
250
+ /* iseq data type */
251
+ typedef struct rb_compile_option_struct rb_compile_option_t;
252
+
253
+ union ic_serial_entry {
254
+ rb_serial_t raw;
255
+ VALUE data[2];
256
+ };
257
+
258
+ #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
+
260
+ // imemo_constcache
261
+ struct iseq_inline_constant_cache_entry {
262
+ VALUE flags;
263
+
264
+ VALUE value;
265
+ const rb_cref_t *ic_cref;
266
+ };
267
+ STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268
+ (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269
+ sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270
+
271
+ struct iseq_inline_constant_cache {
272
+ struct iseq_inline_constant_cache_entry *entry;
273
+
274
+ /**
275
+ * A null-terminated list of ids, used to represent a constant's path
276
+ * idNULL is used to represent the :: prefix, and 0 is used to donate the end
277
+ * of the list.
278
+ *
279
+ * For example
280
+ * FOO {rb_intern("FOO"), 0}
281
+ * FOO::BAR {rb_intern("FOO"), rb_intern("BAR"), 0}
282
+ * ::FOO {idNULL, rb_intern("FOO"), 0}
283
+ * ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
284
+ */
285
+ const ID *segments;
286
+ };
287
+
288
+ struct iseq_inline_iv_cache_entry {
289
+ uint64_t value; // dest_shape_id in former half, attr_index in latter half
290
+ ID iv_set_name;
291
+ };
292
+
293
+ struct iseq_inline_cvar_cache_entry {
294
+ struct rb_cvar_class_tbl_entry *entry;
295
+ };
296
+
297
+ union iseq_inline_storage_entry {
298
+ struct {
299
+ struct rb_thread_struct *running_thread;
300
+ VALUE value;
301
+ } once;
302
+ struct iseq_inline_constant_cache ic_cache;
303
+ struct iseq_inline_iv_cache_entry iv_cache;
304
+ };
305
+
306
+ struct rb_calling_info {
307
+ const struct rb_call_data *cd;
308
+ const struct rb_callcache *cc;
309
+ VALUE block_handler;
310
+ VALUE recv;
311
+ int argc;
312
+ bool kw_splat;
313
+ VALUE heap_argv;
314
+ };
315
+
316
+ #ifndef VM_ARGC_STACK_MAX
317
+ #define VM_ARGC_STACK_MAX 128
318
+ #endif
319
+
320
+ # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
321
+
322
+ struct rb_execution_context_struct;
323
+
324
+ #if 1
325
+ #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326
+ #else
327
+ #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328
+ #endif
329
+ #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330
+
331
+ typedef struct rb_iseq_location_struct {
332
+ VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333
+ VALUE base_label; /* String */
334
+ VALUE label; /* String */
335
+ int first_lineno;
336
+ int node_id;
337
+ rb_code_location_t code_location;
338
+ } rb_iseq_location_t;
339
+
340
+ #define PATHOBJ_PATH 0
341
+ #define PATHOBJ_REALPATH 1
342
+
343
+ static inline VALUE
344
+ pathobj_path(VALUE pathobj)
345
+ {
346
+ if (RB_TYPE_P(pathobj, T_STRING)) {
347
+ return pathobj;
348
+ }
349
+ else {
350
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351
+ return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352
+ }
353
+ }
354
+
355
+ static inline VALUE
356
+ pathobj_realpath(VALUE pathobj)
357
+ {
358
+ if (RB_TYPE_P(pathobj, T_STRING)) {
359
+ return pathobj;
360
+ }
361
+ else {
362
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363
+ return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364
+ }
365
+ }
366
+
367
+ /* Forward declarations */
368
+ typedef uintptr_t iseq_bits_t;
369
+
370
+ #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
371
+
372
+ /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
373
+ #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
374
+
375
+ /* instruction sequence type */
376
+ enum rb_iseq_type {
377
+ ISEQ_TYPE_TOP,
378
+ ISEQ_TYPE_METHOD,
379
+ ISEQ_TYPE_BLOCK,
380
+ ISEQ_TYPE_CLASS,
381
+ ISEQ_TYPE_RESCUE,
382
+ ISEQ_TYPE_ENSURE,
383
+ ISEQ_TYPE_EVAL,
384
+ ISEQ_TYPE_MAIN,
385
+ ISEQ_TYPE_PLAIN
386
+ };
387
+
388
+ // Attributes specified by Primitive.attr!
389
+ enum rb_builtin_attr {
390
+ // The iseq does not call methods.
391
+ BUILTIN_ATTR_LEAF = 0x01,
392
+ // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
393
+ BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
394
+ // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
395
+ BUILTIN_ATTR_INLINE_BLOCK = 0x04,
396
+ // The iseq acts like a C method in backtraces.
397
+ BUILTIN_ATTR_C_TRACE = 0x08,
398
+ };
399
+
400
+ typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
401
+ typedef VALUE (*rb_zjit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *, rb_jit_func_t);
402
+
403
+ struct rb_iseq_constant_body {
404
+ enum rb_iseq_type type;
405
+
406
+ unsigned int iseq_size;
407
+ VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
408
+
409
+ /**
410
+ * parameter information
411
+ *
412
+ * def m(a1, a2, ..., aM, # mandatory
413
+ * b1=(...), b2=(...), ..., bN=(...), # optional
414
+ * *c, # rest
415
+ * d1, d2, ..., dO, # post
416
+ * e1:(...), e2:(...), ..., eK:(...), # keyword
417
+ * **f, # keyword_rest
418
+ * &g) # block
419
+ * =>
420
+ *
421
+ * lead_num = M
422
+ * opt_num = N
423
+ * rest_start = M+N
424
+ * post_start = M+N+(*1)
425
+ * post_num = O
426
+ * keyword_num = K
427
+ * block_start = M+N+(*1)+O+K
428
+ * keyword_bits = M+N+(*1)+O+K+(&1)
429
+ * size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
430
+ */
431
+
432
+ struct {
433
+ struct {
434
+ unsigned int has_lead : 1;
435
+ unsigned int has_opt : 1;
436
+ unsigned int has_rest : 1;
437
+ unsigned int has_post : 1;
438
+ unsigned int has_kw : 1;
439
+ unsigned int has_kwrest : 1;
440
+ unsigned int has_block : 1;
441
+
442
+ unsigned int ambiguous_param0 : 1; /* {|a|} */
443
+ unsigned int accepts_no_kwarg : 1;
444
+ unsigned int ruby2_keywords: 1;
445
+ unsigned int anon_rest: 1;
446
+ unsigned int anon_kwrest: 1;
447
+ unsigned int use_block: 1;
448
+ unsigned int forwardable: 1;
449
+ } flags;
450
+
451
+ unsigned int size;
452
+
453
+ int lead_num;
454
+ int opt_num;
455
+ int rest_start;
456
+ int post_start;
457
+ int post_num;
458
+ int block_start;
459
+
460
+ const VALUE *opt_table; /* (opt_num + 1) entries. */
461
+ /* opt_num and opt_table:
462
+ *
463
+ * def foo o1=e1, o2=e2, ..., oN=eN
464
+ * #=>
465
+ * # prologue code
466
+ * A1: e1
467
+ * A2: e2
468
+ * ...
469
+ * AN: eN
470
+ * AL: body
471
+ * opt_num = N
472
+ * opt_table = [A1, A2, ..., AN, AL]
473
+ */
474
+
475
+ const struct rb_iseq_param_keyword {
476
+ int num;
477
+ int required_num;
478
+ int bits_start;
479
+ int rest_start;
480
+ const ID *table;
481
+ VALUE *default_values;
482
+ } *keyword;
483
+ } param;
484
+
485
+ rb_iseq_location_t location;
486
+
487
+ /* insn info, must be freed */
488
+ struct iseq_insn_info {
489
+ const struct iseq_insn_info_entry *body;
490
+ unsigned int *positions;
491
+ unsigned int size;
492
+ #if VM_INSN_INFO_TABLE_IMPL == 2
493
+ struct succ_index_table *succ_index_table;
494
+ #endif
495
+ } insns_info;
496
+
497
+ const ID *local_table; /* must free */
498
+
499
+ enum lvar_state {
500
+ lvar_uninitialized,
501
+ lvar_initialized,
502
+ lvar_reassigned,
503
+ } *lvar_states;
504
+
505
+ /* catch table */
506
+ struct iseq_catch_table *catch_table;
507
+
508
+ /* for child iseq */
509
+ const struct rb_iseq_struct *parent_iseq;
510
+ struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
511
+
512
+ union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
513
+ struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
514
+
515
+ struct {
516
+ rb_snum_t flip_count;
517
+ VALUE script_lines;
518
+ VALUE coverage;
519
+ VALUE pc2branchindex;
520
+ VALUE *original_iseq;
521
+ } variable;
522
+
523
+ unsigned int local_table_size;
524
+ unsigned int ic_size; // Number of IC caches
525
+ unsigned int ise_size; // Number of ISE caches
526
+ unsigned int ivc_size; // Number of IVC caches
527
+ unsigned int icvarc_size; // Number of ICVARC caches
528
+ unsigned int ci_size;
529
+ unsigned int stack_max; /* for stack overflow check */
530
+
531
+ unsigned int builtin_attrs; // Union of rb_builtin_attr
532
+
533
+ bool prism; // ISEQ was generated from prism compiler
534
+
535
+ union {
536
+ iseq_bits_t * list; /* Find references for GC */
537
+ iseq_bits_t single;
538
+ } mark_bits;
539
+
540
+ struct rb_id_table *outer_variables;
541
+
542
+ const rb_iseq_t *mandatory_only_iseq;
543
+
544
+ #if USE_YJIT || USE_ZJIT
545
+ // Function pointer for JIT code on jit_exec()
546
+ rb_jit_func_t jit_entry;
547
+ // Number of calls on jit_exec()
548
+ long unsigned jit_entry_calls;
549
+ // Function pointer for JIT code on jit_exec_exception()
550
+ rb_jit_func_t jit_exception;
551
+ // Number of calls on jit_exec_exception()
552
+ long unsigned jit_exception_calls;
553
+ #endif
554
+
555
+ #if USE_YJIT
556
+ // YJIT stores some data on each iseq.
557
+ void *yjit_payload;
558
+ // Used to estimate how frequently this ISEQ gets called
559
+ uint64_t yjit_calls_at_interv;
560
+ #endif
561
+
562
+ #if USE_ZJIT
563
+ // ZJIT stores some data on each iseq.
564
+ void *zjit_payload;
565
+ #endif
566
+ };
567
+
568
+ /* T_IMEMO/iseq */
569
+ /* typedef rb_iseq_t is in method.h */
570
+ struct rb_iseq_struct {
571
+ VALUE flags; /* 1 */
572
+ VALUE wrapper; /* 2 */
573
+
574
+ struct rb_iseq_constant_body *body; /* 3 */
575
+
576
+ union { /* 4, 5 words */
577
+ struct iseq_compile_data *compile_data; /* used at compile time */
578
+
579
+ struct {
580
+ VALUE obj;
581
+ int index;
582
+ } loader;
583
+
584
+ struct {
585
+ struct rb_hook_list_struct *local_hooks;
586
+ rb_event_flag_t global_trace_events;
587
+ } exec;
588
+ } aux;
589
+ };
590
+
591
+ #define ISEQ_BODY(iseq) ((iseq)->body)
592
+
593
+ #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
594
+ #define USE_LAZY_LOAD 0
595
+ #endif
596
+
597
+ #if !USE_LAZY_LOAD
598
+ static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
599
+ #endif
600
+ const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
601
+
602
+ static inline const rb_iseq_t *
603
+ rb_iseq_check(const rb_iseq_t *iseq)
604
+ {
605
+ if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
606
+ rb_iseq_complete((rb_iseq_t *)iseq);
607
+ }
608
+ return iseq;
609
+ }
610
+
611
+ static inline bool
612
+ rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
613
+ {
614
+ return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
615
+ }
616
+
617
+ static inline const rb_iseq_t *
618
+ def_iseq_ptr(rb_method_definition_t *def)
619
+ {
620
+ //TODO: re-visit. to check the bug, enable this assertion.
621
+ #if VM_CHECK_MODE > 0
622
+ if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
623
+ #endif
624
+ return rb_iseq_check(def->body.iseq.iseqptr);
625
+ }
626
+
627
/* Indices of pre-allocated exception objects (kept in
 * rb_vm_t::special_exceptions) for situations where allocating a fresh
 * exception is not an option, e.g. out-of-memory or stack overflow. */
enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count      /* number of entries; also array size */
};

#define GetVMPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

/* Singly linked list of callbacks run at VM shutdown
 * (see rb_vm_t::at_exit). */
typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

/* A list of event hooks with bookkeeping state; `is_local` distinguishes
 * per-target hook lists from the global one. */
typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;       /* union of events the hooks listen to */
    unsigned int running;
    bool need_clean;              /* deleted hooks pending removal */
    bool is_local;
} rb_hook_list_t;


// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

/* Linked list of addresses of VALUE variables registered as GC roots. */
struct global_object_list {
    VALUE *varptr;
    struct global_object_list *next;
};
667
+
668
/* Process-global VM state: the ractor/thread registry and its
 * synchronization, GC hookup, signal traps, method/constant caches, and
 * tunable stack-size parameters. Retrieved from a VALUE via GetVMPtr. */
typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;      /* all ractors */
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;          /* recursive lock count */

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            // win32
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
#endif
        } sync;

#ifdef RUBY_THREAD_PTHREAD_H
        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if timeslice timer is not enable
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
            struct rb_ractor_struct *barrier_ractor;
            unsigned int barrier_lock_rec;
        } sched;
#endif
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    struct global_object_list *global_object_list;
    const VALUE special_exceptions[ruby_special_error_count];

    /* Ruby Box */
    rb_box_t *root_box;
    rb_box_t *main_box;

    /* load */
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];       /* Signal.trap handlers, indexed by signo */
    } trap_list;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    struct {
        struct rb_objspace *objspace;
        struct gc_mark_func_data_struct {
            void *data;
            void (*mark_func)(VALUE v, void *data);
        } *mark_func_data;
    } gc;

    rb_at_exit_list *at_exit;       /* shutdown callbacks, see above */

    const struct rb_builtin_function *builtin_function_table;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme
    set_table *unused_block_warning_table;
    set_table *cc_refinement_table;

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* size in byte */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;
} rb_vm_t;
826
+
827
extern bool ruby_vm_during_cleanup;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

/* Stack sizes are expressed in VALUEs, so the byte totals in the trailing
 * comments come in pairs: 4-byte VALUE (32-bit) vs 8-byte VALUE (64-bit). */
#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif
866
+
867
/* A block captured from a frame: the receiver, the environment pointer,
 * and the code the block runs (iseq, ifunc, or a raw VALUE). */
struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

/* Tag of a block handler VALUE (see VM_BH_* helpers further below). */
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

/* Discriminator for struct rb_block's union. */
enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

/* Tagged union of the possible block representations. */
struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;    /* selects the active union member */
};
899
+
900
/* One VM control frame. The cfp[i] comments give each member's slot
 * index when the frame is viewed as an array of VALUE-sized words. */
typedef struct rb_control_frame_struct {
    const VALUE *pc;            // cfp[0]
    VALUE *sp;                  // cfp[1]
    const rb_iseq_t *iseq;      // cfp[2]
    VALUE self;                 // cfp[3] / block[0]
    const VALUE *ep;            // cfp[4] / block[1]
    const void *block_code;     // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;           // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;            // cfp[7]
#endif
} rb_control_frame_t;
912
+
913
extern const rb_data_type_t ruby_threadptr_data_type;

/* Unwrap a Thread VALUE into its rb_thread_struct pointer, type-checking
 * the typed-data wrapper (raises on mismatch via rb_check_typeddata). */
static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}
920
+
921
/* Coarse lifecycle states of a Ruby thread (stored in rb_thread_t's
 * `status` bitfield). */
enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

/* Platform jump buffer; falls back to a 5-pointer array when no
 * platform-specific RUBY_JMP_BUF is provided. */
#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif
933
+
934
/*
  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  A WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwind/rewind-ing
  call frames into a jump buffer. The buffer space tends
  to be considerably large unlike other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap on such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

/* Heap-allocate the underlying rb_jmpbuf_t. */
static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

/* Release the buffer allocated by rb_vm_tag_jmpbuf_init. */
static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
/* On all other targets the buffer lives inline; init/deinit are no-ops. */
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif
988
+
989
/*
  the members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;     /* enclosing tag (tags form a stack) */
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

/* Enforce the layout contract stated above: `buf` must be strictly
 * interior to the struct (members both before and after it). */
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

/* Callback used to unblock a thread stuck in a blocking region. */
struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};
1011
+
1012
struct rb_mutex_struct;

typedef struct rb_fiber_struct rb_fiber_t;

/* Linked list node of thread/fiber pairs waiting on another thread
 * (see rb_thread_t::join_list). */
struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};
1021
+
1022
/* Per-fiber execution state: the VM stack, current control frame, tag
 * stack, interrupt flags, fiber-local storage, and saved machine-stack
 * bounds/registers for the GC. One exists per fiber; a thread points at
 * the EC of its running fiber. */
struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;            /* must free, must mark */
    size_t vm_stack_size;       /* size in word (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;    /* current (innermost) control frame */

    struct rb_vm_tag *tag;      /* innermost EC_PUSH_TAG entry */

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag;        /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* one-entry cache for generic-fields lookups */
    struct {
        VALUE obj;
        VALUE fields_obj;
    } gen_fields_cache;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;   /* saved registers, scanned as roots */

#ifdef RUBY_ASAN_ENABLED
        void *asan_fake_stack_handle;
#endif
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif
1088
+
1089
// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);

/* Per-extension configuration flags. */
struct rb_ext_config {
    bool ractor_safe;
};
1108
+
1109
typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

/* A Ruby thread: links to its ractor, VM, native thread and execution
 * context, plus interrupt machinery, pending-interrupt queues, and the
 * argument bundle used when the thread is started. */
typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;     /* EC of the currently running fiber */

    struct rb_thread_sched_item sched;
    bool mn_schedulable;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;  /* waiters joining this thread */

    /* start arguments; active member selected by invoke_type below */
    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;
1200
+
1201
+ static inline unsigned int
1202
+ rb_th_serial(const rb_thread_t *th)
1203
+ {
1204
+ return th ? (unsigned int)th->serial : 0;
1205
+ }
1206
+
1207
/* Operand encoding of the `defineclass` instruction: the low 3 bits hold
 * the type, the bits above it hold the flags defined below. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1221
+
1222
/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq: constructors that compile an AST into an iseq for the
 * various compilation contexts (top-level, main, eval, ...). */
rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                 enum rb_iseq_type, const rb_compile_option_t*,
                                 VALUE script_lines);

struct iseq_link_anchor;

/* imemo_ifunc-shaped payload that carries a compilation callback; layout
 * mirrors an ifunc (flags + reserved slot before func/data). */
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};

/* Allocate (as an imemo_ifunc) a callback record bundling `func` with its
 * opaque `ptr` argument, for use with rb_iseq_new_with_callback below. */
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}

rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1255
+
1256
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_proc_t, (ptr))

/* Internal representation of a Proc: the captured block plus flags. */
typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
RUBY_SYMBOL_EXPORT_END
1282
+
1283
/* Heap-escaped environment (imemo_env): a copy of `env_size` stack slots
 * together with the ep that now points into it (see vm_env_new below). */
typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_binding_t, (ptr))

/* Internal representation of a Binding: the captured block plus source
 * location. */
typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;
1301
+
1302
/* used by compile time and send insn */

/* Operand of the `checkmatch` instruction; low 2 bits are the type,
 * VM_CHECKMATCH_ARRAY marks splat matching. */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04

/* Method selector for the opt_newarray_send instruction. */
enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};

/* Operand of the putspecialobject instruction. */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices into the special-variable (svar) storage. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;      /* Hash used by the case/when dispatch */
1346
+
1347
#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

/* Signature of a per-instruction function (used by call-threaded code). */
typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

/* Pointer tagging helpers: stash tag bits in a pointer's low bits. */
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

/* Bit 0x01 marks a "guarded" pointer so the GC treats the word as a
 * Fixnum-like immediate rather than an object reference. */
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1360
+
1361
enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
     *
     * X  : tag for GC marking (It seems as Fixnum)
     * EEE: 4 bits Env flags
     * FF..: 8 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,

    VM_FRAME_MAGIC_MASK = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,
    VM_FRAME_FLAG_BOX_REQUIRE = 0x1000,

    /* env flag */
    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,
};
1400
+
1401
/* Every environment ends with VM_ENV_DATA_SIZE bookkeeping slots,
 * addressed relative to ep with the indices below. */
#define VM_ENV_DATA_SIZE ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

/* OR `flag` into the env's flags word; the word must look like a Fixnum
 * (the GC-marking tag bit), hence the special-const write. */
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

/* Clear `flag` from the env's flags word (same Fixnum invariant). */
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}
1427
+
1428
/* Read the env flags word masked by `flag`, asserting the Fixnum
 * invariant on the stored word. */
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

/* As VM_ENV_FLAGS but without the assertion, for contexts where the
 * frame may be in an intermediate state. */
static inline unsigned long
VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
}
1442
+
1443
/* True when the env's frame-magic bits equal `frame_type`. */
static inline unsigned long
VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
{
    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
}

/* The frame's magic (one of the VM_FRAME_MAGIC_* values). */
static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline unsigned long
VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* Per-flag frame predicates; each just tests one VM_FRAME_FLAG_* bit. */
static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

/* True when `iseq` is an imemo of iseq type. */
static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

/* True for C frames; in check mode, also asserts consistency between the
 * flag and the frame's iseq (dummy frames are exempt). */
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
}

/* A "Ruby" frame is simply a non-C frame. */
static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

static inline int
VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
}

static inline int
VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
1539
+
1540
/* True for a "local" env: the outermost env of its frame (no prev ep). */
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline int
VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
{
    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

/* The enclosing env's ep, stored guarded in the specval slot. */
static inline const VALUE *
VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
{
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* As above, but asserts the env is not local (a local env has no prev). */
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return VM_ENV_PREV_EP_UNCHECKED(ep);
}

/* True for class/module and top-level frames, whose specval holds a box
 * pointer rather than a block handler. */
static inline bool
VM_ENV_BOXED_P(const VALUE *ep)
{
    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
}

/* The block handler stored in a local env's specval slot; boxed frames
 * never carry a block, so they answer VM_BLOCK_HANDLER_NONE. */
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    if (VM_ENV_BOXED_P(ep)) {
        VM_ASSERT(VM_ENV_LOCAL_P(ep));
        return VM_BLOCK_HANDLER_NONE;
    }

    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

/* The box pointer stored (guarded) in a boxed frame's specval slot. */
static inline const rb_box_t *
VM_ENV_BOX(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_BOXED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const rb_box_t *
VM_ENV_BOX_UNCHECKED(const VALUE *ep)
{
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif
1600
+
1601
/* True when the env has escaped to the heap; in check mode this is
 * cross-validated against the ep's actual location. */
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

/* The imemo_env VALUE stored in an escaped env's env slot (or Qundef). */
RBIMPL_ATTR_NONNULL((1))
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

RBIMPL_ATTR_NONNULL((1))
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

/* Allocate an imemo_env wrapping `env_body`/`env_ep` and install it into
 * the env slot so VM_ENV_ENVVAL finds it. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}
1635
+
1636
+ static inline void
1637
+ VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1638
+ {
1639
+ *((VALUE *)ptr) = v;
1640
+ }
1641
+
1642
+ static inline void
1643
+ VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1644
+ {
1645
+ VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1646
+ VM_FORCE_WRITE(ptr, special_const_value);
1647
+ }
1648
+
1649
/* Write V into ep[index] for a stack-resident env.  The assertion
 * guarantees the env does not require GC write barriers, so the raw
 * VM_FORCE_WRITE is safe. */
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
1655
+
1656
/* ep/block helpers defined elsewhere in the VM. */
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1662
+
1663
/* Control frames are laid out so the previous (older) frame sits at a
 * higher address and the next (newer) frame at a lower address. */
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

/* A frame CFP is valid while it lies strictly below ECFP (the end-of-
 * stack frame boundary). */
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))
1668
+
1669
+ static inline const rb_control_frame_t *
1670
+ RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1671
+ {
1672
+ return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1673
+ }
1674
+
1675
/* Return non-zero when CFP has run past the end of EC's frame area,
 * i.e. the control-frame stack has overflowed. */
static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
1680
+
1681
/* Block handlers are tagged pointers: low bits 0x01 mark a captured
 * iseq block.  Return 1 iff BLOCK_HANDLER carries that tag; under
 * VM_CHECK_MODE also assert the payload really is an imemo_iseq. */
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}
1695
+
1696
/* Encode a captured iseq block as a tagged block handler (tag 0x01). */
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}
1703
+
1704
/* Decode an iseq-block handler back to its captured block (strips the
 * 0x03 tag bits; asserts the handler really is an iseq block). */
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1711
+
1712
+ static inline int
1713
+ VM_BH_IFUNC_P(VALUE block_handler)
1714
+ {
1715
+ if ((block_handler & 0x03) == 0x03) {
1716
+ #if VM_CHECK_MODE > 0
1717
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1718
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1719
+ #endif
1720
+ return 1;
1721
+ }
1722
+ else {
1723
+ return 0;
1724
+ }
1725
+ }
1726
+
1727
/* Encode a captured ifunc block as a tagged block handler (tag 0x03). */
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}
1734
+
1735
/* Decode an ifunc-block handler back to its captured block. */
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}
1742
+
1743
/* Decode either kind of captured-block handler (iseq or ifunc) to its
 * rb_captured_block; both share the same pointer payload. */
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1750
+
1751
/* Classify BLOCK_HANDLER into one of the four handler kinds.  Anything
 * that is not a tagged iseq/ifunc block and not a Symbol is asserted
 * to be a Proc. */
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}
1768
+
1769
/* Assertion-only check that BLOCK_HANDLER is either "no block" or a
 * classifiable handler (classification itself carries the asserts);
 * compiles to nothing outside VM_CHECK_MODE. */
static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}
1775
+
1776
/* Return BLOCK's type tag.  Under VM_CHECK_MODE, first verify that the
 * union member implied by the tag is internally consistent. */
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}
1797
+
1798
+ static inline void
1799
+ vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1800
+ {
1801
+ struct rb_block *mb = (struct rb_block *)block;
1802
+ mb->type = type;
1803
+ }
1804
+
1805
/* Return the rb_block embedded in the rb_proc_t behind PROCVAL
 * (a T_DATA Proc object). */
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}
1811
+
1812
/* Forward declarations: vm_proc_iseq()/vm_proc_ep() and these two
 * recurse into each other for block_type_proc blocks. */
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);
1814
+
1815
+ static inline const rb_iseq_t *
1816
+ vm_proc_iseq(VALUE procval)
1817
+ {
1818
+ return vm_block_iseq(vm_proc_block(procval));
1819
+ }
1820
+
1821
+ static inline const VALUE *
1822
+ vm_proc_ep(VALUE procval)
1823
+ {
1824
+ return vm_block_ep(vm_proc_block(procval));
1825
+ }
1826
+
1827
/* Return BLOCK's iseq: directly for iseq blocks, via the wrapped Proc
 * for proc blocks, NULL for ifunc/symbol blocks. */
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}
1839
+
1840
/* Return BLOCK's environment pointer: the captured ep for iseq/ifunc
 * blocks, the wrapped Proc's ep for proc blocks, NULL for symbols. */
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}
1852
+
1853
/* Return the `self` captured by BLOCK; symbol blocks have none
 * (Qundef), proc blocks delegate to the wrapped Proc's block. */
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
1868
+
1869
/* Symbol block handlers are stored untagged: conversion is identity
 * plus an assertion. */
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}
1875
+
1876
/* A Symbol is its own block handler (identity; asserted). */
static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}
1882
+
1883
/* Proc block handlers are stored untagged: conversion is identity
 * plus an assertion. */
static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}
1889
+
1890
/* A Proc object is its own block handler (identity; asserted). */
static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
1896
+
1897
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

/* "Stack Dump Raw": dump the current (or given) control frame stack to
 * stderr — intended for use from a debugger. */
#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

/* is_lambda selects proc (0) vs lambda (1) semantics; see the inline
 * wrappers rb_vm_make_proc()/rb_vm_make_lambda() below. */
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1932
/* Build a Proc (non-lambda) of class KLASS from a captured block. */
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}
1937
+
1938
/* Build a lambda of class KLASS from a captured block. */
static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}
1943
+
1944
/* Binding/env helpers and frame manipulation, defined in the VM proper. */
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

/* timer thread lifecycle control */
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);
1959
+
1960
/* Initialize the VM-wide intrusive lists that track workqueue entries,
 * the ractor set and (pthread builds only) zombie threads. */
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}
1969
+
1970
/* Control-frame walking, backtrace and special-exception helpers. */
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

/* Convenience wrapper taking a string literal for the message. */
#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_context(const rb_execution_context_t *ec);

rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);

/* The preallocated SystemStackError for this VM. */
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1998
+
1999
/* Raise SystemStackError unless at least MARGIN value slots plus one
 * control frame fit between SP (growing up) and CFP (growing down).
 * The STATIC_ASSERTs pin the expected element types of sp and cfp. */
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
    } \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
2010
+
2011
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

/* Fast path: set while only the main ractor exists (see
 * rb_current_ractor_raw below). */
RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
2024
+
2025
/* Accessors for the current VM / ractor / thread / execution context. */
#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)
2029
+
2030
/* Thread owning this execution context. */
static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}
2035
+
2036
+ static inline rb_ractor_t *
2037
+ rb_ec_ractor_ptr(const rb_execution_context_t *ec)
2038
+ {
2039
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
2040
+ if (th) {
2041
+ VM_ASSERT(th->ractor != NULL);
2042
+ return th->ractor;
2043
+ }
2044
+ else {
2045
+ return NULL;
2046
+ }
2047
+ }
2048
+
2049
+ static inline rb_vm_t *
2050
+ rb_ec_vm_ptr(const rb_execution_context_t *ec)
2051
+ {
2052
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
2053
+ if (th) {
2054
+ return th->vm;
2055
+ }
2056
+ else {
2057
+ return NULL;
2058
+ }
2059
+ }
2060
+
2061
/* Out-of-line EC accessor; used by assertions to defeat TLS caching. */
NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
2062
+
2063
/* Return the current thread's execution context from TLS.  When
 * EXPECT_EC is true, a NULL result is an assertion failure. */
static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
    rb_execution_context_t * volatile ec = rb_current_ec();
  #else
    rb_execution_context_t * volatile ec = ruby_current_ec;
  #endif

    /* On the shared objects, `__tls_get_addr()` is used to access the TLS
     * and the address of the `ruby_current_ec` can be stored on a function
     * frame. However, this address can be mis-used after native thread
     * migration of a coroutine.
     * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     * 2) Context switch and resume it on the NT2.
     * 3) `ptr` is used on NT2 but it accesses to the TLS on NT1.
     * This assertion checks such misusage.
     *
     * To avoid accidents, `GET_EC()` should be called once on the frame.
     * Note that inlining can produce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
2092
+
2093
+ static inline rb_thread_t *
2094
+ rb_current_thread(void)
2095
+ {
2096
+ const rb_execution_context_t *ec = GET_EC();
2097
+ return rb_ec_thread_ptr(ec);
2098
+ }
2099
+
2100
/* Current ractor.  Fast path: while only the main ractor exists it is
 * returned without touching TLS.  With EXPECT false, NULL is returned
 * instead of asserting when there is no current EC. */
static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}
2111
+
2112
/* Current ractor; asserts that an execution context exists. */
static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}
2117
+
2118
/* The process-wide VM pointer (single-VM model). */
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}
2131
+
2132
/* Release VM-lock recursion levels acquired between the two counts. */
void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);
2135
+
2136
/* This technically is a data race, as it's checked without the lock, however we
 * check against a value only our own thread will write.  (Hence the
 * thread-sanitizer suppression.) */
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}
2144
+
2145
+ static inline unsigned int
2146
+ rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2147
+ {
2148
+ rb_vm_t *vm = rb_ec_vm_ptr(ec);
2149
+
2150
+ if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2151
+ return 0;
2152
+ }
2153
+ else {
2154
+ return vm->ractor.sync.lock_rec;
2155
+ }
2156
+ }
2157
+
2158
#else
#error "unsupported thread model"
#endif /* RUBY_VM_THREAD_MODEL == 2 */
2161
+
2162
/* Bits of ec->interrupt_flag / ec->interrupt_mask. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01, /* periodic timer tick */
    PENDING_INTERRUPT_MASK       = 0x02, /* queued async events (e.g. Thread#raise) */
    POSTPONED_JOB_INTERRUPT_MASK = 0x04, /* postponed-job queue has work */
    TRAP_INTERRUPT_MASK          = 0x08, /* signal trap handler pending */
    TERMINATE_INTERRUPT_MASK     = 0x10, /* thread termination requested */
    VM_BARRIER_INTERRUPT_MASK    = 0x20, /* VM-wide barrier rendezvous */
};
2170
+
2171
/* Atomically raise the corresponding interrupt bit on EC. */
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2177
+
2178
/* True iff an unmasked pending or trap interrupt is raised on EC
 * (ignores timer/postponed-job/terminate/barrier bits). */
static inline bool
RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
{
    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
}
2183
+
2184
/* True iff any unmasked interrupt bit is raised on EC.  When the VM
 * clock is enabled, a clock advance observed since the last check also
 * raises a timer interrupt as a side effect. */
static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
}
2197
+
2198
/* Signal delivery, per-thread interrupt handling and EC lifecycle. */
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2220
+
2221
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
/* Service any unmasked pending interrupts on EC.  Must be called with
 * the caller's own EC (asserted), and never from inside a critical
 * section when that assertion facility is compiled in. */
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
2235
+
2236
+ /* tracer */
2237
+
2238
/* Argument bundle passed to TracePoint / event hooks describing one
 * event occurrence. */
struct rb_trace_arg_struct {
    rb_event_flag_t event;          /* the RUBY_EVENT_* that fired */
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;  /* frame at the time of the event */
    VALUE self;
    ID id;                          /* original method id */
    ID called_id;                   /* id the method was called by */
    VALUE klass;
    VALUE data;

    int klass_solved;               /* non-zero once klass/id are resolved */

    /* calc from cfp */
    int lineno;
    VALUE path;                     /* Qundef until computed lazily */
};
2254
+
2255
/* Event-hook list management (vm_trace). */
void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2262
+
2263
/* Fire hooks in HOOKS_ for event FLAG_ — but only evaluate the payload
 * arguments after the cheap events-mask test passes. */
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
2271
+
2272
+ static inline void
2273
+ rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2274
+ VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2275
+ {
2276
+ struct rb_trace_arg_struct trace_arg;
2277
+
2278
+ VM_ASSERT((hooks->events & flag) != 0);
2279
+
2280
+ trace_arg.event = flag;
2281
+ trace_arg.ec = ec;
2282
+ trace_arg.cfp = ec->cfp;
2283
+ trace_arg.self = self;
2284
+ trace_arg.id = id;
2285
+ trace_arg.called_id = called_id;
2286
+ trace_arg.klass = klass;
2287
+ trace_arg.data = data;
2288
+ trace_arg.path = Qundef;
2289
+ trace_arg.klass_solved = 0;
2290
+
2291
+ rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2292
+ }
2293
+
2294
/* Externally visible prefix of a ractor; the cast in
 * rb_ec_ractor_hooks() relies on rb_ractor_t beginning with this
 * layout. */
struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
};
2299
+
2300
+ static inline rb_hook_list_t *
2301
+ rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2302
+ {
2303
+ struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2304
+ return &cr_pub->hooks;
2305
+ }
2306
+
2307
/* Fire the current ractor's hooks; the _AND_POP_FRAME variant pops the
 * frame on non-local exit (pop_p = 1). */
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2312
+
2313
/* Fire :script_compiled with ISEQ as payload, or with the pair
 * [eval_script, iseq] when an eval source string is present. */
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
2320
+
2321
/* Trap/postponed-job plumbing and the exported coverage API. */
void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE                0x010000
#define RUBY_EVENT_COVERAGE_BRANCH              0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */