debase-ruby_core_source 3.3.5 → 3.3.6

Sign up to get free protection for your applications and to get access to all the features.
Files changed (149) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -1
  3. data/.idea/vcs.xml +28 -0
  4. data/CHANGELOG.md +8 -0
  5. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/addr2line.h +22 -0
  6. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/builtin.h +119 -0
  7. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ccan/build_assert/build_assert.h +40 -0
  8. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ccan/check_type/check_type.h +63 -0
  9. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ccan/container_of/container_of.h +142 -0
  10. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ccan/list/list.h +791 -0
  11. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ccan/str/str.h +17 -0
  12. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/constant.h +53 -0
  13. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/darray.h +209 -0
  14. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/debug_counter.h +423 -0
  15. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/dln.h +32 -0
  16. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/encindex.h +70 -0
  17. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/eval_intern.h +324 -0
  18. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/hrtime.h +237 -0
  19. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/id.h +347 -0
  20. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/id_table.h +39 -0
  21. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/insns.inc +265 -0
  22. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/insns_info.inc +9902 -0
  23. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/array.h +152 -0
  24. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/basic_operators.h +64 -0
  25. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/bignum.h +244 -0
  26. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/bits.h +568 -0
  27. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/class.h +283 -0
  28. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/cmdlineopt.h +65 -0
  29. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/compar.h +29 -0
  30. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/compile.h +34 -0
  31. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/compilers.h +107 -0
  32. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/complex.h +29 -0
  33. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/cont.h +35 -0
  34. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/dir.h +16 -0
  35. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/enc.h +19 -0
  36. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/encoding.h +36 -0
  37. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/enum.h +18 -0
  38. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/enumerator.h +21 -0
  39. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/error.h +218 -0
  40. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/eval.h +33 -0
  41. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/file.h +38 -0
  42. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/fixnum.h +184 -0
  43. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/gc.h +322 -0
  44. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/hash.h +192 -0
  45. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/imemo.h +261 -0
  46. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/inits.h +47 -0
  47. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/io.h +143 -0
  48. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/load.h +18 -0
  49. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/loadpath.h +16 -0
  50. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/math.h +23 -0
  51. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/missing.h +19 -0
  52. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/numeric.h +274 -0
  53. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/object.h +63 -0
  54. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/parse.h +129 -0
  55. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/proc.h +30 -0
  56. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/process.h +124 -0
  57. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/ractor.h +6 -0
  58. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/random.h +17 -0
  59. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/range.h +40 -0
  60. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/rational.h +71 -0
  61. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/re.h +28 -0
  62. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/ruby_parser.h +102 -0
  63. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/sanitizers.h +326 -0
  64. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/serial.h +23 -0
  65. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/signal.h +24 -0
  66. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/st.h +11 -0
  67. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/static_assert.h +16 -0
  68. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/string.h +186 -0
  69. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/struct.h +127 -0
  70. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/symbol.h +45 -0
  71. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/thread.h +85 -0
  72. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/time.h +34 -0
  73. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/transcode.h +23 -0
  74. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/util.h +27 -0
  75. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/variable.h +72 -0
  76. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/vm.h +137 -0
  77. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal/warnings.h +16 -0
  78. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/internal.h +108 -0
  79. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/iseq.h +340 -0
  80. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/known_errors.inc +1419 -0
  81. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/method.h +255 -0
  82. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/node.h +111 -0
  83. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/node_name.inc +224 -0
  84. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/optinsn.inc +128 -0
  85. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/optunifs.inc +43 -0
  86. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/parse.h +244 -0
  87. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/parser_bits.h +564 -0
  88. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/parser_node.h +32 -0
  89. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/parser_st.h +162 -0
  90. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/parser_value.h +106 -0
  91. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/ast.h +7524 -0
  92. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/defines.h +242 -0
  93. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/diagnostic.h +450 -0
  94. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/encoding.h +283 -0
  95. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/extension.h +19 -0
  96. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/node.h +129 -0
  97. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/options.h +396 -0
  98. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/pack.h +163 -0
  99. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/parser.h +933 -0
  100. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/prettyprint.h +34 -0
  101. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/prism.h +336 -0
  102. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/regexp.h +43 -0
  103. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/static_literals.h +121 -0
  104. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_buffer.h +218 -0
  105. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_char.h +204 -0
  106. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_constant_pool.h +218 -0
  107. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_integer.h +126 -0
  108. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_list.h +97 -0
  109. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_memchr.h +29 -0
  110. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_newline_list.h +113 -0
  111. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_string.h +190 -0
  112. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_strncasecmp.h +32 -0
  113. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/util/pm_strpbrk.h +46 -0
  114. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism/version.h +29 -0
  115. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/prism_compile.h +99 -0
  116. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/probes_helper.h +42 -0
  117. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ractor_core.h +382 -0
  118. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/regenc.h +254 -0
  119. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/regint.h +1006 -0
  120. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/regparse.h +371 -0
  121. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/revision.h +5 -0
  122. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/rjit.h +101 -0
  123. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/rjit_c.h +165 -0
  124. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ruby_assert.h +14 -0
  125. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/ruby_atomic.h +23 -0
  126. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/rubyparser.h +1350 -0
  127. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/shape.h +234 -0
  128. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/siphash.h +48 -0
  129. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/symbol.h +123 -0
  130. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/thread_none.h +21 -0
  131. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/thread_pthread.h +168 -0
  132. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/thread_win32.h +58 -0
  133. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/timev.h +58 -0
  134. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/transcode_data.h +138 -0
  135. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/variable.h +39 -0
  136. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/version.h +69 -0
  137. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm.inc +5840 -0
  138. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_call_iseq_optimized.inc +244 -0
  139. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_callinfo.h +627 -0
  140. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_core.h +2222 -0
  141. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_debug.h +124 -0
  142. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_exec.h +199 -0
  143. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_insnhelper.h +277 -0
  144. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_opts.h +67 -0
  145. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vm_sync.h +137 -0
  146. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/vmtc.inc +259 -0
  147. data/lib/debase/ruby_core_source/ruby-3.4.0-preview2/yjit.h +79 -0
  148. data/lib/debase/ruby_core_source/version.rb +1 -1
  149. metadata +150 -6
@@ -0,0 +1,2222 @@
1
+ #ifndef RUBY_VM_CORE_H
2
+ #define RUBY_VM_CORE_H
3
+ /**********************************************************************
4
+
5
+ vm_core.h -
6
+
7
+ $Author$
8
+ created at: 04/01/01 19:41:38 JST
9
+
10
+ Copyright (C) 2004-2007 Koichi Sasada
11
+
12
+ **********************************************************************/
13
+
14
+ /*
15
+ * Enable check mode.
16
+ * 1: enable local assertions.
17
+ */
18
+ #ifndef VM_CHECK_MODE
19
+
20
+ // respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
21
+ #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
+
23
+ #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24
+ #endif
25
+
26
+ /**
27
+ * VM Debug Level
28
+ *
29
+ * debug level:
30
+ * 0: no debug output
31
+ * 1: show instruction name
32
+ * 2: show stack frame when control stack frame is changed
33
+ * 3: show stack status
34
+ * 4: show register
35
+ * 5:
36
+ * 10: gc check
37
+ */
38
+
39
+ #ifndef VMDEBUG
40
+ #define VMDEBUG 0
41
+ #endif
42
+
43
+ #if 0
44
+ #undef VMDEBUG
45
+ #define VMDEBUG 3
46
+ #endif
47
+
48
+ #include "ruby/internal/config.h"
49
+
50
+ #include <stddef.h>
51
+ #include <signal.h>
52
+ #include <stdarg.h>
53
+
54
+ #include "ruby_assert.h"
55
+
56
+ #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
+
58
+ #if VM_CHECK_MODE > 0
59
+ #define VM_ASSERT(expr, ...) \
60
+ RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61
+ #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62
+ #define RUBY_ASSERT_CRITICAL_SECTION
63
+ #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64
+ #else
65
+ #define VM_ASSERT(/*expr, */...) ((void)0)
66
+ #define VM_UNREACHABLE(func) UNREACHABLE
67
+ #define RUBY_DEBUG_THREAD_SCHEDULE()
68
+ #endif
69
+
70
+ #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
+
72
+ #if defined(RUBY_ASSERT_CRITICAL_SECTION)
73
+ // TODO add documentation
74
+ extern int ruby_assert_critical_section_entered;
75
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
76
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
77
+ #else
78
+ #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
79
+ #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
80
+ #endif
81
+
82
+ #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
83
+ # include "wasm/setjmp.h"
84
+ #else
85
+ # include <setjmp.h>
86
+ #endif
87
+
88
+ #if defined(__linux__) || defined(__FreeBSD__)
89
+ # define RB_THREAD_T_HAS_NATIVE_ID
90
+ #endif
91
+
92
+ #include "ruby/internal/stdbool.h"
93
+ #include "ccan/list/list.h"
94
+ #include "id.h"
95
+ #include "internal.h"
96
+ #include "internal/array.h"
97
+ #include "internal/basic_operators.h"
98
+ #include "internal/sanitizers.h"
99
+ #include "internal/serial.h"
100
+ #include "internal/vm.h"
101
+ #include "method.h"
102
+ #include "node.h"
103
+ #include "ruby/ruby.h"
104
+ #include "ruby/st.h"
105
+ #include "ruby_atomic.h"
106
+ #include "vm_opts.h"
107
+
108
+ #include "ruby/thread_native.h"
109
+ /*
110
+ * implementation selector of get_insn_info algorithm
111
+ * 0: linear search
112
+ * 1: binary search
113
+ * 2: succinct bitvector
114
+ */
115
+ #ifndef VM_INSN_INFO_TABLE_IMPL
116
+ # define VM_INSN_INFO_TABLE_IMPL 2
117
+ #endif
118
+
119
+ #if defined(NSIG_MAX) /* POSIX issue 8 */
120
+ # undef NSIG
121
+ # define NSIG NSIG_MAX
122
+ #elif defined(_SIG_MAXSIG) /* FreeBSD */
123
+ # undef NSIG
124
+ # define NSIG _SIG_MAXSIG
125
+ #elif defined(_SIGMAX) /* QNX */
126
+ # define NSIG (_SIGMAX + 1)
127
+ #elif defined(NSIG) /* 99% of everything else */
128
+ # /* take it */
129
+ #else /* Last resort */
130
+ # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
131
+ #endif
132
+
133
+ #define RUBY_NSIG NSIG
134
+
135
+ #if defined(SIGCLD)
136
+ # define RUBY_SIGCHLD (SIGCLD)
137
+ #elif defined(SIGCHLD)
138
+ # define RUBY_SIGCHLD (SIGCHLD)
139
+ #endif
140
+
141
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
142
+ # define USE_SIGALTSTACK
143
+ void *rb_allocate_sigaltstack(void);
144
+ void *rb_register_sigaltstack(void *);
145
+ # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
146
+ # define RB_ALTSTACK_FREE(var) free(var)
147
+ # define RB_ALTSTACK(var) var
148
+ #else /* noop */
149
+ # define RB_ALTSTACK_INIT(var, altstack)
150
+ # define RB_ALTSTACK_FREE(var)
151
+ # define RB_ALTSTACK(var) (0)
152
+ #endif
153
+
154
+ #include THREAD_IMPL_H
155
+ #define RUBY_VM_THREAD_MODEL 2
156
+
157
+ /*****************/
158
+ /* configuration */
159
+ /*****************/
160
+
161
+ /* gcc ver. check */
162
+ #if defined(__GNUC__) && __GNUC__ >= 2
163
+
164
+ #if OPT_TOKEN_THREADED_CODE
165
+ #if OPT_DIRECT_THREADED_CODE
166
+ #undef OPT_DIRECT_THREADED_CODE
167
+ #endif
168
+ #endif
169
+
170
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
171
+
172
+ /* disable threaded code options */
173
+ #if OPT_DIRECT_THREADED_CODE
174
+ #undef OPT_DIRECT_THREADED_CODE
175
+ #endif
176
+ #if OPT_TOKEN_THREADED_CODE
177
+ #undef OPT_TOKEN_THREADED_CODE
178
+ #endif
179
+ #endif
180
+
181
+ /* call threaded code */
182
+ #if OPT_CALL_THREADED_CODE
183
+ #if OPT_DIRECT_THREADED_CODE
184
+ #undef OPT_DIRECT_THREADED_CODE
185
+ #endif /* OPT_DIRECT_THREADED_CODE */
186
+ #endif /* OPT_CALL_THREADED_CODE */
187
+
188
+ void rb_vm_encoded_insn_data_table_init(void);
189
+ typedef unsigned long rb_num_t;
190
+ typedef signed long rb_snum_t;
191
+
192
+ enum ruby_tag_type {
193
+ RUBY_TAG_NONE = 0x0,
194
+ RUBY_TAG_RETURN = 0x1,
195
+ RUBY_TAG_BREAK = 0x2,
196
+ RUBY_TAG_NEXT = 0x3,
197
+ RUBY_TAG_RETRY = 0x4,
198
+ RUBY_TAG_REDO = 0x5,
199
+ RUBY_TAG_RAISE = 0x6,
200
+ RUBY_TAG_THROW = 0x7,
201
+ RUBY_TAG_FATAL = 0x8,
202
+ RUBY_TAG_MASK = 0xf
203
+ };
204
+
205
+ #define TAG_NONE RUBY_TAG_NONE
206
+ #define TAG_RETURN RUBY_TAG_RETURN
207
+ #define TAG_BREAK RUBY_TAG_BREAK
208
+ #define TAG_NEXT RUBY_TAG_NEXT
209
+ #define TAG_RETRY RUBY_TAG_RETRY
210
+ #define TAG_REDO RUBY_TAG_REDO
211
+ #define TAG_RAISE RUBY_TAG_RAISE
212
+ #define TAG_THROW RUBY_TAG_THROW
213
+ #define TAG_FATAL RUBY_TAG_FATAL
214
+ #define TAG_MASK RUBY_TAG_MASK
215
+
216
+ enum ruby_vm_throw_flags {
217
+ VM_THROW_NO_ESCAPE_FLAG = 0x8000,
218
+ VM_THROW_STATE_MASK = 0xff
219
+ };
220
+
221
+ /* forward declarations */
222
+ struct rb_thread_struct;
223
+ struct rb_control_frame_struct;
224
+
225
+ /* iseq data type */
226
+ typedef struct rb_compile_option_struct rb_compile_option_t;
227
+
228
+ union ic_serial_entry {
229
+ rb_serial_t raw;
230
+ VALUE data[2];
231
+ };
232
+
233
+ // imemo_constcache
234
+ struct iseq_inline_constant_cache_entry {
235
+ VALUE flags;
236
+
237
+ VALUE value; // v0
238
+ VALUE _unused1; // v1
239
+ VALUE _unused2; // v2
240
+ const rb_cref_t *ic_cref; // v3
241
+ };
242
+ STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
243
+ (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
244
+ sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
245
+
246
+ struct iseq_inline_constant_cache {
247
+ struct iseq_inline_constant_cache_entry *entry;
248
+
249
+ /**
250
+ * A null-terminated list of ids, used to represent a constant's path
251
+ * idNULL is used to represent the :: prefix, and 0 is used to denote the end
252
+ * of the list.
253
+ *
254
+ * For example
255
+ * FOO {rb_intern("FOO"), 0}
256
+ * FOO::BAR {rb_intern("FOO"), rb_intern("BAR"), 0}
257
+ * ::FOO {idNULL, rb_intern("FOO"), 0}
258
+ * ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
259
+ */
260
+ const ID *segments;
261
+ };
262
+
263
+ struct iseq_inline_iv_cache_entry {
264
+ uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
265
+ ID iv_set_name;
266
+ };
267
+
268
+ struct iseq_inline_cvar_cache_entry {
269
+ struct rb_cvar_class_tbl_entry *entry;
270
+ };
271
+
272
+ union iseq_inline_storage_entry {
273
+ struct {
274
+ struct rb_thread_struct *running_thread;
275
+ VALUE value;
276
+ } once;
277
+ struct iseq_inline_constant_cache ic_cache;
278
+ struct iseq_inline_iv_cache_entry iv_cache;
279
+ };
280
+
281
+ struct rb_calling_info {
282
+ const struct rb_call_data *cd;
283
+ const struct rb_callcache *cc;
284
+ VALUE block_handler;
285
+ VALUE recv;
286
+ int argc;
287
+ bool kw_splat;
288
+ VALUE heap_argv;
289
+ };
290
+
291
+ #ifndef VM_ARGC_STACK_MAX
292
+ #define VM_ARGC_STACK_MAX 128
293
+ #endif
294
+
295
+ # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
296
+
297
+ struct rb_execution_context_struct;
298
+
299
+ #if 1
300
+ #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
301
+ #else
302
+ #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
303
+ #endif
304
+ #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
305
+
306
+ typedef struct rb_iseq_location_struct {
307
+ VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
308
+ VALUE base_label; /* String */
309
+ VALUE label; /* String */
310
+ int first_lineno;
311
+ int node_id;
312
+ rb_code_location_t code_location;
313
+ } rb_iseq_location_t;
314
+
315
+ #define PATHOBJ_PATH 0
316
+ #define PATHOBJ_REALPATH 1
317
+
318
+ static inline VALUE
319
+ pathobj_path(VALUE pathobj)
320
+ {
321
+ if (RB_TYPE_P(pathobj, T_STRING)) {
322
+ return pathobj;
323
+ }
324
+ else {
325
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
326
+ return RARRAY_AREF(pathobj, PATHOBJ_PATH);
327
+ }
328
+ }
329
+
330
+ static inline VALUE
331
+ pathobj_realpath(VALUE pathobj)
332
+ {
333
+ if (RB_TYPE_P(pathobj, T_STRING)) {
334
+ return pathobj;
335
+ }
336
+ else {
337
+ VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
338
+ return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
339
+ }
340
+ }
341
+
342
+ /* Forward declarations */
343
+ struct rb_rjit_unit;
344
+
345
+ typedef uintptr_t iseq_bits_t;
346
+
347
+ #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
348
+
349
+ /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
350
+ #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
351
+
352
+ /* instruction sequence type */
353
+ enum rb_iseq_type {
354
+ ISEQ_TYPE_TOP,
355
+ ISEQ_TYPE_METHOD,
356
+ ISEQ_TYPE_BLOCK,
357
+ ISEQ_TYPE_CLASS,
358
+ ISEQ_TYPE_RESCUE,
359
+ ISEQ_TYPE_ENSURE,
360
+ ISEQ_TYPE_EVAL,
361
+ ISEQ_TYPE_MAIN,
362
+ ISEQ_TYPE_PLAIN
363
+ };
364
+
365
+ // Attributes specified by Primitive.attr!
366
+ enum rb_builtin_attr {
367
+ // The iseq does not call methods.
368
+ BUILTIN_ATTR_LEAF = 0x01,
369
+ // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
370
+ BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
371
+ // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
372
+ BUILTIN_ATTR_INLINE_BLOCK = 0x04,
373
+ };
374
+
375
+ typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
376
+
377
+ struct rb_iseq_constant_body {
378
+ enum rb_iseq_type type;
379
+
380
+ unsigned int iseq_size;
381
+ VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
382
+
383
+ /**
384
+ * parameter information
385
+ *
386
+ * def m(a1, a2, ..., aM, # mandatory
387
+ * b1=(...), b2=(...), ..., bN=(...), # optional
388
+ * *c, # rest
389
+ * d1, d2, ..., dO, # post
390
+ * e1:(...), e2:(...), ..., eK:(...), # keyword
391
+ * **f, # keyword_rest
392
+ * &g) # block
393
+ * =>
394
+ *
395
+ * lead_num = M
396
+ * opt_num = N
397
+ * rest_start = M+N
398
+ * post_start = M+N+(*1)
399
+ * post_num = O
400
+ * keyword_num = K
401
+ * block_start = M+N+(*1)+O+K
402
+ * keyword_bits = M+N+(*1)+O+K+(&1)
403
+ * size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
404
+ */
405
+
406
+ struct {
407
+ struct {
408
+ unsigned int has_lead : 1;
409
+ unsigned int has_opt : 1;
410
+ unsigned int has_rest : 1;
411
+ unsigned int has_post : 1;
412
+ unsigned int has_kw : 1;
413
+ unsigned int has_kwrest : 1;
414
+ unsigned int has_block : 1;
415
+
416
+ unsigned int ambiguous_param0 : 1; /* {|a|} */
417
+ unsigned int accepts_no_kwarg : 1;
418
+ unsigned int ruby2_keywords: 1;
419
+ unsigned int anon_rest: 1;
420
+ unsigned int anon_kwrest: 1;
421
+ unsigned int use_block: 1;
422
+ unsigned int forwardable: 1;
423
+ } flags;
424
+
425
+ unsigned int size;
426
+
427
+ int lead_num;
428
+ int opt_num;
429
+ int rest_start;
430
+ int post_start;
431
+ int post_num;
432
+ int block_start;
433
+
434
+ const VALUE *opt_table; /* (opt_num + 1) entries. */
435
+ /* opt_num and opt_table:
436
+ *
437
+ * def foo o1=e1, o2=e2, ..., oN=eN
438
+ * #=>
439
+ * # prologue code
440
+ * A1: e1
441
+ * A2: e2
442
+ * ...
443
+ * AN: eN
444
+ * AL: body
445
+ * opt_num = N
446
+ * opt_table = [A1, A2, ..., AN, AL]
447
+ */
448
+
449
+ const struct rb_iseq_param_keyword {
450
+ int num;
451
+ int required_num;
452
+ int bits_start;
453
+ int rest_start;
454
+ const ID *table;
455
+ VALUE *default_values;
456
+ } *keyword;
457
+ } param;
458
+
459
+ rb_iseq_location_t location;
460
+
461
+ /* insn info, must be freed */
462
+ struct iseq_insn_info {
463
+ const struct iseq_insn_info_entry *body;
464
+ unsigned int *positions;
465
+ unsigned int size;
466
+ #if VM_INSN_INFO_TABLE_IMPL == 2
467
+ struct succ_index_table *succ_index_table;
468
+ #endif
469
+ } insns_info;
470
+
471
+ const ID *local_table; /* must free */
472
+
473
+ /* catch table */
474
+ struct iseq_catch_table *catch_table;
475
+
476
+ /* for child iseq */
477
+ const struct rb_iseq_struct *parent_iseq;
478
+ struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
479
+
480
+ union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
481
+ struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
482
+
483
+ struct {
484
+ rb_snum_t flip_count;
485
+ VALUE script_lines;
486
+ VALUE coverage;
487
+ VALUE pc2branchindex;
488
+ VALUE *original_iseq;
489
+ } variable;
490
+
491
+ unsigned int local_table_size;
492
+ unsigned int ic_size; // Number of IC caches
493
+ unsigned int ise_size; // Number of ISE caches
494
+ unsigned int ivc_size; // Number of IVC caches
495
+ unsigned int icvarc_size; // Number of ICVARC caches
496
+ unsigned int ci_size;
497
+ unsigned int stack_max; /* for stack overflow check */
498
+
499
+ unsigned int builtin_attrs; // Union of rb_builtin_attr
500
+
501
+ bool prism; // ISEQ was generated from prism compiler
502
+
503
+ union {
504
+ iseq_bits_t * list; /* Find references for GC */
505
+ iseq_bits_t single;
506
+ } mark_bits;
507
+
508
+ struct rb_id_table *outer_variables;
509
+
510
+ const rb_iseq_t *mandatory_only_iseq;
511
+
512
+ #if USE_RJIT || USE_YJIT
513
+ // Function pointer for JIT code on jit_exec()
514
+ rb_jit_func_t jit_entry;
515
+ // Number of calls on jit_exec()
516
+ long unsigned jit_entry_calls;
517
+ #endif
518
+
519
+ #if USE_YJIT
520
+ // Function pointer for JIT code on jit_exec_exception()
521
+ rb_jit_func_t jit_exception;
522
+ // Number of calls on jit_exec_exception()
523
+ long unsigned jit_exception_calls;
524
+ #endif
525
+
526
+ #if USE_RJIT
527
+ // RJIT stores some data on each iseq.
528
+ VALUE rjit_blocks;
529
+ #endif
530
+
531
+ #if USE_YJIT
532
+ // YJIT stores some data on each iseq.
533
+ void *yjit_payload;
534
+ // Used to estimate how frequently this ISEQ gets called
535
+ uint64_t yjit_calls_at_interv;
536
+ #endif
537
+ };
538
+
539
+ /* T_IMEMO/iseq */
540
+ /* typedef rb_iseq_t is in method.h */
541
+ struct rb_iseq_struct {
542
+ VALUE flags; /* 1 */
543
+ VALUE wrapper; /* 2 */
544
+
545
+ struct rb_iseq_constant_body *body; /* 3 */
546
+
547
+ union { /* 4, 5 words */
548
+ struct iseq_compile_data *compile_data; /* used at compile time */
549
+
550
+ struct {
551
+ VALUE obj;
552
+ int index;
553
+ } loader;
554
+
555
+ struct {
556
+ struct rb_hook_list_struct *local_hooks;
557
+ rb_event_flag_t global_trace_events;
558
+ } exec;
559
+ } aux;
560
+ };
561
+
562
+ #define ISEQ_BODY(iseq) ((iseq)->body)
563
+
564
+ #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
565
+ #define USE_LAZY_LOAD 0
566
+ #endif
567
+
568
+ #if !USE_LAZY_LOAD
569
+ static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
570
+ #endif
571
+ const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
572
+
573
+ static inline const rb_iseq_t *
574
+ rb_iseq_check(const rb_iseq_t *iseq)
575
+ {
576
+ if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
577
+ rb_iseq_complete((rb_iseq_t *)iseq);
578
+ }
579
+ return iseq;
580
+ }
581
+
582
+ static inline const rb_iseq_t *
583
+ def_iseq_ptr(rb_method_definition_t *def)
584
+ {
585
+ //TODO: re-visit. to check the bug, enable this assertion.
586
+ #if VM_CHECK_MODE > 0
587
+ if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
588
+ #endif
589
+ return rb_iseq_check(def->body.iseq.iseqptr);
590
+ }
591
+
592
+ enum ruby_special_exceptions {
593
+ ruby_error_reenter,
594
+ ruby_error_nomemory,
595
+ ruby_error_sysstack,
596
+ ruby_error_stackfatal,
597
+ ruby_error_stream_closed,
598
+ ruby_special_error_count
599
+ };
600
+
601
+ #define GetVMPtr(obj, ptr) \
602
+ GetCoreDataFromValue((obj), rb_vm_t, (ptr))
603
+
604
+ struct rb_vm_struct;
605
+ typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
606
+
607
+ typedef struct rb_at_exit_list {
608
+ rb_vm_at_exit_func *func;
609
+ struct rb_at_exit_list *next;
610
+ } rb_at_exit_list;
611
+
612
+ void *rb_objspace_alloc(void);
613
+ void rb_objspace_free(void *objspace);
614
+ void rb_objspace_call_finalizer(void);
615
+
616
+ typedef struct rb_hook_list_struct {
617
+ struct rb_event_hook_struct *hooks;
618
+ rb_event_flag_t events;
619
+ unsigned int running;
620
+ bool need_clean;
621
+ bool is_local;
622
+ } rb_hook_list_t;
623
+
624
+
625
+ // see builtin.h for definition
626
+ typedef const struct rb_builtin_function *RB_BUILTIN;
627
+
628
+ struct global_object_list {
629
+ VALUE *varptr;
630
+ struct global_object_list *next;
631
+ };
632
+
633
+ typedef struct rb_vm_struct {
634
+ VALUE self;
635
+
636
+ struct {
637
+ struct ccan_list_head set;
638
+ unsigned int cnt;
639
+ unsigned int blocking_cnt;
640
+
641
+ struct rb_ractor_struct *main_ractor;
642
+ struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
643
+
644
+ struct {
645
+ // monitor
646
+ rb_nativethread_lock_t lock;
647
+ struct rb_ractor_struct *lock_owner;
648
+ unsigned int lock_rec;
649
+
650
+ // join at exit
651
+ rb_nativethread_cond_t terminate_cond;
652
+ bool terminate_waiting;
653
+
654
+ #ifndef RUBY_THREAD_PTHREAD_H
655
+ bool barrier_waiting;
656
+ unsigned int barrier_cnt;
657
+ rb_nativethread_cond_t barrier_cond;
658
+ #endif
659
+ } sync;
660
+
661
+ // ractor scheduling
662
+ struct {
663
+ rb_nativethread_lock_t lock;
664
+ struct rb_ractor_struct *lock_owner;
665
+ bool locked;
666
+
667
+ rb_nativethread_cond_t cond; // GRQ
668
+ unsigned int snt_cnt; // count of shared NTs
669
+ unsigned int dnt_cnt; // count of dedicated NTs
670
+
671
+ unsigned int running_cnt;
672
+
673
+ unsigned int max_cpu;
674
+ struct ccan_list_head grq; // Global Ready Queue
675
+ unsigned int grq_cnt;
676
+
677
+ // running threads
678
+ struct ccan_list_head running_threads;
679
+
680
+ // threads which switch context by timeslice
681
+ struct ccan_list_head timeslice_threads;
682
+
683
+ struct ccan_list_head zombie_threads;
684
+
685
+ // true if timeslice timer is not enabled
686
+ bool timeslice_wait_inf;
687
+
688
+ // barrier
689
+ rb_nativethread_cond_t barrier_complete_cond;
690
+ rb_nativethread_cond_t barrier_release_cond;
691
+ bool barrier_waiting;
692
+ unsigned int barrier_waiting_cnt;
693
+ unsigned int barrier_serial;
694
+ } sched;
695
+ } ractor;
696
+
697
+ #ifdef USE_SIGALTSTACK
698
+ void *main_altstack;
699
+ #endif
700
+
701
+ rb_serial_t fork_gen;
702
+ struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
703
+
704
+ /* set in single-threaded processes only: */
705
+ volatile int ubf_async_safe;
706
+
707
+ unsigned int running: 1;
708
+ unsigned int thread_abort_on_exception: 1;
709
+ unsigned int thread_report_on_exception: 1;
710
+ unsigned int thread_ignore_deadlock: 1;
711
+
712
+ /* object management */
713
+ VALUE mark_object_ary;
714
+ struct global_object_list *global_object_list;
715
+ const VALUE special_exceptions[ruby_special_error_count];
716
+
717
+ /* load */
718
+ VALUE top_self;
719
+ VALUE load_path;
720
+ VALUE load_path_snapshot;
721
+ VALUE load_path_check_cache;
722
+ VALUE expanded_load_path;
723
+ VALUE loaded_features;
724
+ VALUE loaded_features_snapshot;
725
+ VALUE loaded_features_realpaths;
726
+ VALUE loaded_features_realpath_map;
727
+ struct st_table *loaded_features_index;
728
+ struct st_table *loading_table;
729
+ // For running the init function of statically linked
730
+ // extensions when they are loaded
731
+ struct st_table *static_ext_inits;
732
+
733
+ /* signal */
734
+ struct {
735
+ VALUE cmd[RUBY_NSIG];
736
+ } trap_list;
737
+
738
+ /* postponed_job (async-signal-safe, and thread-safe) */
739
+ struct rb_postponed_job_queue *postponed_job_queue;
740
+
741
+ int src_encoding_index;
742
+
743
+ /* workqueue (thread-safe, NOT async-signal-safe) */
744
+ struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
745
+ rb_nativethread_lock_t workqueue_lock;
746
+
747
+ VALUE orig_progname, progname;
748
+ VALUE coverages, me2counter;
749
+ int coverage_mode;
750
+
751
+ struct {
752
+ struct rb_objspace *objspace;
753
+ struct gc_mark_func_data_struct {
754
+ void *data;
755
+ void (*mark_func)(VALUE v, void *data);
756
+ } *mark_func_data;
757
+ } gc;
758
+
759
+ rb_at_exit_list *at_exit;
760
+
761
+ st_table *frozen_strings;
762
+
763
+ const struct rb_builtin_function *builtin_function_table;
764
+
765
+ st_table *ci_table;
766
+ struct rb_id_table *negative_cme_table;
767
+ st_table *overloaded_cme_table; // cme -> overloaded_cme
768
+ st_table *unused_block_warning_table;
769
+ bool unused_block_warning_strict;
770
+
771
+ // This id table contains a mapping from ID to ICs. It does this with ID
772
+ // keys and nested st_tables as values. The nested tables have ICs as keys
773
+ // and Qtrue as values. It is used when inline constant caches need to be
774
+ // invalidated or ISEQs are being freed.
775
+ struct rb_id_table *constant_cache;
776
+
777
+ #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
778
+ #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
779
+ #endif
780
+ const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
781
+
782
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
783
+ uint32_t clock;
784
+ #endif
785
+
786
+ /* params */
787
+ struct { /* size in byte */
788
+ size_t thread_vm_stack_size;
789
+ size_t thread_machine_stack_size;
790
+ size_t fiber_vm_stack_size;
791
+ size_t fiber_machine_stack_size;
792
+ } default_params;
793
+
794
+ } rb_vm_t;
795
+
796
+ /* default values */
797
+
798
+ #define RUBY_VM_SIZE_ALIGN 4096
799
+
800
+ #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
801
+ #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
802
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
803
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
804
+
805
+ #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
806
+ #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
807
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
808
+ #if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
809
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
810
+ #else
811
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
812
+ #endif
813
+
814
+ #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
815
+ /* It seems sanitizers consume A LOT of machine stacks */
816
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
817
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
818
+ #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
819
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
820
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
821
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
822
+ #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
823
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
824
+ #endif
825
+
826
+ #ifndef VM_DEBUG_BP_CHECK
827
+ #define VM_DEBUG_BP_CHECK 0
828
+ #endif
829
+
830
+ #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
831
+ #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
832
+ #endif
833
+
834
+ struct rb_captured_block {
835
+ VALUE self;
836
+ const VALUE *ep;
837
+ union {
838
+ const rb_iseq_t *iseq;
839
+ const struct vm_ifunc *ifunc;
840
+ VALUE val;
841
+ } code;
842
+ };
843
+
844
+ enum rb_block_handler_type {
845
+ block_handler_type_iseq,
846
+ block_handler_type_ifunc,
847
+ block_handler_type_symbol,
848
+ block_handler_type_proc
849
+ };
850
+
851
+ enum rb_block_type {
852
+ block_type_iseq,
853
+ block_type_ifunc,
854
+ block_type_symbol,
855
+ block_type_proc
856
+ };
857
+
858
+ struct rb_block {
859
+ union {
860
+ struct rb_captured_block captured;
861
+ VALUE symbol;
862
+ VALUE proc;
863
+ } as;
864
+ enum rb_block_type type;
865
+ };
866
+
867
+ typedef struct rb_control_frame_struct {
868
+ const VALUE *pc; // cfp[0]
869
+ VALUE *sp; // cfp[1]
870
+ const rb_iseq_t *iseq; // cfp[2]
871
+ VALUE self; // cfp[3] / block[0]
872
+ const VALUE *ep; // cfp[4] / block[1]
873
+ const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
874
+ void *jit_return; // cfp[6] -- return address for JIT code
875
+ #if VM_DEBUG_BP_CHECK
876
+ VALUE *bp_check; // cfp[7]
877
+ #endif
878
+ } rb_control_frame_t;
879
+
880
+ extern const rb_data_type_t ruby_threadptr_data_type;
881
+
882
+ static inline struct rb_thread_struct *
883
+ rb_thread_ptr(VALUE thval)
884
+ {
885
+ return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
886
+ }
887
+
888
+ enum rb_thread_status {
889
+ THREAD_RUNNABLE,
890
+ THREAD_STOPPED,
891
+ THREAD_STOPPED_FOREVER,
892
+ THREAD_KILLED
893
+ };
894
+
895
+ #ifdef RUBY_JMP_BUF
896
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
897
+ #else
898
+ typedef void *rb_jmpbuf_t[5];
899
+ #endif
900
+
901
+ /*
902
+ `rb_vm_tag_jmpbuf_t` type represents a buffer used to
903
+ long jump to a C frame associated with `rb_vm_tag`.
904
+
905
+ Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
906
+ following functions:
907
+ - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
908
+ - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
909
+
910
+ `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
911
+ `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
912
+ */
913
+ #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
914
+ /*
915
+ WebAssembly target with Asyncify-based SJLJ needs
916
+ to capture the execution context by unwind/rewind-ing
917
+ call frames into a jump buffer. The buffer space tends
918
+ to be considerably large unlike other architectures'
919
+ register-based buffers.
920
+ Therefore, we allocates the buffer on the heap on such
921
+ environments.
922
+ */
923
+ typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
924
+
925
+ #define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
926
+
927
+ static inline void
928
+ rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
929
+ {
930
+ *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
931
+ }
932
+
933
+ static inline void
934
+ rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
935
+ {
936
+ ruby_xfree(*jmpbuf);
937
+ }
938
+ #else
939
+ typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
940
+
941
+ #define RB_VM_TAG_JMPBUF_GET(buf) (buf)
942
+
943
+ static inline void
944
+ rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
945
+ {
946
+ // no-op
947
+ }
948
+
949
+ static inline void
950
+ rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
951
+ {
952
+ // no-op
953
+ }
954
+ #endif
955
+
956
+ /*
957
+ the members which are written in EC_PUSH_TAG() should be placed at
958
+ the beginning and the end, so that entire region is accessible.
959
+ */
960
+ struct rb_vm_tag {
961
+ VALUE tag;
962
+ VALUE retval;
963
+ rb_vm_tag_jmpbuf_t buf;
964
+ struct rb_vm_tag *prev;
965
+ enum ruby_tag_type state;
966
+ unsigned int lock_rec;
967
+ };
968
+
969
+ STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
970
+ STATIC_ASSERT(rb_vm_tag_buf_end,
971
+ offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
972
+ sizeof(struct rb_vm_tag));
973
+
974
+ struct rb_unblock_callback {
975
+ rb_unblock_function_t *func;
976
+ void *arg;
977
+ };
978
+
979
+ struct rb_mutex_struct;
980
+
981
+ typedef struct rb_fiber_struct rb_fiber_t;
982
+
983
+ struct rb_waiting_list {
984
+ struct rb_waiting_list *next;
985
+ struct rb_thread_struct *thread;
986
+ struct rb_fiber_struct *fiber;
987
+ };
988
+
989
+ struct rb_execution_context_struct {
990
+ /* execution information */
991
+ VALUE *vm_stack; /* must free, must mark */
992
+ size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
993
+ rb_control_frame_t *cfp;
994
+
995
+ struct rb_vm_tag *tag;
996
+
997
+ /* interrupt flags */
998
+ rb_atomic_t interrupt_flag;
999
+ rb_atomic_t interrupt_mask; /* size should match flag */
1000
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1001
+ uint32_t checked_clock;
1002
+ #endif
1003
+
1004
+ rb_fiber_t *fiber_ptr;
1005
+ struct rb_thread_struct *thread_ptr;
1006
+
1007
+ /* storage (ec (fiber) local) */
1008
+ struct rb_id_table *local_storage;
1009
+ VALUE local_storage_recursive_hash;
1010
+ VALUE local_storage_recursive_hash_for_trace;
1011
+
1012
+ /* Inheritable fiber storage. */
1013
+ VALUE storage;
1014
+
1015
+ /* eval env */
1016
+ const VALUE *root_lep;
1017
+ VALUE root_svar;
1018
+
1019
+ /* trace information */
1020
+ struct rb_trace_arg_struct *trace_arg;
1021
+
1022
+ /* temporary places */
1023
+ VALUE errinfo;
1024
+ VALUE passed_block_handler; /* for rb_iterate */
1025
+
1026
+ uint8_t raised_flag; /* only 3 bits needed */
1027
+
1028
+ /* n.b. only 7 bits needed, really: */
1029
+ BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1030
+
1031
+ VALUE private_const_reference;
1032
+
1033
+ /* for GC */
1034
+ struct {
1035
+ VALUE *stack_start;
1036
+ VALUE *stack_end;
1037
+ size_t stack_maxsize;
1038
+ RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1039
+
1040
+ #ifdef RUBY_ASAN_ENABLED
1041
+ void *asan_fake_stack_handle;
1042
+ #endif
1043
+ } machine;
1044
+ };
1045
+
1046
+ #ifndef rb_execution_context_t
1047
+ typedef struct rb_execution_context_struct rb_execution_context_t;
1048
+ #define rb_execution_context_t rb_execution_context_t
1049
+ #endif
1050
+
1051
+ // for builtin.h
1052
+ #define VM_CORE_H_EC_DEFINED 1
1053
+
1054
+ // Set the vm_stack pointer in the execution context.
1055
+ void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1056
+
1057
+ // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1058
+ // @param ec the execution context to update.
1059
+ // @param stack a pointer to the stack to use.
1060
+ // @param size the size of the stack, as in `VALUE stack[size]`.
1061
+ void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1062
+
1063
+ // Clear (set to `NULL`) the vm_stack pointer.
1064
+ // @param ec the execution context to update.
1065
+ void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1066
+
1067
+ struct rb_ext_config {
1068
+ bool ractor_safe;
1069
+ };
1070
+
1071
+ typedef struct rb_ractor_struct rb_ractor_t;
1072
+
1073
+ struct rb_native_thread;
1074
+
1075
+ typedef struct rb_thread_struct {
1076
+ struct ccan_list_node lt_node; // managed by a ractor
1077
+ VALUE self;
1078
+ rb_ractor_t *ractor;
1079
+ rb_vm_t *vm;
1080
+ struct rb_native_thread *nt;
1081
+ rb_execution_context_t *ec;
1082
+
1083
+ struct rb_thread_sched_item sched;
1084
+ bool mn_schedulable;
1085
+ rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1086
+
1087
+ VALUE last_status; /* $? */
1088
+
1089
+ /* for cfunc */
1090
+ struct rb_calling_info *calling;
1091
+
1092
+ /* for load(true) */
1093
+ VALUE top_self;
1094
+ VALUE top_wrapper;
1095
+
1096
+ /* thread control */
1097
+
1098
+ BITFIELD(enum rb_thread_status, status, 2);
1099
+ /* bit flags */
1100
+ unsigned int has_dedicated_nt : 1;
1101
+ unsigned int to_kill : 1;
1102
+ unsigned int abort_on_exception: 1;
1103
+ unsigned int report_on_exception: 1;
1104
+ unsigned int pending_interrupt_queue_checked: 1;
1105
+ int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1106
+ uint32_t running_time_us; /* 12500..800000 */
1107
+
1108
+ void *blocking_region_buffer;
1109
+
1110
+ VALUE thgroup;
1111
+ VALUE value;
1112
+
1113
+ /* temporary place of retval on OPT_CALL_THREADED_CODE */
1114
+ #if OPT_CALL_THREADED_CODE
1115
+ VALUE retval;
1116
+ #endif
1117
+
1118
+ /* async errinfo queue */
1119
+ VALUE pending_interrupt_queue;
1120
+ VALUE pending_interrupt_mask_stack;
1121
+
1122
+ /* interrupt management */
1123
+ rb_nativethread_lock_t interrupt_lock;
1124
+ struct rb_unblock_callback unblock;
1125
+ VALUE locking_mutex;
1126
+ struct rb_mutex_struct *keeping_mutexes;
1127
+
1128
+ struct rb_waiting_list *join_list;
1129
+
1130
+ union {
1131
+ struct {
1132
+ VALUE proc;
1133
+ VALUE args;
1134
+ int kw_splat;
1135
+ } proc;
1136
+ struct {
1137
+ VALUE (*func)(void *);
1138
+ void *arg;
1139
+ } func;
1140
+ } invoke_arg;
1141
+
1142
+ enum thread_invoke_type {
1143
+ thread_invoke_type_none = 0,
1144
+ thread_invoke_type_proc,
1145
+ thread_invoke_type_ractor_proc,
1146
+ thread_invoke_type_func
1147
+ } invoke_type;
1148
+
1149
+ /* statistics data for profiler */
1150
+ VALUE stat_insn_usage;
1151
+
1152
+ /* fiber */
1153
+ rb_fiber_t *root_fiber;
1154
+
1155
+ VALUE scheduler;
1156
+ unsigned int blocking;
1157
+
1158
+ /* misc */
1159
+ VALUE name;
1160
+ void **specific_storage;
1161
+
1162
+ struct rb_ext_config ext_config;
1163
+ } rb_thread_t;
1164
+
1165
+ static inline unsigned int
1166
+ rb_th_serial(const rb_thread_t *th)
1167
+ {
1168
+ return th ? (unsigned int)th->serial : 0;
1169
+ }
1170
+
1171
/* Operand encoding of the defineclass instruction: the low 3 bits select
 * the kind of definition, upper bits carry flags. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1185
+
1186
+ /* iseq.c */
1187
+ RUBY_SYMBOL_EXPORT_BEGIN
1188
+
1189
+ /* node -> iseq */
1190
+ rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1191
+ rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1192
+ rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1193
+ rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1194
+ rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1195
+ enum rb_iseq_type, const rb_compile_option_t*,
1196
+ VALUE script_lines);
1197
+
1198
+ struct iseq_link_anchor;
1199
+ struct rb_iseq_new_with_callback_callback_func {
1200
+ VALUE flags;
1201
+ VALUE reserved;
1202
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1203
+ const void *data;
1204
+ };
1205
+ static inline struct rb_iseq_new_with_callback_callback_func *
1206
+ rb_iseq_new_with_callback_new_callback(
1207
+ void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1208
+ {
1209
+ struct rb_iseq_new_with_callback_callback_func *memo =
1210
+ IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1211
+ memo->func = func;
1212
+ memo->data = ptr;
1213
+
1214
+ return memo;
1215
+ }
1216
+ rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1217
+ VALUE name, VALUE path, VALUE realpath, int first_lineno,
1218
+ const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1219
+
1220
+ VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1221
+ int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1222
+
1223
+ VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1224
+
1225
+ RUBY_EXTERN VALUE rb_cISeq;
1226
+ RUBY_EXTERN VALUE rb_cRubyVM;
1227
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1228
+ RUBY_EXTERN VALUE rb_block_param_proxy;
1229
+ RUBY_SYMBOL_EXPORT_END
1230
+
1231
+ #define GetProcPtr(obj, ptr) \
1232
+ GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1233
+
1234
+ typedef struct {
1235
+ const struct rb_block block;
1236
+ unsigned int is_from_method: 1; /* bool */
1237
+ unsigned int is_lambda: 1; /* bool */
1238
+ unsigned int is_isolated: 1; /* bool */
1239
+ } rb_proc_t;
1240
+
1241
+ RUBY_SYMBOL_EXPORT_BEGIN
1242
+ VALUE rb_proc_isolate(VALUE self);
1243
+ VALUE rb_proc_isolate_bang(VALUE self);
1244
+ VALUE rb_proc_ractor_make_shareable(VALUE self);
1245
+ RUBY_SYMBOL_EXPORT_END
1246
+
1247
+ typedef struct {
1248
+ VALUE flags; /* imemo header */
1249
+ rb_iseq_t *iseq;
1250
+ const VALUE *ep;
1251
+ const VALUE *env;
1252
+ unsigned int env_size;
1253
+ } rb_env_t;
1254
+
1255
+ extern const rb_data_type_t ruby_binding_data_type;
1256
+
1257
+ #define GetBindingPtr(obj, ptr) \
1258
+ GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1259
+
1260
+ typedef struct {
1261
+ const struct rb_block block;
1262
+ const VALUE pathobj;
1263
+ int first_lineno;
1264
+ } rb_binding_t;
1265
+
1266
/* used by compile time and send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04

/* Which Array method the opt_newarray_send instruction replaces. */
enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
};

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices into the special-variable (svar) storage. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1, /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
1298
+
1299
+ /* inline cache */
1300
+ typedef struct iseq_inline_constant_cache *IC;
1301
+ typedef struct iseq_inline_iv_cache_entry *IVC;
1302
+ typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1303
+ typedef union iseq_inline_storage_entry *ISE;
1304
+ typedef const struct rb_callinfo *CALL_INFO;
1305
+ typedef const struct rb_callcache *CALL_CACHE;
1306
+ typedef struct rb_call_data *CALL_DATA;
1307
+
1308
+ typedef VALUE CDHASH;
1309
+
1310
+ #ifndef FUNC_FASTCALL
1311
+ #define FUNC_FASTCALL(x) x
1312
+ #endif
1313
+
1314
+ typedef rb_control_frame_t *
1315
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1316
+
1317
+ #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1318
+ #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1319
+
1320
+ #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1321
+ #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1322
+ #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1323
+
1324
/* Flags stored in ep[VM_ENV_DATA_INDEX_FLAGS] of a frame/environment. */
enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
     *
     * X : tag for GC marking (It seems as Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,

    VM_FRAME_MAGIC_MASK = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,
};

/* Every environment is followed by this many bookkeeping slots. */
#define VM_ENV_DATA_SIZE ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1371
+
1372
+ static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1373
+
1374
+ static inline void
1375
+ VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1376
+ {
1377
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1378
+ VM_ASSERT(FIXNUM_P(flags));
1379
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1380
+ }
1381
+
1382
+ static inline void
1383
+ VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1384
+ {
1385
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1386
+ VM_ASSERT(FIXNUM_P(flags));
1387
+ VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1388
+ }
1389
+
1390
+ static inline unsigned long
1391
+ VM_ENV_FLAGS(const VALUE *ep, long flag)
1392
+ {
1393
+ VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1394
+ VM_ASSERT(FIXNUM_P(flags));
1395
+ return flags & flag;
1396
+ }
1397
+
1398
+ static inline unsigned long
1399
+ VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1400
+ {
1401
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1402
+ }
1403
+
1404
+ static inline int
1405
+ VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1406
+ {
1407
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1408
+ }
1409
+
1410
+ static inline int
1411
+ VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1412
+ {
1413
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1414
+ }
1415
+
1416
+ static inline int
1417
+ VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1418
+ {
1419
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1420
+ }
1421
+
1422
+ static inline int
1423
+ VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1424
+ {
1425
+ return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1426
+ }
1427
+
1428
+ static inline int
1429
+ rb_obj_is_iseq(VALUE iseq)
1430
+ {
1431
+ return imemo_type_p(iseq, imemo_iseq);
1432
+ }
1433
+
1434
+ #if VM_CHECK_MODE > 0
1435
+ #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1436
+ #endif
1437
+
1438
+ static inline int
1439
+ VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1440
+ {
1441
+ int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1442
+ VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1443
+ (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1444
+ return cframe_p;
1445
+ }
1446
+
1447
+ static inline int
1448
+ VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1449
+ {
1450
+ return !VM_FRAME_CFRAME_P(cfp);
1451
+ }
1452
+
1453
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
1454
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1455
+
1456
+ #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1457
+ #define VM_BLOCK_HANDLER_NONE 0
1458
+
1459
+ static inline int
1460
+ VM_ENV_LOCAL_P(const VALUE *ep)
1461
+ {
1462
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1463
+ }
1464
+
1465
+ static inline const VALUE *
1466
+ VM_ENV_PREV_EP(const VALUE *ep)
1467
+ {
1468
+ VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1469
+ return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1470
+ }
1471
+
1472
+ static inline VALUE
1473
+ VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1474
+ {
1475
+ VM_ASSERT(VM_ENV_LOCAL_P(ep));
1476
+ return ep[VM_ENV_DATA_INDEX_SPECVAL];
1477
+ }
1478
+
1479
+ #if VM_CHECK_MODE > 0
1480
+ int rb_vm_ep_in_heap_p(const VALUE *ep);
1481
+ #endif
1482
+
1483
+ static inline int
1484
+ VM_ENV_ESCAPED_P(const VALUE *ep)
1485
+ {
1486
+ VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1487
+ return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1488
+ }
1489
+
1490
+ #if VM_CHECK_MODE > 0
1491
+ static inline int
1492
+ vm_assert_env(VALUE obj)
1493
+ {
1494
+ VM_ASSERT(imemo_type_p(obj, imemo_env));
1495
+ return 1;
1496
+ }
1497
+ #endif
1498
+
1499
+ RBIMPL_ATTR_NONNULL((1))
1500
+ static inline VALUE
1501
+ VM_ENV_ENVVAL(const VALUE *ep)
1502
+ {
1503
+ VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1504
+ VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1505
+ VM_ASSERT(vm_assert_env(envval));
1506
+ return envval;
1507
+ }
1508
+
1509
+ RBIMPL_ATTR_NONNULL((1))
1510
+ static inline const rb_env_t *
1511
+ VM_ENV_ENVVAL_PTR(const VALUE *ep)
1512
+ {
1513
+ return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1514
+ }
1515
+
1516
+ static inline const rb_env_t *
1517
+ vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1518
+ {
1519
+ rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1520
+ env->ep = env_ep;
1521
+ env->env = env_body;
1522
+ env->env_size = env_size;
1523
+ env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1524
+ return env;
1525
+ }
1526
+
1527
+ static inline void
1528
+ VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1529
+ {
1530
+ *((VALUE *)ptr) = v;
1531
+ }
1532
+
1533
+ static inline void
1534
+ VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1535
+ {
1536
+ VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1537
+ VM_FORCE_WRITE(ptr, special_const_value);
1538
+ }
1539
+
1540
+ static inline void
1541
+ VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1542
+ {
1543
+ VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1544
+ VM_FORCE_WRITE(&ep[index], v);
1545
+ }
1546
+
1547
+ const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1548
+ const VALUE *rb_vm_proc_local_ep(VALUE proc);
1549
+ void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1550
+ void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1551
+
1552
+ VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1553
+
1554
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1555
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1556
+
1557
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1558
+ ((void *)(ecfp) > (void *)(cfp))
1559
+
1560
+ static inline const rb_control_frame_t *
1561
+ RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1562
+ {
1563
+ return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1564
+ }
1565
+
1566
+ static inline int
1567
+ RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1568
+ {
1569
+ return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1570
+ }
1571
+
1572
+ static inline int
1573
+ VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1574
+ {
1575
+ if ((block_handler & 0x03) == 0x01) {
1576
+ #if VM_CHECK_MODE > 0
1577
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1578
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1579
+ #endif
1580
+ return 1;
1581
+ }
1582
+ else {
1583
+ return 0;
1584
+ }
1585
+ }
1586
+
1587
+ static inline VALUE
1588
+ VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1589
+ {
1590
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1591
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1592
+ return block_handler;
1593
+ }
1594
+
1595
+ static inline const struct rb_captured_block *
1596
+ VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1597
+ {
1598
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1599
+ VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1600
+ return captured;
1601
+ }
1602
+
1603
+ static inline int
1604
+ VM_BH_IFUNC_P(VALUE block_handler)
1605
+ {
1606
+ if ((block_handler & 0x03) == 0x03) {
1607
+ #if VM_CHECK_MODE > 0
1608
+ struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1609
+ VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1610
+ #endif
1611
+ return 1;
1612
+ }
1613
+ else {
1614
+ return 0;
1615
+ }
1616
+ }
1617
+
1618
+ static inline VALUE
1619
+ VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1620
+ {
1621
+ VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1622
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1623
+ return block_handler;
1624
+ }
1625
+
1626
+ static inline const struct rb_captured_block *
1627
+ VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1628
+ {
1629
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1630
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1631
+ return captured;
1632
+ }
1633
+
1634
+ static inline const struct rb_captured_block *
1635
+ VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1636
+ {
1637
+ struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1638
+ VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1639
+ return captured;
1640
+ }
1641
+
1642
+ static inline enum rb_block_handler_type
1643
+ vm_block_handler_type(VALUE block_handler)
1644
+ {
1645
+ if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1646
+ return block_handler_type_iseq;
1647
+ }
1648
+ else if (VM_BH_IFUNC_P(block_handler)) {
1649
+ return block_handler_type_ifunc;
1650
+ }
1651
+ else if (SYMBOL_P(block_handler)) {
1652
+ return block_handler_type_symbol;
1653
+ }
1654
+ else {
1655
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1656
+ return block_handler_type_proc;
1657
+ }
1658
+ }
1659
+
1660
+ static inline void
1661
+ vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1662
+ {
1663
+ VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1664
+ (vm_block_handler_type(block_handler), 1));
1665
+ }
1666
+
1667
+ static inline enum rb_block_type
1668
+ vm_block_type(const struct rb_block *block)
1669
+ {
1670
+ #if VM_CHECK_MODE > 0
1671
+ switch (block->type) {
1672
+ case block_type_iseq:
1673
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1674
+ break;
1675
+ case block_type_ifunc:
1676
+ VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1677
+ break;
1678
+ case block_type_symbol:
1679
+ VM_ASSERT(SYMBOL_P(block->as.symbol));
1680
+ break;
1681
+ case block_type_proc:
1682
+ VM_ASSERT(rb_obj_is_proc(block->as.proc));
1683
+ break;
1684
+ }
1685
+ #endif
1686
+ return block->type;
1687
+ }
1688
+
1689
+ static inline void
1690
+ vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1691
+ {
1692
+ struct rb_block *mb = (struct rb_block *)block;
1693
+ mb->type = type;
1694
+ }
1695
+
1696
+ static inline const struct rb_block *
1697
+ vm_proc_block(VALUE procval)
1698
+ {
1699
+ VM_ASSERT(rb_obj_is_proc(procval));
1700
+ return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1701
+ }
1702
+
1703
+ static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1704
+ static inline const VALUE *vm_block_ep(const struct rb_block *block);
1705
+
1706
+ static inline const rb_iseq_t *
1707
+ vm_proc_iseq(VALUE procval)
1708
+ {
1709
+ return vm_block_iseq(vm_proc_block(procval));
1710
+ }
1711
+
1712
+ static inline const VALUE *
1713
+ vm_proc_ep(VALUE procval)
1714
+ {
1715
+ return vm_block_ep(vm_proc_block(procval));
1716
+ }
1717
+
1718
+ static inline const rb_iseq_t *
1719
+ vm_block_iseq(const struct rb_block *block)
1720
+ {
1721
+ switch (vm_block_type(block)) {
1722
+ case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1723
+ case block_type_proc: return vm_proc_iseq(block->as.proc);
1724
+ case block_type_ifunc:
1725
+ case block_type_symbol: return NULL;
1726
+ }
1727
+ VM_UNREACHABLE(vm_block_iseq);
1728
+ return NULL;
1729
+ }
1730
+
1731
+ static inline const VALUE *
1732
+ vm_block_ep(const struct rb_block *block)
1733
+ {
1734
+ switch (vm_block_type(block)) {
1735
+ case block_type_iseq:
1736
+ case block_type_ifunc: return block->as.captured.ep;
1737
+ case block_type_proc: return vm_proc_ep(block->as.proc);
1738
+ case block_type_symbol: return NULL;
1739
+ }
1740
+ VM_UNREACHABLE(vm_block_ep);
1741
+ return NULL;
1742
+ }
1743
+
1744
+ static inline VALUE
1745
+ vm_block_self(const struct rb_block *block)
1746
+ {
1747
+ switch (vm_block_type(block)) {
1748
+ case block_type_iseq:
1749
+ case block_type_ifunc:
1750
+ return block->as.captured.self;
1751
+ case block_type_proc:
1752
+ return vm_block_self(vm_proc_block(block->as.proc));
1753
+ case block_type_symbol:
1754
+ return Qundef;
1755
+ }
1756
+ VM_UNREACHABLE(vm_block_self);
1757
+ return Qundef;
1758
+ }
1759
+
1760
+ static inline VALUE
1761
+ VM_BH_TO_SYMBOL(VALUE block_handler)
1762
+ {
1763
+ VM_ASSERT(SYMBOL_P(block_handler));
1764
+ return block_handler;
1765
+ }
1766
+
1767
+ static inline VALUE
1768
+ VM_BH_FROM_SYMBOL(VALUE symbol)
1769
+ {
1770
+ VM_ASSERT(SYMBOL_P(symbol));
1771
+ return symbol;
1772
+ }
1773
+
1774
+ static inline VALUE
1775
+ VM_BH_TO_PROC(VALUE block_handler)
1776
+ {
1777
+ VM_ASSERT(rb_obj_is_proc(block_handler));
1778
+ return block_handler;
1779
+ }
1780
+
1781
+ static inline VALUE
1782
+ VM_BH_FROM_PROC(VALUE procval)
1783
+ {
1784
+ VM_ASSERT(rb_obj_is_proc(procval));
1785
+ return procval;
1786
+ }
1787
+
1788
+ /* VM related object allocate functions */
1789
+ VALUE rb_thread_alloc(VALUE klass);
1790
+ VALUE rb_binding_alloc(VALUE klass);
1791
+ VALUE rb_proc_alloc(VALUE klass);
1792
+ VALUE rb_proc_dup(VALUE self);
1793
+
1794
+ /* for debug */
1795
+ extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1796
+ extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1797
+ extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1798
+
1799
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1800
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1801
+ bool rb_vm_bugreport(const void *, FILE *);
1802
+ typedef void (*ruby_sighandler_t)(int);
1803
+ RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1804
+ NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1805
+
1806
+ /* functions about thread/vm execution */
1807
+ RUBY_SYMBOL_EXPORT_BEGIN
1808
+ VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1809
+ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1810
+ VALUE rb_iseq_path(const rb_iseq_t *iseq);
1811
+ VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1812
+ RUBY_SYMBOL_EXPORT_END
1813
+
1814
+ VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1815
+ void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1816
+
1817
+ int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1818
+ void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1819
+
1820
+ VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1821
+
1822
+ VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1823
+ static inline VALUE
1824
+ rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1825
+ {
1826
+ return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1827
+ }
1828
+
1829
+ static inline VALUE
1830
+ rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1831
+ {
1832
+ return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1833
+ }
1834
+
1835
+ VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1836
+ VALUE rb_vm_env_local_variables(const rb_env_t *env);
1837
+ const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1838
+ const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1839
+ void rb_vm_inc_const_missing_count(void);
1840
+ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1841
+ const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1842
+ void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1843
+ void rb_vm_pop_frame(rb_execution_context_t *ec);
1844
+
1845
+ void rb_thread_start_timer_thread(void);
1846
+ void rb_thread_stop_timer_thread(void);
1847
+ void rb_thread_reset_timer_thread(void);
1848
+ void rb_thread_wakeup_timer_thread(int);
1849
+
1850
+ static inline void
1851
+ rb_vm_living_threads_init(rb_vm_t *vm)
1852
+ {
1853
+ ccan_list_head_init(&vm->waiting_fds);
1854
+ ccan_list_head_init(&vm->workqueue);
1855
+ ccan_list_head_init(&vm->ractor.set);
1856
+ ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1857
+ }
1858
+
1859
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1860
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1861
+ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1862
+ VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1863
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
1864
+ void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1865
+ void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1866
+ rb_thread_t * ruby_thread_from_native(void);
1867
+ int ruby_thread_set_native(rb_thread_t *th);
1868
+ int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1869
+ void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1870
+ void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1871
+ VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1872
+
1873
+ void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1874
+
1875
+ #define rb_vm_register_special_exception(sp, e, m) \
1876
+ rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1877
+
1878
+ void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1879
+
1880
+ void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1881
+
1882
+ const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1883
+
1884
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1885
+
1886
+ #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1887
+ STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1888
+ STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1889
+ const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1890
+ if (UNLIKELY((cfp) <= &bound[1])) { \
1891
+ vm_stackoverflow(); \
1892
+ } \
1893
+ } while (0)
1894
+
1895
+ #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1896
+ CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1897
+
1898
+ VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1899
+
1900
+ rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1901
+
1902
+ /* for thread */
1903
+
1904
+ #if RUBY_VM_THREAD_MODEL == 2
1905
+
1906
+ RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1907
+ RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1908
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1909
+ RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1910
+ RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1911
+
1912
+ #define GET_VM() rb_current_vm()
1913
+ #define GET_RACTOR() rb_current_ractor()
1914
+ #define GET_THREAD() rb_current_thread()
1915
+ #define GET_EC() rb_current_execution_context(true)
1916
+
1917
+ static inline rb_thread_t *
1918
+ rb_ec_thread_ptr(const rb_execution_context_t *ec)
1919
+ {
1920
+ return ec->thread_ptr;
1921
+ }
1922
+
1923
+ static inline rb_ractor_t *
1924
+ rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1925
+ {
1926
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
1927
+ if (th) {
1928
+ VM_ASSERT(th->ractor != NULL);
1929
+ return th->ractor;
1930
+ }
1931
+ else {
1932
+ return NULL;
1933
+ }
1934
+ }
1935
+
1936
+ static inline rb_vm_t *
1937
+ rb_ec_vm_ptr(const rb_execution_context_t *ec)
1938
+ {
1939
+ const rb_thread_t *th = rb_ec_thread_ptr(ec);
1940
+ if (th) {
1941
+ return th->vm;
1942
+ }
1943
+ else {
1944
+ return NULL;
1945
+ }
1946
+ }
1947
+
1948
+ static inline rb_execution_context_t *
1949
+ rb_current_execution_context(bool expect_ec)
1950
+ {
1951
+ #ifdef RB_THREAD_LOCAL_SPECIFIER
1952
+ #ifdef __APPLE__
1953
+ rb_execution_context_t *ec = rb_current_ec();
1954
+ #else
1955
+ rb_execution_context_t *ec = ruby_current_ec;
1956
+ #endif
1957
+
1958
+ /* On the shared objects, `__tls_get_addr()` is used to access the TLS
1959
+ * and the address of the `ruby_current_ec` can be stored on a function
1960
+ * frame. However, this address can be mis-used after native thread
1961
+ * migration of a coroutine.
1962
+ * 1) Get `ptr =&ruby_current_ec` op NT1 and store it on the frame.
1963
+ * 2) Context switch and resume it on the NT2.
1964
+ * 3) `ptr` is used on NT2 but it accesses to the TLS on NT1.
1965
+ * This assertion checks such misusage.
1966
+ *
1967
+ * To avoid accidents, `GET_EC()` should be called once on the frame.
1968
+ * Note that inlining can produce the problem.
1969
+ */
1970
+ VM_ASSERT(ec == rb_current_ec_noinline());
1971
+ #else
1972
+ rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1973
+ #endif
1974
+ VM_ASSERT(!expect_ec || ec != NULL);
1975
+ return ec;
1976
+ }
1977
+
1978
+ static inline rb_thread_t *
1979
+ rb_current_thread(void)
1980
+ {
1981
+ const rb_execution_context_t *ec = GET_EC();
1982
+ return rb_ec_thread_ptr(ec);
1983
+ }
1984
+
1985
+ static inline rb_ractor_t *
1986
+ rb_current_ractor_raw(bool expect)
1987
+ {
1988
+ if (ruby_single_main_ractor) {
1989
+ return ruby_single_main_ractor;
1990
+ }
1991
+ else {
1992
+ const rb_execution_context_t *ec = rb_current_execution_context(expect);
1993
+ return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
1994
+ }
1995
+ }
1996
+
1997
+ static inline rb_ractor_t *
1998
+ rb_current_ractor(void)
1999
+ {
2000
+ return rb_current_ractor_raw(true);
2001
+ }
2002
+
2003
+ static inline rb_vm_t *
2004
+ rb_current_vm(void)
2005
+ {
2006
+ #if 0 // TODO: reconsider the assertions
2007
+ VM_ASSERT(ruby_current_vm_ptr == NULL ||
2008
+ ruby_current_execution_context_ptr == NULL ||
2009
+ rb_ec_thread_ptr(GET_EC()) == NULL ||
2010
+ rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2011
+ rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2012
+ #endif
2013
+
2014
+ return ruby_current_vm_ptr;
2015
+ }
2016
+
2017
+ void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2018
+ unsigned int recorded_lock_rec,
2019
+ unsigned int current_lock_rec);
2020
+
2021
+ static inline unsigned int
2022
+ rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2023
+ {
2024
+ rb_vm_t *vm = rb_ec_vm_ptr(ec);
2025
+
2026
+ if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2027
+ return 0;
2028
+ }
2029
+ else {
2030
+ return vm->ractor.sync.lock_rec;
2031
+ }
2032
+ }
2033
+
2034
+ #else
2035
+ #error "unsupported thread model"
2036
+ #endif
2037
+
2038
+ enum {
2039
+ TIMER_INTERRUPT_MASK = 0x01,
2040
+ PENDING_INTERRUPT_MASK = 0x02,
2041
+ POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2042
+ TRAP_INTERRUPT_MASK = 0x08,
2043
+ TERMINATE_INTERRUPT_MASK = 0x10,
2044
+ VM_BARRIER_INTERRUPT_MASK = 0x20,
2045
+ };
2046
+
2047
+ #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2048
+ #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2049
+ #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2050
+ #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2051
+ #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2052
+ #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2053
+ #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2054
+ (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
2055
+
2056
+ static inline bool
2057
+ RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2058
+ {
2059
+ #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2060
+ uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2061
+
2062
+ if (current_clock != ec->checked_clock) {
2063
+ ec->checked_clock = current_clock;
2064
+ RUBY_VM_SET_TIMER_INTERRUPT(ec);
2065
+ }
2066
+ #endif
2067
+ return ec->interrupt_flag & ~(ec)->interrupt_mask;
2068
+ }
2069
+
2070
+ VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2071
+ int rb_signal_buff_size(void);
2072
+ int rb_signal_exec(rb_thread_t *th, int sig);
2073
+ void rb_threadptr_check_signal(rb_thread_t *mth);
2074
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2075
+ void rb_threadptr_signal_exit(rb_thread_t *th);
2076
+ int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2077
+ void rb_threadptr_interrupt(rb_thread_t *th);
2078
+ void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2079
+ void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2080
+ void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2081
+ VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2082
+ void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2083
+ void rb_execution_context_update(rb_execution_context_t *ec);
2084
+ void rb_execution_context_mark(const rb_execution_context_t *ec);
2085
+ void rb_fiber_close(rb_fiber_t *fib);
2086
+ void Init_native_thread(rb_thread_t *th);
2087
+ int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2088
+
2089
+ // vm_sync.h
2090
+ void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2091
+ void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2092
+
2093
+ #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2094
+ static inline void
2095
+ rb_vm_check_ints(rb_execution_context_t *ec)
2096
+ {
2097
+ #ifdef RUBY_ASSERT_CRITICAL_SECTION
2098
+ VM_ASSERT(ruby_assert_critical_section_entered == 0);
2099
+ #endif
2100
+
2101
+ VM_ASSERT(ec == GET_EC());
2102
+
2103
+ if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2104
+ rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2105
+ }
2106
+ }
2107
+
2108
+ /* tracer */
2109
+
2110
+ struct rb_trace_arg_struct {
2111
+ rb_event_flag_t event;
2112
+ rb_execution_context_t *ec;
2113
+ const rb_control_frame_t *cfp;
2114
+ VALUE self;
2115
+ ID id;
2116
+ ID called_id;
2117
+ VALUE klass;
2118
+ VALUE data;
2119
+
2120
+ int klass_solved;
2121
+
2122
+ /* calc from cfp */
2123
+ int lineno;
2124
+ VALUE path;
2125
+ };
2126
+
2127
+ void rb_hook_list_mark(rb_hook_list_t *hooks);
2128
+ void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2129
+ void rb_hook_list_free(rb_hook_list_t *hooks);
2130
+ void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2131
+ void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2132
+
2133
+ void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2134
+
2135
+ #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2136
+ const rb_event_flag_t flag_arg_ = (flag_); \
2137
+ rb_hook_list_t *hooks_arg_ = (hooks_); \
2138
+ if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2139
+ /* defer evaluating the other arguments */ \
2140
+ rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2141
+ } \
2142
+ } while (0)
2143
+
2144
+ static inline void
2145
+ rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2146
+ VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2147
+ {
2148
+ struct rb_trace_arg_struct trace_arg;
2149
+
2150
+ VM_ASSERT((hooks->events & flag) != 0);
2151
+
2152
+ trace_arg.event = flag;
2153
+ trace_arg.ec = ec;
2154
+ trace_arg.cfp = ec->cfp;
2155
+ trace_arg.self = self;
2156
+ trace_arg.id = id;
2157
+ trace_arg.called_id = called_id;
2158
+ trace_arg.klass = klass;
2159
+ trace_arg.data = data;
2160
+ trace_arg.path = Qundef;
2161
+ trace_arg.klass_solved = 0;
2162
+
2163
+ rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2164
+ }
2165
+
2166
+ struct rb_ractor_pub {
2167
+ VALUE self;
2168
+ uint32_t id;
2169
+ rb_hook_list_t hooks;
2170
+ };
2171
+
2172
+ static inline rb_hook_list_t *
2173
+ rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2174
+ {
2175
+ struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2176
+ return &cr_pub->hooks;
2177
+ }
2178
+
2179
+ #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2180
+ EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2181
+
2182
+ #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2183
+ EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2184
+
2185
+ static inline void
2186
+ rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2187
+ {
2188
+ EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2189
+ NIL_P(eval_script) ? (VALUE)iseq :
2190
+ rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2191
+ }
2192
+
2193
+ void rb_vm_trap_exit(rb_vm_t *vm);
2194
+ void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2195
+ void rb_vm_postponed_job_free(void); /* vm_trace.c */
2196
+ size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2197
+ void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2198
+
2199
+ RUBY_SYMBOL_EXPORT_BEGIN
2200
+
2201
+ int rb_thread_check_trap_pending(void);
2202
+
2203
+ /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2204
+ #define RUBY_EVENT_COVERAGE_LINE 0x010000
2205
+ #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2206
+
2207
+ extern VALUE rb_get_coverages(void);
2208
+ extern void rb_set_coverages(VALUE, int, VALUE);
2209
+ extern void rb_clear_coverages(void);
2210
+ extern void rb_reset_coverages(void);
2211
+ extern void rb_resume_coverages(void);
2212
+ extern void rb_suspend_coverages(void);
2213
+
2214
+ void rb_postponed_job_flush(rb_vm_t *vm);
2215
+
2216
+ // ractor.c
2217
+ RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2218
+ RUBY_EXTERN VALUE rb_eRactorIsolationError;
2219
+
2220
+ RUBY_SYMBOL_EXPORT_END
2221
+
2222
+ #endif /* RUBY_VM_CORE_H */