debase-ruby_core_source 0.9.5 → 0.9.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/Rakefile +1 -1
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/addr2line.h +21 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ccan/build_assert/build_assert.h +40 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ccan/check_type/check_type.h +63 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ccan/container_of/container_of.h +142 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ccan/list/list.h +773 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ccan/str/str.h +16 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/constant.h +50 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/dln.h +51 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/encindex.h +67 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/eval_intern.h +315 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/gc.h +114 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/id.h +220 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/id_table.h +31 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/insns.inc +112 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/insns_info.inc +796 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/internal.h +1767 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/iseq.h +252 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/known_errors.inc +746 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/method.h +213 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/node.h +520 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/node_name.inc +208 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/opt_sc.inc +774 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/optinsn.inc +83 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/optunifs.inc +129 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/parse.h +204 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/probes_helper.h +43 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/regenc.h +251 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/regint.h +927 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/regparse.h +369 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/revision.h +1 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ruby_assert.h +54 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/ruby_atomic.h +233 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/siphash.h +48 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/symbol.h +108 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/thread_pthread.h +54 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/thread_win32.h +36 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/timev.h +42 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/transcode_data.h +139 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/version.h +73 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm.inc +3412 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_call_iseq_optimized.inc +212 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_core.h +1644 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_debug.h +37 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_exec.h +182 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_insnhelper.h +227 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vm_opts.h +57 -0
- data/lib/debase/ruby_core_source/ruby-2.4.0-p0/vmtc.inc +110 -0
- data/lib/debase/ruby_core_source/version.rb +1 -1
- metadata +49 -2
@@ -0,0 +1,212 @@
|
|
1
|
+
#if 1 /* enable or disable this optimization */
|
2
|
+
|
3
|
+
/* DO NOT EDIT THIS FILE DIRECTLY
|
4
|
+
*
|
5
|
+
* This file is generated by tool/mkcall_iseq.rb
|
6
|
+
*/
|
7
|
+
|
8
|
+
static VALUE
|
9
|
+
vm_call_iseq_setup_normal_0start_0params_0locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
10
|
+
{
|
11
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 0);
|
12
|
+
}
|
13
|
+
|
14
|
+
static VALUE
|
15
|
+
vm_call_iseq_setup_normal_0start_0params_1locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
16
|
+
{
|
17
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 1);
|
18
|
+
}
|
19
|
+
|
20
|
+
static VALUE
|
21
|
+
vm_call_iseq_setup_normal_0start_0params_2locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
22
|
+
{
|
23
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 2);
|
24
|
+
}
|
25
|
+
|
26
|
+
static VALUE
|
27
|
+
vm_call_iseq_setup_normal_0start_0params_3locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
28
|
+
{
|
29
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 3);
|
30
|
+
}
|
31
|
+
|
32
|
+
static VALUE
|
33
|
+
vm_call_iseq_setup_normal_0start_0params_4locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
34
|
+
{
|
35
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 4);
|
36
|
+
}
|
37
|
+
|
38
|
+
static VALUE
|
39
|
+
vm_call_iseq_setup_normal_0start_0params_5locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
40
|
+
{
|
41
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 0, 5);
|
42
|
+
}
|
43
|
+
|
44
|
+
static VALUE
|
45
|
+
vm_call_iseq_setup_normal_0start_1params_0locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
46
|
+
{
|
47
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 0);
|
48
|
+
}
|
49
|
+
|
50
|
+
static VALUE
|
51
|
+
vm_call_iseq_setup_normal_0start_1params_1locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
52
|
+
{
|
53
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 1);
|
54
|
+
}
|
55
|
+
|
56
|
+
static VALUE
|
57
|
+
vm_call_iseq_setup_normal_0start_1params_2locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
58
|
+
{
|
59
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 2);
|
60
|
+
}
|
61
|
+
|
62
|
+
static VALUE
|
63
|
+
vm_call_iseq_setup_normal_0start_1params_3locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
64
|
+
{
|
65
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 3);
|
66
|
+
}
|
67
|
+
|
68
|
+
static VALUE
|
69
|
+
vm_call_iseq_setup_normal_0start_1params_4locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
70
|
+
{
|
71
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 4);
|
72
|
+
}
|
73
|
+
|
74
|
+
static VALUE
|
75
|
+
vm_call_iseq_setup_normal_0start_1params_5locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
76
|
+
{
|
77
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 1, 5);
|
78
|
+
}
|
79
|
+
|
80
|
+
static VALUE
|
81
|
+
vm_call_iseq_setup_normal_0start_2params_0locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
82
|
+
{
|
83
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 0);
|
84
|
+
}
|
85
|
+
|
86
|
+
static VALUE
|
87
|
+
vm_call_iseq_setup_normal_0start_2params_1locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
88
|
+
{
|
89
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 1);
|
90
|
+
}
|
91
|
+
|
92
|
+
static VALUE
|
93
|
+
vm_call_iseq_setup_normal_0start_2params_2locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
94
|
+
{
|
95
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 2);
|
96
|
+
}
|
97
|
+
|
98
|
+
static VALUE
|
99
|
+
vm_call_iseq_setup_normal_0start_2params_3locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
100
|
+
{
|
101
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 3);
|
102
|
+
}
|
103
|
+
|
104
|
+
static VALUE
|
105
|
+
vm_call_iseq_setup_normal_0start_2params_4locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
106
|
+
{
|
107
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 4);
|
108
|
+
}
|
109
|
+
|
110
|
+
static VALUE
|
111
|
+
vm_call_iseq_setup_normal_0start_2params_5locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
112
|
+
{
|
113
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 2, 5);
|
114
|
+
}
|
115
|
+
|
116
|
+
static VALUE
|
117
|
+
vm_call_iseq_setup_normal_0start_3params_0locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
118
|
+
{
|
119
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 0);
|
120
|
+
}
|
121
|
+
|
122
|
+
static VALUE
|
123
|
+
vm_call_iseq_setup_normal_0start_3params_1locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
124
|
+
{
|
125
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 1);
|
126
|
+
}
|
127
|
+
|
128
|
+
static VALUE
|
129
|
+
vm_call_iseq_setup_normal_0start_3params_2locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
130
|
+
{
|
131
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 2);
|
132
|
+
}
|
133
|
+
|
134
|
+
static VALUE
|
135
|
+
vm_call_iseq_setup_normal_0start_3params_3locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
136
|
+
{
|
137
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 3);
|
138
|
+
}
|
139
|
+
|
140
|
+
static VALUE
|
141
|
+
vm_call_iseq_setup_normal_0start_3params_4locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
142
|
+
{
|
143
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 4);
|
144
|
+
}
|
145
|
+
|
146
|
+
static VALUE
|
147
|
+
vm_call_iseq_setup_normal_0start_3params_5locals(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
148
|
+
{
|
149
|
+
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, 3, 5);
|
150
|
+
}
|
151
|
+
|
152
|
+
/* vm_call_iseq_handlers[param][local] */
|
153
|
+
static const vm_call_handler vm_call_iseq_handlers[][6] = {
|
154
|
+
{vm_call_iseq_setup_normal_0start_0params_0locals,
|
155
|
+
vm_call_iseq_setup_normal_0start_0params_1locals,
|
156
|
+
vm_call_iseq_setup_normal_0start_0params_2locals,
|
157
|
+
vm_call_iseq_setup_normal_0start_0params_3locals,
|
158
|
+
vm_call_iseq_setup_normal_0start_0params_4locals,
|
159
|
+
vm_call_iseq_setup_normal_0start_0params_5locals},
|
160
|
+
{vm_call_iseq_setup_normal_0start_1params_0locals,
|
161
|
+
vm_call_iseq_setup_normal_0start_1params_1locals,
|
162
|
+
vm_call_iseq_setup_normal_0start_1params_2locals,
|
163
|
+
vm_call_iseq_setup_normal_0start_1params_3locals,
|
164
|
+
vm_call_iseq_setup_normal_0start_1params_4locals,
|
165
|
+
vm_call_iseq_setup_normal_0start_1params_5locals},
|
166
|
+
{vm_call_iseq_setup_normal_0start_2params_0locals,
|
167
|
+
vm_call_iseq_setup_normal_0start_2params_1locals,
|
168
|
+
vm_call_iseq_setup_normal_0start_2params_2locals,
|
169
|
+
vm_call_iseq_setup_normal_0start_2params_3locals,
|
170
|
+
vm_call_iseq_setup_normal_0start_2params_4locals,
|
171
|
+
vm_call_iseq_setup_normal_0start_2params_5locals},
|
172
|
+
{vm_call_iseq_setup_normal_0start_3params_0locals,
|
173
|
+
vm_call_iseq_setup_normal_0start_3params_1locals,
|
174
|
+
vm_call_iseq_setup_normal_0start_3params_2locals,
|
175
|
+
vm_call_iseq_setup_normal_0start_3params_3locals,
|
176
|
+
vm_call_iseq_setup_normal_0start_3params_4locals,
|
177
|
+
vm_call_iseq_setup_normal_0start_3params_5locals}
|
178
|
+
};
|
179
|
+
|
180
|
+
static inline vm_call_handler
|
181
|
+
vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size)
|
182
|
+
{
|
183
|
+
if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
|
184
|
+
return &vm_call_iseq_setup_tailcall_0start;
|
185
|
+
}
|
186
|
+
else if (0) { /* to disable optimize */
|
187
|
+
return &vm_call_iseq_setup_normal_0start;
|
188
|
+
}
|
189
|
+
else {
|
190
|
+
if (param_size <= 3 &&
|
191
|
+
local_size <= 5) {
|
192
|
+
VM_ASSERT(local_size >= 0);
|
193
|
+
return vm_call_iseq_handlers[param_size][local_size];
|
194
|
+
}
|
195
|
+
return &vm_call_iseq_setup_normal_0start;
|
196
|
+
}
|
197
|
+
}
|
198
|
+
|
199
|
+
#else
|
200
|
+
|
201
|
+
|
202
|
+
static inline vm_call_handler
|
203
|
+
vm_call_iseq_setup_func(const struct rb_call_info *ci, struct rb_call_cache *cc)
|
204
|
+
{
|
205
|
+
if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
|
206
|
+
return &vm_call_iseq_setup_tailcall_0start;
|
207
|
+
}
|
208
|
+
else {
|
209
|
+
return &vm_call_iseq_setup_normal_0start;
|
210
|
+
}
|
211
|
+
}
|
212
|
+
#endif
|
@@ -0,0 +1,1644 @@
|
|
1
|
+
/**********************************************************************
|
2
|
+
|
3
|
+
vm_core.h -
|
4
|
+
|
5
|
+
$Author: nobu $
|
6
|
+
created at: 04/01/01 19:41:38 JST
|
7
|
+
|
8
|
+
Copyright (C) 2004-2007 Koichi Sasada
|
9
|
+
|
10
|
+
**********************************************************************/
|
11
|
+
|
12
|
+
#ifndef RUBY_VM_CORE_H
|
13
|
+
#define RUBY_VM_CORE_H
|
14
|
+
|
15
|
+
/*
|
16
|
+
* Enable check mode.
|
17
|
+
* 1: enable local assertions.
|
18
|
+
*/
|
19
|
+
#ifndef VM_CHECK_MODE
|
20
|
+
#define VM_CHECK_MODE 0
|
21
|
+
#endif
|
22
|
+
|
23
|
+
/**
|
24
|
+
* VM Debug Level
|
25
|
+
*
|
26
|
+
* debug level:
|
27
|
+
* 0: no debug output
|
28
|
+
* 1: show instruction name
|
29
|
+
* 2: show stack frame when control stack frame is changed
|
30
|
+
* 3: show stack status
|
31
|
+
* 4: show register
|
32
|
+
* 5:
|
33
|
+
* 10: gc check
|
34
|
+
*/
|
35
|
+
|
36
|
+
#ifndef VMDEBUG
|
37
|
+
#define VMDEBUG 0
|
38
|
+
#endif
|
39
|
+
|
40
|
+
#if 0
|
41
|
+
#undef VMDEBUG
|
42
|
+
#define VMDEBUG 3
|
43
|
+
#endif
|
44
|
+
|
45
|
+
#include "ruby_assert.h"
|
46
|
+
|
47
|
+
#if VM_CHECK_MODE > 0
|
48
|
+
#define VM_ASSERT(expr) ( \
|
49
|
+
RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr))
|
50
|
+
|
51
|
+
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
|
52
|
+
|
53
|
+
#else
|
54
|
+
#define VM_ASSERT(expr) ((void)0)
|
55
|
+
#define VM_UNREACHABLE(func) ((void)0)
|
56
|
+
#endif
|
57
|
+
|
58
|
+
#define RUBY_VM_THREAD_MODEL 2
|
59
|
+
|
60
|
+
#include "ruby/ruby.h"
|
61
|
+
#include "ruby/st.h"
|
62
|
+
|
63
|
+
#include "node.h"
|
64
|
+
#include "vm_debug.h"
|
65
|
+
#include "vm_opts.h"
|
66
|
+
#include "id.h"
|
67
|
+
#include "method.h"
|
68
|
+
#include "ruby_atomic.h"
|
69
|
+
#include "ccan/list/list.h"
|
70
|
+
|
71
|
+
#include "ruby/thread_native.h"
|
72
|
+
#if defined(_WIN32)
|
73
|
+
#include "thread_win32.h"
|
74
|
+
#elif defined(HAVE_PTHREAD_H)
|
75
|
+
#include "thread_pthread.h"
|
76
|
+
#endif
|
77
|
+
|
78
|
+
#ifndef ENABLE_VM_OBJSPACE
|
79
|
+
#ifdef _WIN32
|
80
|
+
/*
|
81
|
+
* TODO: object space independent st_table.
|
82
|
+
* socklist and conlist will be freed exit_handler(), after object
|
83
|
+
* space destruction.
|
84
|
+
*/
|
85
|
+
#define ENABLE_VM_OBJSPACE 0
|
86
|
+
#else
|
87
|
+
#define ENABLE_VM_OBJSPACE 1
|
88
|
+
#endif
|
89
|
+
#endif
|
90
|
+
|
91
|
+
#include <setjmp.h>
|
92
|
+
#include <signal.h>
|
93
|
+
|
94
|
+
#ifndef NSIG
|
95
|
+
# define NSIG (_SIGMAX + 1) /* For QNX */
|
96
|
+
#endif
|
97
|
+
|
98
|
+
#define RUBY_NSIG NSIG
|
99
|
+
|
100
|
+
#ifdef HAVE_STDARG_PROTOTYPES
|
101
|
+
#include <stdarg.h>
|
102
|
+
#define va_init_list(a,b) va_start((a),(b))
|
103
|
+
#else
|
104
|
+
#include <varargs.h>
|
105
|
+
#define va_init_list(a,b) va_start((a))
|
106
|
+
#endif
|
107
|
+
|
108
|
+
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
|
109
|
+
#define USE_SIGALTSTACK
|
110
|
+
#endif
|
111
|
+
|
112
|
+
/*****************/
|
113
|
+
/* configuration */
|
114
|
+
/*****************/
|
115
|
+
|
116
|
+
/* gcc ver. check */
|
117
|
+
#if defined(__GNUC__) && __GNUC__ >= 2
|
118
|
+
|
119
|
+
#if OPT_TOKEN_THREADED_CODE
|
120
|
+
#if OPT_DIRECT_THREADED_CODE
|
121
|
+
#undef OPT_DIRECT_THREADED_CODE
|
122
|
+
#endif
|
123
|
+
#endif
|
124
|
+
|
125
|
+
#else /* defined(__GNUC__) && __GNUC__ >= 2 */
|
126
|
+
|
127
|
+
/* disable threaded code options */
|
128
|
+
#if OPT_DIRECT_THREADED_CODE
|
129
|
+
#undef OPT_DIRECT_THREADED_CODE
|
130
|
+
#endif
|
131
|
+
#if OPT_TOKEN_THREADED_CODE
|
132
|
+
#undef OPT_TOKEN_THREADED_CODE
|
133
|
+
#endif
|
134
|
+
#endif
|
135
|
+
|
136
|
+
#ifdef __native_client__
|
137
|
+
#undef OPT_DIRECT_THREADED_CODE
|
138
|
+
#endif
|
139
|
+
|
140
|
+
/* call threaded code */
|
141
|
+
#if OPT_CALL_THREADED_CODE
|
142
|
+
#if OPT_DIRECT_THREADED_CODE
|
143
|
+
#undef OPT_DIRECT_THREADED_CODE
|
144
|
+
#endif /* OPT_DIRECT_THREADED_CODE */
|
145
|
+
#if OPT_STACK_CACHING
|
146
|
+
#undef OPT_STACK_CACHING
|
147
|
+
#endif /* OPT_STACK_CACHING */
|
148
|
+
#endif /* OPT_CALL_THREADED_CODE */
|
149
|
+
|
150
|
+
typedef unsigned long rb_num_t;
|
151
|
+
|
152
|
+
enum ruby_tag_type {
|
153
|
+
RUBY_TAG_RETURN = 0x1,
|
154
|
+
RUBY_TAG_BREAK = 0x2,
|
155
|
+
RUBY_TAG_NEXT = 0x3,
|
156
|
+
RUBY_TAG_RETRY = 0x4,
|
157
|
+
RUBY_TAG_REDO = 0x5,
|
158
|
+
RUBY_TAG_RAISE = 0x6,
|
159
|
+
RUBY_TAG_THROW = 0x7,
|
160
|
+
RUBY_TAG_FATAL = 0x8,
|
161
|
+
RUBY_TAG_MASK = 0xf
|
162
|
+
};
|
163
|
+
#define TAG_RETURN RUBY_TAG_RETURN
|
164
|
+
#define TAG_BREAK RUBY_TAG_BREAK
|
165
|
+
#define TAG_NEXT RUBY_TAG_NEXT
|
166
|
+
#define TAG_RETRY RUBY_TAG_RETRY
|
167
|
+
#define TAG_REDO RUBY_TAG_REDO
|
168
|
+
#define TAG_RAISE RUBY_TAG_RAISE
|
169
|
+
#define TAG_THROW RUBY_TAG_THROW
|
170
|
+
#define TAG_FATAL RUBY_TAG_FATAL
|
171
|
+
#define TAG_MASK RUBY_TAG_MASK
|
172
|
+
|
173
|
+
enum ruby_vm_throw_flags {
|
174
|
+
VM_THROW_NO_ESCAPE_FLAG = 0x8000,
|
175
|
+
VM_THROW_LEVEL_SHIFT = 16,
|
176
|
+
VM_THROW_STATE_MASK = 0xff
|
177
|
+
};
|
178
|
+
|
179
|
+
/* forward declarations */
|
180
|
+
struct rb_thread_struct;
|
181
|
+
struct rb_control_frame_struct;
|
182
|
+
|
183
|
+
/* iseq data type */
|
184
|
+
typedef struct rb_compile_option_struct rb_compile_option_t;
|
185
|
+
|
186
|
+
struct iseq_inline_cache_entry {
|
187
|
+
rb_serial_t ic_serial;
|
188
|
+
const rb_cref_t *ic_cref;
|
189
|
+
union {
|
190
|
+
size_t index;
|
191
|
+
VALUE value;
|
192
|
+
} ic_value;
|
193
|
+
};
|
194
|
+
|
195
|
+
union iseq_inline_storage_entry {
|
196
|
+
struct {
|
197
|
+
struct rb_thread_struct *running_thread;
|
198
|
+
VALUE value;
|
199
|
+
} once;
|
200
|
+
struct iseq_inline_cache_entry cache;
|
201
|
+
};
|
202
|
+
|
203
|
+
enum method_missing_reason {
|
204
|
+
MISSING_NOENTRY = 0x00,
|
205
|
+
MISSING_PRIVATE = 0x01,
|
206
|
+
MISSING_PROTECTED = 0x02,
|
207
|
+
MISSING_FCALL = 0x04,
|
208
|
+
MISSING_VCALL = 0x08,
|
209
|
+
MISSING_SUPER = 0x10,
|
210
|
+
MISSING_MISSING = 0x20,
|
211
|
+
MISSING_NONE = 0x40
|
212
|
+
};
|
213
|
+
|
214
|
+
struct rb_call_info {
|
215
|
+
/* fixed at compile time */
|
216
|
+
ID mid;
|
217
|
+
unsigned int flag;
|
218
|
+
int orig_argc;
|
219
|
+
};
|
220
|
+
|
221
|
+
struct rb_call_info_kw_arg {
|
222
|
+
int keyword_len;
|
223
|
+
VALUE keywords[1];
|
224
|
+
};
|
225
|
+
|
226
|
+
struct rb_call_info_with_kwarg {
|
227
|
+
struct rb_call_info ci;
|
228
|
+
struct rb_call_info_kw_arg *kw_arg;
|
229
|
+
};
|
230
|
+
|
231
|
+
struct rb_calling_info {
|
232
|
+
VALUE block_handler;
|
233
|
+
VALUE recv;
|
234
|
+
int argc;
|
235
|
+
};
|
236
|
+
|
237
|
+
struct rb_call_cache;
|
238
|
+
typedef VALUE (*vm_call_handler)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
239
|
+
|
240
|
+
struct rb_call_cache {
|
241
|
+
/* inline cache: keys */
|
242
|
+
rb_serial_t method_state;
|
243
|
+
rb_serial_t class_serial;
|
244
|
+
|
245
|
+
/* inline cache: values */
|
246
|
+
const rb_callable_method_entry_t *me;
|
247
|
+
|
248
|
+
vm_call_handler call;
|
249
|
+
|
250
|
+
union {
|
251
|
+
unsigned int index; /* used by ivar */
|
252
|
+
enum method_missing_reason method_missing_reason; /* used by method_missing */
|
253
|
+
int inc_sp; /* used by cfunc */
|
254
|
+
} aux;
|
255
|
+
};
|
256
|
+
|
257
|
+
#if 1
|
258
|
+
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
|
259
|
+
#else
|
260
|
+
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
|
261
|
+
#endif
|
262
|
+
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
|
263
|
+
|
264
|
+
typedef struct rb_iseq_location_struct {
|
265
|
+
VALUE path;
|
266
|
+
VALUE absolute_path;
|
267
|
+
VALUE base_label;
|
268
|
+
VALUE label;
|
269
|
+
VALUE first_lineno; /* TODO: may be unsigned short */
|
270
|
+
} rb_iseq_location_t;
|
271
|
+
|
272
|
+
struct rb_iseq_constant_body {
|
273
|
+
enum iseq_type {
|
274
|
+
ISEQ_TYPE_TOP,
|
275
|
+
ISEQ_TYPE_METHOD,
|
276
|
+
ISEQ_TYPE_BLOCK,
|
277
|
+
ISEQ_TYPE_CLASS,
|
278
|
+
ISEQ_TYPE_RESCUE,
|
279
|
+
ISEQ_TYPE_ENSURE,
|
280
|
+
ISEQ_TYPE_EVAL,
|
281
|
+
ISEQ_TYPE_MAIN,
|
282
|
+
ISEQ_TYPE_DEFINED_GUARD
|
283
|
+
} type; /* instruction sequence type */
|
284
|
+
|
285
|
+
unsigned int iseq_size;
|
286
|
+
const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
|
287
|
+
|
288
|
+
/**
|
289
|
+
* parameter information
|
290
|
+
*
|
291
|
+
* def m(a1, a2, ..., aM, # mandatory
|
292
|
+
* b1=(...), b2=(...), ..., bN=(...), # optional
|
293
|
+
* *c, # rest
|
294
|
+
* d1, d2, ..., dO, # post
|
295
|
+
* e1:(...), e2:(...), ..., eK:(...), # keyword
|
296
|
+
* **f, # keyword_rest
|
297
|
+
* &g) # block
|
298
|
+
* =>
|
299
|
+
*
|
300
|
+
* lead_num = M
|
301
|
+
* opt_num = N
|
302
|
+
* rest_start = M+N
|
303
|
+
* post_start = M+N+(*1)
|
304
|
+
* post_num = O
|
305
|
+
* keyword_num = K
|
306
|
+
* block_start = M+N+(*1)+O+K
|
307
|
+
* keyword_bits = M+N+(*1)+O+K+(&1)
|
308
|
+
* size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
|
309
|
+
*/
|
310
|
+
|
311
|
+
struct {
|
312
|
+
struct {
|
313
|
+
unsigned int has_lead : 1;
|
314
|
+
unsigned int has_opt : 1;
|
315
|
+
unsigned int has_rest : 1;
|
316
|
+
unsigned int has_post : 1;
|
317
|
+
unsigned int has_kw : 1;
|
318
|
+
unsigned int has_kwrest : 1;
|
319
|
+
unsigned int has_block : 1;
|
320
|
+
|
321
|
+
unsigned int ambiguous_param0 : 1; /* {|a|} */
|
322
|
+
} flags;
|
323
|
+
|
324
|
+
unsigned int size;
|
325
|
+
|
326
|
+
int lead_num;
|
327
|
+
int opt_num;
|
328
|
+
int rest_start;
|
329
|
+
int post_start;
|
330
|
+
int post_num;
|
331
|
+
int block_start;
|
332
|
+
|
333
|
+
const VALUE *opt_table; /* (opt_num + 1) entries. */
|
334
|
+
/* opt_num and opt_table:
|
335
|
+
*
|
336
|
+
* def foo o1=e1, o2=e2, ..., oN=eN
|
337
|
+
* #=>
|
338
|
+
* # prologue code
|
339
|
+
* A1: e1
|
340
|
+
* A2: e2
|
341
|
+
* ...
|
342
|
+
* AN: eN
|
343
|
+
* AL: body
|
344
|
+
* opt_num = N
|
345
|
+
* opt_table = [A1, A2, ..., AN, AL]
|
346
|
+
*/
|
347
|
+
|
348
|
+
const struct rb_iseq_param_keyword {
|
349
|
+
int num;
|
350
|
+
int required_num;
|
351
|
+
int bits_start;
|
352
|
+
int rest_start;
|
353
|
+
const ID *table;
|
354
|
+
const VALUE *default_values;
|
355
|
+
} *keyword;
|
356
|
+
} param;
|
357
|
+
|
358
|
+
rb_iseq_location_t location;
|
359
|
+
|
360
|
+
/* insn info, must be freed */
|
361
|
+
const struct iseq_line_info_entry *line_info_table;
|
362
|
+
|
363
|
+
const ID *local_table; /* must free */
|
364
|
+
|
365
|
+
/* catch table */
|
366
|
+
const struct iseq_catch_table *catch_table;
|
367
|
+
|
368
|
+
/* for child iseq */
|
369
|
+
const struct rb_iseq_struct *parent_iseq;
|
370
|
+
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
|
371
|
+
|
372
|
+
union iseq_inline_storage_entry *is_entries;
|
373
|
+
struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
|
374
|
+
* struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
|
375
|
+
* So that:
|
376
|
+
* struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
|
377
|
+
*/
|
378
|
+
struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */
|
379
|
+
|
380
|
+
VALUE mark_ary; /* Array: includes operands which should be GC marked */
|
381
|
+
|
382
|
+
unsigned int local_table_size;
|
383
|
+
unsigned int is_size;
|
384
|
+
unsigned int ci_size;
|
385
|
+
unsigned int ci_kw_size;
|
386
|
+
unsigned int line_info_size;
|
387
|
+
unsigned int stack_max; /* for stack overflow check */
|
388
|
+
};
|
389
|
+
|
390
|
+
/* T_IMEMO/iseq */
|
391
|
+
/* typedef rb_iseq_t is in method.h */
|
392
|
+
struct rb_iseq_struct {
|
393
|
+
VALUE flags;
|
394
|
+
VALUE reserved1;
|
395
|
+
struct rb_iseq_constant_body *body;
|
396
|
+
|
397
|
+
union { /* 4, 5 words */
|
398
|
+
struct iseq_compile_data *compile_data; /* used at compile time */
|
399
|
+
|
400
|
+
struct {
|
401
|
+
VALUE obj;
|
402
|
+
int index;
|
403
|
+
} loader;
|
404
|
+
} aux;
|
405
|
+
};
|
406
|
+
|
407
|
+
#ifndef USE_LAZY_LOAD
|
408
|
+
#define USE_LAZY_LOAD 0
|
409
|
+
#endif
|
410
|
+
|
411
|
+
#if USE_LAZY_LOAD
|
412
|
+
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
|
413
|
+
#endif
|
414
|
+
|
415
|
+
static inline const rb_iseq_t *
|
416
|
+
rb_iseq_check(const rb_iseq_t *iseq)
|
417
|
+
{
|
418
|
+
#if USE_LAZY_LOAD
|
419
|
+
if (iseq->body == NULL) {
|
420
|
+
rb_iseq_complete((rb_iseq_t *)iseq);
|
421
|
+
}
|
422
|
+
#endif
|
423
|
+
return iseq;
|
424
|
+
}
|
425
|
+
|
426
|
+
enum ruby_special_exceptions {
|
427
|
+
ruby_error_reenter,
|
428
|
+
ruby_error_nomemory,
|
429
|
+
ruby_error_sysstack,
|
430
|
+
ruby_error_closed_stream,
|
431
|
+
ruby_special_error_count
|
432
|
+
};
|
433
|
+
|
434
|
+
enum ruby_basic_operators {
|
435
|
+
BOP_PLUS,
|
436
|
+
BOP_MINUS,
|
437
|
+
BOP_MULT,
|
438
|
+
BOP_DIV,
|
439
|
+
BOP_MOD,
|
440
|
+
BOP_EQ,
|
441
|
+
BOP_EQQ,
|
442
|
+
BOP_LT,
|
443
|
+
BOP_LE,
|
444
|
+
BOP_LTLT,
|
445
|
+
BOP_AREF,
|
446
|
+
BOP_ASET,
|
447
|
+
BOP_LENGTH,
|
448
|
+
BOP_SIZE,
|
449
|
+
BOP_EMPTY_P,
|
450
|
+
BOP_SUCC,
|
451
|
+
BOP_GT,
|
452
|
+
BOP_GE,
|
453
|
+
BOP_NOT,
|
454
|
+
BOP_NEQ,
|
455
|
+
BOP_MATCH,
|
456
|
+
BOP_FREEZE,
|
457
|
+
BOP_MAX,
|
458
|
+
BOP_MIN,
|
459
|
+
|
460
|
+
BOP_LAST_
|
461
|
+
};
|
462
|
+
|
463
|
+
#define GetVMPtr(obj, ptr) \
|
464
|
+
GetCoreDataFromValue((obj), rb_vm_t, (ptr))
|
465
|
+
|
466
|
+
struct rb_vm_struct;
|
467
|
+
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
|
468
|
+
|
469
|
+
typedef struct rb_at_exit_list {
|
470
|
+
rb_vm_at_exit_func *func;
|
471
|
+
struct rb_at_exit_list *next;
|
472
|
+
} rb_at_exit_list;
|
473
|
+
|
474
|
+
struct rb_objspace;
|
475
|
+
struct rb_objspace *rb_objspace_alloc(void);
|
476
|
+
void rb_objspace_free(struct rb_objspace *);
|
477
|
+
|
478
|
+
typedef struct rb_hook_list_struct {
|
479
|
+
struct rb_event_hook_struct *hooks;
|
480
|
+
rb_event_flag_t events;
|
481
|
+
int need_clean;
|
482
|
+
} rb_hook_list_t;
|
483
|
+
|
484
|
+
typedef struct rb_vm_struct {
|
485
|
+
VALUE self;
|
486
|
+
|
487
|
+
rb_global_vm_lock_t gvl;
|
488
|
+
rb_nativethread_lock_t thread_destruct_lock;
|
489
|
+
|
490
|
+
struct rb_thread_struct *main_thread;
|
491
|
+
struct rb_thread_struct *running_thread;
|
492
|
+
|
493
|
+
struct list_head living_threads;
|
494
|
+
size_t living_thread_num;
|
495
|
+
VALUE thgroup_default;
|
496
|
+
|
497
|
+
unsigned int running: 1;
|
498
|
+
unsigned int thread_abort_on_exception: 1;
|
499
|
+
unsigned int thread_report_on_exception: 1;
|
500
|
+
int trace_running;
|
501
|
+
volatile int sleeper;
|
502
|
+
|
503
|
+
/* object management */
|
504
|
+
VALUE mark_object_ary;
|
505
|
+
const VALUE special_exceptions[ruby_special_error_count];
|
506
|
+
|
507
|
+
/* load */
|
508
|
+
VALUE top_self;
|
509
|
+
VALUE load_path;
|
510
|
+
VALUE load_path_snapshot;
|
511
|
+
VALUE load_path_check_cache;
|
512
|
+
VALUE expanded_load_path;
|
513
|
+
VALUE loaded_features;
|
514
|
+
VALUE loaded_features_snapshot;
|
515
|
+
struct st_table *loaded_features_index;
|
516
|
+
struct st_table *loading_table;
|
517
|
+
|
518
|
+
/* signal */
|
519
|
+
struct {
|
520
|
+
VALUE cmd;
|
521
|
+
int safe;
|
522
|
+
} trap_list[RUBY_NSIG];
|
523
|
+
|
524
|
+
/* hook */
|
525
|
+
rb_hook_list_t event_hooks;
|
526
|
+
|
527
|
+
/* relation table of ensure - rollback for callcc */
|
528
|
+
struct st_table *ensure_rollback_table;
|
529
|
+
|
530
|
+
/* postponed_job */
|
531
|
+
struct rb_postponed_job_struct *postponed_job_buffer;
|
532
|
+
int postponed_job_index;
|
533
|
+
|
534
|
+
int src_encoding_index;
|
535
|
+
|
536
|
+
VALUE verbose, debug, orig_progname, progname;
|
537
|
+
VALUE coverages;
|
538
|
+
|
539
|
+
VALUE defined_module_hash;
|
540
|
+
|
541
|
+
struct rb_objspace *objspace;
|
542
|
+
|
543
|
+
rb_at_exit_list *at_exit;
|
544
|
+
|
545
|
+
VALUE *defined_strings;
|
546
|
+
st_table *frozen_strings;
|
547
|
+
|
548
|
+
/* params */
|
549
|
+
struct { /* size in byte */
|
550
|
+
size_t thread_vm_stack_size;
|
551
|
+
size_t thread_machine_stack_size;
|
552
|
+
size_t fiber_vm_stack_size;
|
553
|
+
size_t fiber_machine_stack_size;
|
554
|
+
} default_params;
|
555
|
+
|
556
|
+
short redefined_flag[BOP_LAST_];
|
557
|
+
} rb_vm_t;
|
558
|
+
|
559
|
+
/* default values */
|
560
|
+
|
561
|
+
#define RUBY_VM_SIZE_ALIGN 4096
|
562
|
+
|
563
|
+
#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
564
|
+
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
565
|
+
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
566
|
+
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
567
|
+
|
568
|
+
#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
569
|
+
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
570
|
+
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
|
571
|
+
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
572
|
+
|
573
|
+
/* optimize insn */
|
574
|
+
#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
|
575
|
+
#define FLOAT_REDEFINED_OP_FLAG (1 << 1)
|
576
|
+
#define STRING_REDEFINED_OP_FLAG (1 << 2)
|
577
|
+
#define ARRAY_REDEFINED_OP_FLAG (1 << 3)
|
578
|
+
#define HASH_REDEFINED_OP_FLAG (1 << 4)
|
579
|
+
/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
|
580
|
+
#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
|
581
|
+
#define TIME_REDEFINED_OP_FLAG (1 << 7)
|
582
|
+
#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
|
583
|
+
#define NIL_REDEFINED_OP_FLAG (1 << 9)
|
584
|
+
#define TRUE_REDEFINED_OP_FLAG (1 << 10)
|
585
|
+
#define FALSE_REDEFINED_OP_FLAG (1 << 11)
|
586
|
+
|
587
|
+
#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
|
588
|
+
|
589
|
+
#ifndef VM_DEBUG_BP_CHECK
|
590
|
+
#define VM_DEBUG_BP_CHECK 0
|
591
|
+
#endif
|
592
|
+
|
593
|
+
#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
|
594
|
+
#define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
|
595
|
+
#endif
|
596
|
+
|
597
|
+
/* A block captured at its definition site: the receiver, the environment
 * pointer, and the code to execute.  Which `code` member is live is
 * determined externally (by the block-handler tag or rb_block.type). */
struct rb_captured_block {
    VALUE self;              /* receiver (`self`) at the point of capture */
    const VALUE *ep;         /* environment pointer for the captured scope */
    union {
	const rb_iseq_t *iseq;        /* Ruby-level block body */
	const struct vm_ifunc *ifunc; /* C-level block body */
	VALUE val;                    /* raw view, used by debug-mode assertions */
    } code;
};
|
606
|
+
|
607
|
+
/* Classification of a "block handler" VALUE (see the VM_BH_* helpers and
 * vm_block_handler_type() below): either a tagged pointer to a captured
 * block, or a plain Symbol/Proc object. */
enum rb_block_handler_type {
    block_handler_type_iseq,   /* tagged ptr to rb_captured_block with code.iseq */
    block_handler_type_ifunc,  /* tagged ptr to rb_captured_block with code.ifunc */
    block_handler_type_symbol, /* a plain Symbol */
    block_handler_type_proc    /* a plain Proc object */
};

/* Discriminator for struct rb_block (selects the live `as` member;
 * validated in vm_block_type() when VM_CHECK_MODE is on). */
enum rb_block_type {
    block_type_iseq,   /* as.captured, code.iseq */
    block_type_ifunc,  /* as.captured, code.ifunc */
    block_type_symbol, /* as.symbol */
    block_type_proc    /* as.proc */
};
|
620
|
+
|
621
|
+
/* A block value as stored in Proc/Binding bodies: a tagged union of a
 * captured block, a Symbol, or a Proc, discriminated by `type`. */
struct rb_block {
    union {
	struct rb_captured_block captured;
	VALUE symbol;
	VALUE proc;
    } as;
    enum rb_block_type type; /* selects which `as` member is live */
};
|
629
|
+
|
630
|
+
/* One VM control frame.  Frames live contiguously on the thread's VM
 * stack; the cfp[n] comments give the slot layout used by the VM's
 * frame-walking macros (RUBY_VM_PREVIOUS_CONTROL_FRAME etc.). */
typedef struct rb_control_frame_struct {
    const VALUE *pc;		/* cfp[0]: program counter into iseq bytecode */
    VALUE *sp;			/* cfp[1]: value stack pointer */
    const rb_iseq_t *iseq;	/* cfp[2]: executing instruction sequence */
    VALUE self;			/* cfp[3] / block[0]: receiver */
    const VALUE *ep;		/* cfp[4] / block[1]: environment pointer */
    const void *block_code;     /* cfp[5] / block[2] */ /* iseq or ifunc */

#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;		/* cfp[6]: saved bp for debug consistency checks */
#endif
} rb_control_frame_t;
|
642
|
+
|
643
|
+
extern const rb_data_type_t ruby_threadptr_data_type;
|
644
|
+
|
645
|
+
#define GetThreadPtr(obj, ptr) \
|
646
|
+
TypedData_Get_Struct((obj), rb_thread_t, &ruby_threadptr_data_type, (ptr))
|
647
|
+
|
648
|
+
enum rb_thread_status {
|
649
|
+
THREAD_RUNNABLE,
|
650
|
+
THREAD_STOPPED,
|
651
|
+
THREAD_STOPPED_FOREVER,
|
652
|
+
THREAD_KILLED
|
653
|
+
};
|
654
|
+
|
655
|
+
typedef RUBY_JMP_BUF rb_jmpbuf_t;
|
656
|
+
|
657
|
+
/*
|
658
|
+
the members which are written in TH_PUSH_TAG() should be placed at
|
659
|
+
the beginning and the end, so that entire region is accessible.
|
660
|
+
*/
|
661
|
+
struct rb_vm_tag {
    VALUE tag;               /* catch/throw tag value identifying this catch point */
    VALUE retval;            /* value carried by the non-local jump */
    rb_jmpbuf_t buf;         /* jump buffer written in TH_PUSH_TAG() */
    struct rb_vm_tag *prev;  /* enclosing tag; tags form a per-thread stack */
};

/* One level of protected-call nesting; a singly linked per-thread stack.
   NOTE(review): presumably pushed by rb_protect/TH_PUSH_TAG machinery — verify. */
struct rb_vm_protect_tag {
    struct rb_vm_protect_tag *prev;
};

/* Callback installed to interrupt a thread blocked in a blocking region. */
struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;               /* opaque argument handed back to func */
};

struct rb_mutex_struct;

/* Singly linked list of threads (used e.g. for rb_thread_t.join_list). */
typedef struct rb_thread_list_struct{
    struct rb_thread_list_struct *next;
    struct rb_thread_struct *th;
} rb_thread_list_t;

/* One registered ensure handler.  `marker` identifies the ensure region so
   it can be matched for rollback during callcc (see the VM's
   ensure_rollback_table). */
typedef struct rb_ensure_entry {
    VALUE marker;
    VALUE (*e_proc)(ANYARGS);  /* handler invoked on unwind */
    VALUE data2;               /* argument passed to e_proc */
} rb_ensure_entry_t;

/* Per-thread stack of active ensure entries (rb_thread_t.ensure_list). */
typedef struct rb_ensure_list {
    struct rb_ensure_list *next;
    struct rb_ensure_entry entry;
} rb_ensure_list_t;
|
694
|
+
|
695
|
+
typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
|
696
|
+
|
697
|
+
typedef struct rb_fiber_struct rb_fiber_t;
|
698
|
+
|
699
|
+
/* Per-thread VM state: the thread's VM stack and control frames,
 * scheduling/interrupt bookkeeping, tag stacks for non-local jumps,
 * fiber linkage, and machine-stack bounds for conservative GC. */
typedef struct rb_thread_struct {
    struct list_node vmlt_node; /* entry in vm->living_threads (see rb_vm_living_threads_*) */
    VALUE self;                 /* the Thread object wrapping this struct */
    rb_vm_t *vm;                /* owning VM */

    /* execution information */
    VALUE *stack;		/* must free, must mark */
    size_t stack_size;          /* size in word (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;    /* current control frame */
    int safe_level;
    int raised_flag;
    VALUE last_status; /* $? */

    /* passing state */
    int state;

    int waiting_fd;             /* NOTE(review): fd this thread is blocked on — verify */

    /* for rb_iterate */
    VALUE passed_block_handler;

    /* for bmethod */
    const rb_callable_method_entry_t *passed_bmethod_me;

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* thread control */
    rb_nativethread_id_t thread_id;
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string; /* printable id when the native id is not a scalar */
#endif
    enum rb_thread_status status;
    int to_kill;
    int priority;

    native_thread_data_t native_thread_data;
    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of errinfo */
    VALUE errinfo;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;
    int pending_interrupt_queue_checked;

    rb_atomic_t interrupt_flag;
    unsigned long interrupt_mask;
    rb_nativethread_lock_t interrupt_lock;
    rb_nativethread_cond_t interrupt_cond;
    struct rb_unblock_callback unblock; /* how to wake this thread from a blocking region */
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    struct rb_vm_tag *tag;              /* stack of catch/throw tags (TH_PUSH_TAG) */
    struct rb_vm_protect_tag *protect_tag;

    /* storage */
    st_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    rb_thread_list_t *join_list;        /* threads waiting to join this one */

    VALUE first_proc;
    VALUE first_args;
    VALUE (*first_func)(ANYARGS);

    /* for GC: bounds of the native machine stack, scanned conservatively */
    struct {
	VALUE *stack_start;
	VALUE *stack_end;
	size_t stack_maxsize;
#ifdef __ia64
	VALUE *register_stack_start;
	VALUE *register_stack_end;
	size_t register_stack_maxsize;
#endif
	jmp_buf regs;                   /* saved registers, scanned for GC roots */
    } machine;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* tracer */
    rb_hook_list_t event_hooks;
    struct rb_trace_arg_struct *trace_arg; /* trace information */

    /* fiber */
    rb_fiber_t *fiber;
    rb_fiber_t *root_fiber;
    rb_jmpbuf_t root_jmpbuf;

    /* ensure & callcc */
    rb_ensure_list_t *ensure_list;

    /* misc */
    enum method_missing_reason method_missing_reason: 8;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
#ifdef USE_SIGALTSTACK
    void *altstack;                     /* alternate signal stack */
#endif
    unsigned long running_time_us;
    VALUE name;
} rb_thread_t;
|
822
|
+
|
823
|
+
/* Low 3 bits of a defineclass instruction operand: what kind of
 * class-ish thing is being defined (extracted via VM_DEFINECLASS_TYPE). */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;
|
830
|
+
|
831
|
+
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
|
832
|
+
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
|
833
|
+
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
|
834
|
+
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
|
835
|
+
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
|
836
|
+
((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
|
837
|
+
|
838
|
+
/* iseq.c */
|
839
|
+
RUBY_SYMBOL_EXPORT_BEGIN
|
840
|
+
|
841
|
+
/* node -> iseq */
|
842
|
+
rb_iseq_t *rb_iseq_new(NODE*, VALUE, VALUE, VALUE, const rb_iseq_t *parent, enum iseq_type);
|
843
|
+
rb_iseq_t *rb_iseq_new_top(NODE *node, VALUE name, VALUE path, VALUE absolute_path, const rb_iseq_t *parent);
|
844
|
+
rb_iseq_t *rb_iseq_new_main(NODE *node, VALUE path, VALUE absolute_path, const rb_iseq_t *parent);
|
845
|
+
rb_iseq_t *rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
|
846
|
+
rb_iseq_t *rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
|
847
|
+
|
848
|
+
/* src -> iseq */
|
849
|
+
rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
|
850
|
+
rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
|
851
|
+
rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, const struct rb_block *base_block, VALUE opt);
|
852
|
+
|
853
|
+
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
|
854
|
+
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
|
855
|
+
const char *ruby_node_name(int node);
|
856
|
+
|
857
|
+
VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
|
858
|
+
|
859
|
+
RUBY_EXTERN VALUE rb_cISeq;
|
860
|
+
RUBY_EXTERN VALUE rb_cRubyVM;
|
861
|
+
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
|
862
|
+
RUBY_SYMBOL_EXPORT_END
|
863
|
+
|
864
|
+
#define GetProcPtr(obj, ptr) \
|
865
|
+
GetCoreDataFromValue((obj), rb_proc_t, (ptr))
|
866
|
+
|
867
|
+
/* Body of a Proc object (unwrapped with GetProcPtr). */
typedef struct {
    const struct rb_block block; /* the captured block / symbol / proc */
    int8_t safe_level;		/* 0..1 */
    int8_t is_from_method;	/* bool */
    int8_t is_lambda;		/* bool */
} rb_proc_t;
|
873
|
+
|
874
|
+
/* A heap-escaped environment, allocated as an imemo_env (see vm_env_new). */
typedef struct {
    VALUE flags;           /* imemo header */
    const rb_iseq_t *iseq; /* iseq this env belongs to */
    const VALUE *ep;       /* environment pointer; presumably points into `env` — verify */
    const VALUE *env;      /* backing storage for the environment */
    unsigned int env_size; /* size of `env`; assumed to be in VALUEs — TODO confirm */
} rb_env_t;
|
881
|
+
|
882
|
+
extern const rb_data_type_t ruby_binding_data_type;
|
883
|
+
|
884
|
+
#define GetBindingPtr(obj, ptr) \
|
885
|
+
GetCoreDataFromValue((obj), rb_binding_t, (ptr))
|
886
|
+
|
887
|
+
/* Body of a Binding object (unwrapped with GetBindingPtr). */
typedef struct {
    struct rb_block block;       /* the captured scope */
    VALUE path;                  /* source path of the binding site */
    unsigned short first_lineno; /* source line of the binding site */
} rb_binding_t;
|
892
|
+
|
893
|
+
/* used by compile time and send insn */
|
894
|
+
|
895
|
+
/* Operand of the checkmatch instruction: which `===`-style match to run. */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,   /* `when` clause match */
    VM_CHECKMATCH_TYPE_CASE = 2,   /* `case/when` with explicit receiver */
    VM_CHECKMATCH_TYPE_RESCUE = 3  /* `rescue` class match */
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04   /* operand is an array of patterns */

/* Call-site flags stored in rb_call_info. */
#define VM_CALL_ARGS_SPLAT (0x01 << 0) /* m(*args) */
#define VM_CALL_ARGS_BLOCKARG (0x01 << 1) /* m(&block) */
#define VM_CALL_FCALL (0x01 << 2) /* m(...) */
#define VM_CALL_VCALL (0x01 << 3) /* m */
#define VM_CALL_ARGS_SIMPLE (0x01 << 4) /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
#define VM_CALL_BLOCKISEQ (0x01 << 5) /* has blockiseq */
#define VM_CALL_KWARG (0x01 << 6) /* has kwarg */
#define VM_CALL_TAILCALL (0x01 << 7) /* located at tail position */
#define VM_CALL_SUPER (0x01 << 8) /* super */
#define VM_CALL_OPT_SEND (0x01 << 9) /* internal flag */

/* Operand of the putspecialobject instruction. */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1, /* rb_mRubyVMFrozenCore */
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices of special variables stored in an svar. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1, /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
|
928
|
+
|
929
|
+
/* inline cache */
|
930
|
+
typedef struct iseq_inline_cache_entry *IC;
|
931
|
+
typedef struct rb_call_info *CALL_INFO;
|
932
|
+
typedef struct rb_call_cache *CALL_CACHE;
|
933
|
+
|
934
|
+
void rb_vm_change_state(void);
|
935
|
+
|
936
|
+
typedef VALUE CDHASH;
|
937
|
+
|
938
|
+
#ifndef FUNC_FASTCALL
|
939
|
+
#define FUNC_FASTCALL(x) x
|
940
|
+
#endif
|
941
|
+
|
942
|
+
typedef rb_control_frame_t *
|
943
|
+
(FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
|
944
|
+
|
945
|
+
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
|
946
|
+
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
|
947
|
+
|
948
|
+
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
|
949
|
+
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
|
950
|
+
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
|
951
|
+
|
952
|
+
enum {
    /* Frame/Environment flag bits:
     * MMMM MMMM MMMM MMMM ____ ____ FFFF EEEX (LSB)
     *
     * X : tag for GC marking (It seems as Fixnum)
     * EEE : 3 bits Env flags
     * FFFF: 4 bits Frame flags
     * MMMM: 16 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_PROC = 0x66660001,
    VM_FRAME_MAGIC_IFUNC = 0x77770001,
    VM_FRAME_MAGIC_EVAL = 0x88880001,
    VM_FRAME_MAGIC_LAMBDA = 0x99990001,
    VM_FRAME_MAGIC_RESCUE = 0xaaaa0001,
    VM_FRAME_MAGIC_DUMMY = 0xbbbb0001,

    VM_FRAME_MAGIC_MASK = 0xffff0001,

    /* frame flag */
    VM_FRAME_FLAG_PASSED = 0x0010,
    VM_FRAME_FLAG_FINISH = 0x0020,   /* vm_exec returns when popping this frame */
    VM_FRAME_FLAG_BMETHOD = 0x0040,  /* frame is a method defined from a Proc */
    VM_FRAME_FLAG_CFRAME = 0x0080,   /* frame runs C code, not an iseq (see VM_FRAME_CFRAME_P) */

    /* env flag */
    VM_ENV_FLAG_LOCAL = 0x0002,      /* outermost env of the method/proc (see VM_ENV_LOCAL_P) */
    VM_ENV_FLAG_ESCAPED = 0x0004,    /* env has been moved to the heap (see VM_ENV_ESCAPED_P) */
    VM_ENV_FLAG_WB_REQUIRED = 0x0008 /* writes into this env need a GC write barrier */
};
|
988
|
+
|
989
|
+
#define VM_ENV_DATA_SIZE ( 3)
|
990
|
+
|
991
|
+
#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
|
992
|
+
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
|
993
|
+
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
|
994
|
+
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
|
995
|
+
#define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */
|
996
|
+
|
997
|
+
#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
|
998
|
+
|
999
|
+
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
|
1000
|
+
|
1001
|
+
/* Set `flag` bit(s) in the env flags word at ep[VM_ENV_DATA_INDEX_FLAGS].
   The flags word is kept as a Fixnum (asserted), so the store can go
   through VM_FORCE_WRITE_SPECIAL_CONST (raw write of a special constant). */
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

/* Clear `flag` bit(s) in the env flags word at ep[VM_ENV_DATA_INDEX_FLAGS]. */
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

/* Read the env flags word masked by `flag` (zero if none of the bits set). */
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}
|
1024
|
+
|
1025
|
+
/* Extract the VM_FRAME_MAGIC_* bits from a frame's env flags word. */
static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* True iff popping this frame should return from vm_exec (FINISH flag). */
static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH ) != 0;
}

/* True iff this frame is a bmethod (method defined from a Proc). */
static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

/* True iff `iseq` is an iseq imemo object. */
static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return RB_TYPE_P(iseq, T_IMEMO) && imemo_type(iseq) == imemo_iseq;
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq)  rb_obj_is_iseq((VALUE)iseq)
#endif

/* True iff this frame executes C code (cfunc/ifunc); the CFRAME flag must
   disagree with the frame carrying a normal iseq (asserted). */
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
    return cframe_p;
}

/* True iff this frame executes Ruby bytecode (the complement of CFRAME_P). */
static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}
|
1066
|
+
|
1067
|
+
#define RUBYVM_CFUNC_FRAME_P(cfp) \
|
1068
|
+
(VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
|
1069
|
+
|
1070
|
+
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
|
1071
|
+
#define VM_BLOCK_HANDLER_NONE 0
|
1072
|
+
|
1073
|
+
/* True iff `ep` is the outermost (method-local) env.  A local env's
   specval slot holds a block handler; a non-local env's holds the
   guarded previous-ep pointer (see the two accessors below). */
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

/* The enclosing env's ep; only valid when `ep` is NOT local (asserted). */
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* The block handler passed to this frame; only valid when `ep` is local
   (asserted).  May be VM_BLOCK_HANDLER_NONE. */
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}
|
1092
|
+
|
1093
|
+
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

/* True iff this env has escaped to the heap.  In check mode the flag must
   agree with the ep's actual location (asserted). */
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

#if VM_CHECK_MODE > 0
/* Check-mode helper: asserts `obj` is an imemo_env; always returns 1 so it
   can be used inside a VM_ASSERT condition. */
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(RB_TYPE_P(obj, T_IMEMO));
    VM_ASSERT(imemo_type(obj) == imemo_env);
    return 1;
}
#endif

/* The rb_env_t imemo stored at ep[VM_ENV_DATA_INDEX_ENV];
   only valid for an escaped env (asserted). */
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
    return envval;
}

/* Same as VM_ENV_ENVVAL, typed as rb_env_t*. */
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

/* The Proc stored at ep[VM_ENV_DATA_INDEX_ENV_PROC]; only valid for an
   escaped, local env whose block handler is present (all asserted). */
static inline VALUE
VM_ENV_PROCVAL(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE);

    return ep[VM_ENV_DATA_INDEX_ENV_PROC];
}
|
1138
|
+
|
1139
|
+
/* Allocate a fresh rb_env_t imemo wrapping env_ep/env_body and link it
   into the env itself at env_ep[VM_ENV_DATA_INDEX_ENV]. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

/* Raw store through a const-qualified pointer: VM-internal slots are
   declared const for readers but must still be written by the VM. */
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

/* Raw store restricted to special constants (asserted); used for values
   like the Fixnum-tagged env flags word. */
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

/* Write ep[index]; only legal while the env does not require GC write
   barriers (asserted via VM_ENV_FLAG_WB_REQUIRED). */
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
|
1167
|
+
|
1168
|
+
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
|
1169
|
+
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
|
1170
|
+
|
1171
|
+
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
|
1172
|
+
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
|
1173
|
+
#define RUBY_VM_END_CONTROL_FRAME(th) \
|
1174
|
+
((rb_control_frame_t *)((th)->stack + (th)->stack_size))
|
1175
|
+
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
|
1176
|
+
((void *)(ecfp) > (void *)(cfp))
|
1177
|
+
#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
|
1178
|
+
(!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
|
1179
|
+
|
1180
|
+
/* Block handlers encode captured blocks as tagged pointers:
   low bits 0x01 => iseq block, 0x03 => ifunc block (see the helpers below).
   True iff `block_handler` carries an iseq block; in check mode also
   verifies the pointed-to captured block really holds an iseq. */
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
	struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
	VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
	VM_ASSERT(imemo_type(captured->code.val) == imemo_iseq);
#endif
	return 1;
    }
    else {
	return 0;
    }
}

/* Tag a captured iseq block as a block handler (0x01). */
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

/* Untag an iseq-block handler back to the captured block (asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

/* True iff `block_handler` carries an ifunc (C function) block; in check
   mode also verifies the captured block really holds an ifunc. */
static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
	struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
	VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
	VM_ASSERT(imemo_type(captured->code.val) == imemo_ifunc);
#endif
	return 1;
    }
    else {
	return 0;
    }
}

/* Tag a captured ifunc block as a block handler (0x03). */
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

/* Untag an ifunc-block handler back to the captured block (asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

/* Untag either kind of captured-block handler (iseq or ifunc, asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
|
1251
|
+
|
1252
|
+
/* Classify a block handler: tagged iseq/ifunc pointer, Symbol, or
   (by elimination, asserted) a Proc. */
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
	return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
	return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
	return block_handler_type_symbol;
    }
    else {
	VM_ASSERT(rb_obj_is_proc(block_handler));
	return block_handler_type_proc;
    }
}

/* Check-mode validation of a block handler; always returns 1 so it can be
   embedded in a VM_ASSERT condition. */
static inline int
vm_block_handler_verify(VALUE block_handler)
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
	      vm_block_handler_type(block_handler) >= 0);
    return 1;
}
|
1277
|
+
|
1278
|
+
/* Return block->type; in check mode, first verify that the live union
   member is consistent with the claimed type. */
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
	VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
	VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_iseq);
	break;
      case block_type_ifunc:
	VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
	VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_ifunc);
	break;
      case block_type_symbol:
	VM_ASSERT(SYMBOL_P(block->as.symbol));
	break;
      case block_type_proc:
	VM_ASSERT(rb_obj_is_proc(block->as.proc));
	break;
    }
#endif
    return block->type;
}

/* Set block->type, casting away the const used by read-only callers. */
static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

/* The rb_block embedded in a Proc object's body.
   NOTE(review): the proc pointer is fetched before the is-a-Proc assert. */
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    rb_proc_t *proc = RTYPEDDATA_DATA(procval);
    VM_ASSERT(rb_obj_is_proc(procval));
    return &proc->block;
}
|
1316
|
+
|
1317
|
+
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

/* The iseq of a Proc's block (NULL for ifunc/symbol blocks). */
static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return vm_block_iseq(vm_proc_block(procval));
}

/* The environment pointer of a Proc's block (NULL for symbol blocks). */
static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}
|
1332
|
+
|
1333
|
+
/* The iseq backing a block: direct for iseq blocks, delegated through the
   Proc for proc blocks, NULL for ifunc/symbol blocks. */
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return block->as.captured.code.iseq;
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

/* The environment pointer of a block: captured ep for iseq/ifunc blocks,
   delegated through the Proc for proc blocks, NULL for symbol blocks. */
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

/* The `self` captured by a block; Qundef for symbol blocks (no receiver). */
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
	return block->as.captured.self;
      case block_type_proc:
	return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
	return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
|
1374
|
+
|
1375
|
+
/* Symbol and Proc block handlers are stored untagged; these converters are
   identity functions that exist for naming symmetry with the iseq/ifunc
   converters and to type-check their argument in check mode. */
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
|
1402
|
+
|
1403
|
+
/* VM related object allocate functions */
|
1404
|
+
VALUE rb_thread_alloc(VALUE klass);
|
1405
|
+
VALUE rb_proc_alloc(VALUE klass);
|
1406
|
+
VALUE rb_binding_alloc(VALUE klass);
|
1407
|
+
|
1408
|
+
/* for debug */
|
1409
|
+
extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
|
1410
|
+
extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *_pc);
|
1411
|
+
extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);
|
1412
|
+
|
1413
|
+
#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
|
1414
|
+
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
|
1415
|
+
void rb_vm_bugreport(const void *);
|
1416
|
+
NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
|
1417
|
+
|
1418
|
+
/* functions about thread/vm execution */
|
1419
|
+
RUBY_SYMBOL_EXPORT_BEGIN
|
1420
|
+
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
|
1421
|
+
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
|
1422
|
+
RUBY_SYMBOL_EXPORT_END
|
1423
|
+
int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp);
|
1424
|
+
|
1425
|
+
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);
|
1426
|
+
VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
|
1427
|
+
VALUE rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass);
|
1428
|
+
VALUE rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp);
|
1429
|
+
VALUE rb_vm_env_local_variables(const rb_env_t *env);
|
1430
|
+
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
|
1431
|
+
const VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
|
1432
|
+
void rb_vm_inc_const_missing_count(void);
|
1433
|
+
void rb_vm_gvl_destroy(rb_vm_t *vm);
|
1434
|
+
VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
|
1435
|
+
const VALUE *argv, const rb_callable_method_entry_t *me);
|
1436
|
+
void rb_vm_pop_frame(rb_thread_t *th);
|
1437
|
+
|
1438
|
+
void rb_thread_start_timer_thread(void);
|
1439
|
+
void rb_thread_stop_timer_thread(void);
|
1440
|
+
void rb_thread_reset_timer_thread(void);
|
1441
|
+
void rb_thread_wakeup_timer_thread(void);
|
1442
|
+
|
1443
|
+
static inline void
|
1444
|
+
rb_vm_living_threads_init(rb_vm_t *vm)
|
1445
|
+
{
|
1446
|
+
list_head_init(&vm->living_threads);
|
1447
|
+
vm->living_thread_num = 0;
|
1448
|
+
}
|
1449
|
+
|
1450
|
+
static inline void
|
1451
|
+
rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
|
1452
|
+
{
|
1453
|
+
list_add_tail(&vm->living_threads, &th->vmlt_node);
|
1454
|
+
vm->living_thread_num++;
|
1455
|
+
}
|
1456
|
+
|
1457
|
+
static inline void
|
1458
|
+
rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
|
1459
|
+
{
|
1460
|
+
list_del(&th->vmlt_node);
|
1461
|
+
vm->living_thread_num--;
|
1462
|
+
}
|
1463
|
+
|
1464
|
+
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
|
1465
|
+
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_thread_t *th, const rb_control_frame_t *cfp);
|
1466
|
+
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_thread_t *th, const rb_control_frame_t *cfp);
|
1467
|
+
int rb_vm_get_sourceline(const rb_control_frame_t *);
|
1468
|
+
VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method);
|
1469
|
+
void rb_vm_stack_to_heap(rb_thread_t *th);
|
1470
|
+
void ruby_thread_init_stack(rb_thread_t *th);
|
1471
|
+
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
|
1472
|
+
void rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
|
1473
|
+
|
1474
|
+
void rb_vm_register_special_exception(enum ruby_special_exceptions sp, VALUE exception_class, const char *mesg);
|
1475
|
+
|
1476
|
+
void rb_gc_mark_machine_stack(rb_thread_t *th);
|
1477
|
+
|
1478
|
+
int rb_autoloading_value(VALUE mod, ID id, VALUE* value);
|
1479
|
+
|
1480
|
+
void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
|
1481
|
+
|
1482
|
+
const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
|
1483
|
+
|
1484
|
+
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
|
1485
|
+
|
1486
|
+
#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
|
1487
|
+
#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
|
1488
|
+
(!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
|
1489
|
+
!RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
|
1490
|
+
((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
|
1491
|
+
#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
|
1492
|
+
if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
|
1493
|
+
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
|
1494
|
+
WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
|
1495
|
+
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
|
1496
|
+
WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
|
1497
|
+
|
1498
|
+
/* for thread */
|
1499
|
+
|
1500
|
+
#if RUBY_VM_THREAD_MODEL == 2
|
1501
|
+
extern rb_thread_t *ruby_current_thread;
|
1502
|
+
extern rb_vm_t *ruby_current_vm;
|
1503
|
+
extern rb_event_flag_t ruby_vm_event_flags;
|
1504
|
+
|
1505
|
+
#define GET_VM() ruby_current_vm
|
1506
|
+
|
1507
|
+
#ifndef OPT_CALL_CFUNC_WITHOUT_FRAME
|
1508
|
+
#define OPT_CALL_CFUNC_WITHOUT_FRAME 0
|
1509
|
+
#endif
|
1510
|
+
|
1511
|
+
#define GET_THREAD() vm_thread_with_frame(ruby_current_thread)
|
1512
|
+
#if OPT_CALL_CFUNC_WITHOUT_FRAME
|
1513
|
+
/* Return `th`, pushing a deferred cfunc frame first when one is pending
 * (th->passed_ci != 0).  Presumably this materializes the frame that was
 * skipped by the OPT_CALL_CFUNC_WITHOUT_FRAME optimization -- confirm
 * against rb_vm_call_cfunc_push_frame's definition. */
static inline rb_thread_t *
vm_thread_with_frame(rb_thread_t *th)
{
    void rb_vm_call_cfunc_push_frame(rb_thread_t *);

    if (UNLIKELY(th->passed_ci != 0)) {
	rb_vm_call_cfunc_push_frame(th);
    }
    return th;
}
|
1522
|
+
#else
|
1523
|
+
#define vm_thread_with_frame(th) (th)
|
1524
|
+
#endif
|
1525
|
+
|
1526
|
+
#define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
|
1527
|
+
#define rb_thread_set_current(th) do { \
|
1528
|
+
if ((th)->vm->running_thread != (th)) { \
|
1529
|
+
(th)->running_time_us = 0; \
|
1530
|
+
} \
|
1531
|
+
rb_thread_set_current_raw(th); \
|
1532
|
+
(th)->vm->running_thread = (th); \
|
1533
|
+
} while (0)
|
1534
|
+
|
1535
|
+
#else
|
1536
|
+
#error "unsupported thread model"
|
1537
|
+
#endif
|
1538
|
+
|
1539
|
+
/* Bit flags OR-ed into th->interrupt_flag by the RUBY_VM_SET_* macros below. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08
};
|
1545
|
+
|
1546
|
+
#define RUBY_VM_SET_TIMER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TIMER_INTERRUPT_MASK)
|
1547
|
+
#define RUBY_VM_SET_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, PENDING_INTERRUPT_MASK)
|
1548
|
+
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
|
1549
|
+
#define RUBY_VM_SET_TRAP_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TRAP_INTERRUPT_MASK)
|
1550
|
+
#define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & ~(th)->interrupt_mask & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
|
1551
|
+
#define RUBY_VM_INTERRUPTED_ANY(th) ((th)->interrupt_flag & ~(th)->interrupt_mask)
|
1552
|
+
|
1553
|
+
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
|
1554
|
+
int rb_signal_buff_size(void);
|
1555
|
+
void rb_signal_exec(rb_thread_t *th, int sig);
|
1556
|
+
void rb_threadptr_check_signal(rb_thread_t *mth);
|
1557
|
+
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
|
1558
|
+
void rb_threadptr_signal_exit(rb_thread_t *th);
|
1559
|
+
void rb_threadptr_execute_interrupts(rb_thread_t *, int);
|
1560
|
+
void rb_threadptr_interrupt(rb_thread_t *th);
|
1561
|
+
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
|
1562
|
+
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
|
1563
|
+
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
|
1564
|
+
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
|
1565
|
+
void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo);
|
1566
|
+
|
1567
|
+
#define RUBY_VM_CHECK_INTS(th) ruby_vm_check_ints(th)
|
1568
|
+
static inline void
|
1569
|
+
ruby_vm_check_ints(rb_thread_t *th)
|
1570
|
+
{
|
1571
|
+
if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) {
|
1572
|
+
rb_threadptr_execute_interrupts(th, 0);
|
1573
|
+
}
|
1574
|
+
}
|
1575
|
+
|
1576
|
+
/* tracer */
|
1577
|
+
struct rb_trace_arg_struct {
|
1578
|
+
rb_event_flag_t event;
|
1579
|
+
rb_thread_t *th;
|
1580
|
+
rb_control_frame_t *cfp;
|
1581
|
+
VALUE self;
|
1582
|
+
ID id;
|
1583
|
+
ID called_id;
|
1584
|
+
VALUE klass;
|
1585
|
+
VALUE data;
|
1586
|
+
|
1587
|
+
int klass_solved;
|
1588
|
+
|
1589
|
+
/* calc from cfp */
|
1590
|
+
int lineno;
|
1591
|
+
VALUE path;
|
1592
|
+
};
|
1593
|
+
|
1594
|
+
void rb_threadptr_exec_event_hooks(struct rb_trace_arg_struct *trace_arg);
|
1595
|
+
void rb_threadptr_exec_event_hooks_and_pop_frame(struct rb_trace_arg_struct *trace_arg);
|
1596
|
+
|
1597
|
+
#define EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
|
1598
|
+
const rb_event_flag_t flag_arg_ = (flag_); \
|
1599
|
+
if (UNLIKELY(ruby_vm_event_flags & (flag_arg_))) { \
|
1600
|
+
/* defer evaluating the other arguments */ \
|
1601
|
+
ruby_exec_event_hook_orig(th_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
|
1602
|
+
} \
|
1603
|
+
} while (0)
|
1604
|
+
|
1605
|
+
static inline void
|
1606
|
+
ruby_exec_event_hook_orig(rb_thread_t *const th, const rb_event_flag_t flag,
|
1607
|
+
VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
|
1608
|
+
{
|
1609
|
+
if ((th->event_hooks.events | th->vm->event_hooks.events) & flag) {
|
1610
|
+
struct rb_trace_arg_struct trace_arg;
|
1611
|
+
trace_arg.event = flag;
|
1612
|
+
trace_arg.th = th;
|
1613
|
+
trace_arg.cfp = th->cfp;
|
1614
|
+
trace_arg.self = self;
|
1615
|
+
trace_arg.id = id;
|
1616
|
+
trace_arg.called_id = called_id;
|
1617
|
+
trace_arg.klass = klass;
|
1618
|
+
trace_arg.data = data;
|
1619
|
+
trace_arg.path = Qundef;
|
1620
|
+
trace_arg.klass_solved = 0;
|
1621
|
+
if (pop_p) rb_threadptr_exec_event_hooks_and_pop_frame(&trace_arg);
|
1622
|
+
else rb_threadptr_exec_event_hooks(&trace_arg);
|
1623
|
+
}
|
1624
|
+
}
|
1625
|
+
|
1626
|
+
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_) \
|
1627
|
+
EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, 0)
|
1628
|
+
|
1629
|
+
#define EXEC_EVENT_HOOK_AND_POP_FRAME(th_, flag_, self_, id_, called_id_, klass_, data_) \
|
1630
|
+
EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, 1)
|
1631
|
+
|
1632
|
+
RUBY_SYMBOL_EXPORT_BEGIN
|
1633
|
+
|
1634
|
+
int rb_thread_check_trap_pending(void);
|
1635
|
+
|
1636
|
+
extern VALUE rb_get_coverages(void);
|
1637
|
+
extern void rb_set_coverages(VALUE);
|
1638
|
+
extern void rb_reset_coverages(void);
|
1639
|
+
|
1640
|
+
void rb_postponed_job_flush(rb_vm_t *vm);
|
1641
|
+
|
1642
|
+
RUBY_SYMBOL_EXPORT_END
|
1643
|
+
|
1644
|
+
#endif /* RUBY_VM_CORE_H */
|