debase-ruby_core_source 0.10.2 → 0.10.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/addr2line.h +21 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ccan/build_assert/build_assert.h +40 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ccan/check_type/check_type.h +63 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ccan/container_of/container_of.h +142 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ccan/list/list.h +773 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ccan/str/str.h +16 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/constant.h +51 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/debug_counter.h +109 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/dln.h +51 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/encindex.h +69 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/eval_intern.h +334 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/gc.h +116 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/id.h +250 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/id_table.h +31 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/insns.inc +217 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/insns_info.inc +1570 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/internal.h +2036 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/iseq.h +307 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/known_errors.inc +746 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/method.h +218 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/node.h +540 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/node_name.inc +198 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/opt_sc.inc +1601 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/optinsn.inc +103 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/optunifs.inc +61 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/parse.h +206 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/probes_helper.h +43 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/regenc.h +254 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/regint.h +938 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/regparse.h +370 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/revision.h +1 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ruby_assert.h +54 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/ruby_atomic.h +233 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/siphash.h +48 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/symbol.h +108 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/thread_pthread.h +54 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/thread_win32.h +36 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/timev.h +42 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/transcode_data.h +139 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/version.h +73 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm.inc +3667 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_call_iseq_optimized.inc +213 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_core.h +1767 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_debug.h +37 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_exec.h +192 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_insnhelper.h +255 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_opts.h +56 -0
- data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vmtc.inc +214 -0
- data/lib/debase/ruby_core_source/version.rb +1 -1
- metadata +51 -3
data/lib/debase/ruby_core_source/ruby-2.5.1-p57/vm_call_iseq_optimized.inc (new file, +213 lines):

```diff
@@ -0,0 +1,213 @@
+/* -*- c -*- */
+#if 1 /* enable or disable this optimization */
+
+/* DO NOT EDIT THIS FILE DIRECTLY
+ *
+ * This file is generated by tool/mk_call_iseq_optimized.rb
+ */
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 0);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 1);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 2);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 3);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 4);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_0params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 0, 5);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 0);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 1);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 2);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 3);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 4);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_1params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 1, 5);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 0);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 1);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 2);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 3);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 4);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_2params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 2, 5);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_0locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 0);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_1locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 1);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_2locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 2);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_3locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 3);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_4locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 4);
+}
+
+static VALUE
+vm_call_iseq_setup_normal_0start_3params_5locals(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, 3, 5);
+}
+
+/* vm_call_iseq_handlers[param][local] */
+static const vm_call_handler vm_call_iseq_handlers[][6] = {
+  {vm_call_iseq_setup_normal_0start_0params_0locals,
+   vm_call_iseq_setup_normal_0start_0params_1locals,
+   vm_call_iseq_setup_normal_0start_0params_2locals,
+   vm_call_iseq_setup_normal_0start_0params_3locals,
+   vm_call_iseq_setup_normal_0start_0params_4locals,
+   vm_call_iseq_setup_normal_0start_0params_5locals},
+  {vm_call_iseq_setup_normal_0start_1params_0locals,
+   vm_call_iseq_setup_normal_0start_1params_1locals,
+   vm_call_iseq_setup_normal_0start_1params_2locals,
+   vm_call_iseq_setup_normal_0start_1params_3locals,
+   vm_call_iseq_setup_normal_0start_1params_4locals,
+   vm_call_iseq_setup_normal_0start_1params_5locals},
+  {vm_call_iseq_setup_normal_0start_2params_0locals,
+   vm_call_iseq_setup_normal_0start_2params_1locals,
+   vm_call_iseq_setup_normal_0start_2params_2locals,
+   vm_call_iseq_setup_normal_0start_2params_3locals,
+   vm_call_iseq_setup_normal_0start_2params_4locals,
+   vm_call_iseq_setup_normal_0start_2params_5locals},
+  {vm_call_iseq_setup_normal_0start_3params_0locals,
+   vm_call_iseq_setup_normal_0start_3params_1locals,
+   vm_call_iseq_setup_normal_0start_3params_2locals,
+   vm_call_iseq_setup_normal_0start_3params_3locals,
+   vm_call_iseq_setup_normal_0start_3params_4locals,
+   vm_call_iseq_setup_normal_0start_3params_5locals}
+};
+
+static inline vm_call_handler
+vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size)
+{
+    if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
+        return &vm_call_iseq_setup_tailcall_0start;
+    }
+    else if (0) { /* to disable optimize */
+        return &vm_call_iseq_setup_normal_0start;
+    }
+    else {
+        if (param_size <= 3 &&
+            local_size <= 5) {
+            VM_ASSERT(local_size >= 0);
+            return vm_call_iseq_handlers[param_size][local_size];
+        }
+        return &vm_call_iseq_setup_normal_0start;
+    }
+}
+
+#else
+
+
+static inline vm_call_handler
+vm_call_iseq_setup_func(const struct rb_call_info *ci, struct rb_call_cache *cc)
+{
+    if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
+        return &vm_call_iseq_setup_tailcall_0start;
+    }
+    else {
+        return &vm_call_iseq_setup_normal_0start;
+    }
+}
+#endif
```
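The generated file above is a specialization table: `tool/mk_call_iseq_optimized.rb` emits one trivial wrapper per small (parameter count, local count) pair, each calling the generic `vm_call_iseq_setup_normal` with those counts baked in as constants, and `vm_call_iseq_setup_func` dispatches through the two-dimensional `vm_call_iseq_handlers` table, falling back to the generic `*_0start` handlers for tail calls or larger frames. The sketch below illustrates the same table-dispatch technique in isolation; it is not code from the gem, and every name in it (`handler_t`, `generic`, `select_handler`, ...) is hypothetical.

```c
/* Standalone sketch of the dispatch technique used above: a generic
 * function parameterized at runtime, plus a table of specializations
 * indexed by two small constants. All names here are hypothetical. */
#include <stdio.h>

typedef int (*handler_t)(int base);

/* the "generic" path: parameters passed at runtime */
static int generic(int base, int params, int locals)
{
    return base + params * 10 + locals;
}

/* specializations: constants baked in, much as mk_call_iseq_optimized.rb
 * bakes param/local counts into each generated wrapper */
#define DEFINE_HANDLER(P, L) \
    static int handler_##P##params_##L##locals(int base) \
    { return generic(base, (P), (L)); }

DEFINE_HANDLER(0, 0) DEFINE_HANDLER(0, 1)
DEFINE_HANDLER(1, 0) DEFINE_HANDLER(1, 1)

/* handlers[param][local], mirroring vm_call_iseq_handlers[][6] */
static const handler_t handlers[2][2] = {
    {handler_0params_0locals, handler_0params_1locals},
    {handler_1params_0locals, handler_1params_1locals},
};

static handler_t select_handler(int params, int locals)
{
    /* small cases hit a specialized function; a real dispatcher would
     * fall back to the generic entry point for anything else, as
     * vm_call_iseq_setup_func falls back to vm_call_iseq_setup_normal_0start */
    return handlers[params][locals];
}

int main(void)
{
    printf("%d\n", select_handler(1, 1)(100)); /* prints 111 */
    return 0;
}
```

The payoff is that each specialized entry is a direct call whose counts are compile-time constants the optimizer can fold, while handler selection stays a constant-time table lookup.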
@@ -0,0 +1,1767 @@
|
|
1
|
+
/**********************************************************************
|
2
|
+
|
3
|
+
vm_core.h -
|
4
|
+
|
5
|
+
$Author: naruse $
|
6
|
+
created at: 04/01/01 19:41:38 JST
|
7
|
+
|
8
|
+
Copyright (C) 2004-2007 Koichi Sasada
|
9
|
+
|
10
|
+
**********************************************************************/
|
11
|
+
|
12
|
+
#ifndef RUBY_VM_CORE_H
|
13
|
+
#define RUBY_VM_CORE_H
|
14
|
+
|
15
|
+
/*
|
16
|
+
* Enable check mode.
|
17
|
+
* 1: enable local assertions.
|
18
|
+
*/
|
19
|
+
#ifndef VM_CHECK_MODE
|
20
|
+
#define VM_CHECK_MODE 0
|
21
|
+
#endif
|
22
|
+
|
23
|
+
/**
|
24
|
+
* VM Debug Level
|
25
|
+
*
|
26
|
+
* debug level:
|
27
|
+
* 0: no debug output
|
28
|
+
* 1: show instruction name
|
29
|
+
* 2: show stack frame when control stack frame is changed
|
30
|
+
* 3: show stack status
|
31
|
+
* 4: show register
|
32
|
+
* 5:
|
33
|
+
* 10: gc check
|
34
|
+
*/
|
35
|
+
|
36
|
+
#ifndef VMDEBUG
|
37
|
+
#define VMDEBUG 0
|
38
|
+
#endif
|
39
|
+
|
40
|
+
#if 0
|
41
|
+
#undef VMDEBUG
|
42
|
+
#define VMDEBUG 3
|
43
|
+
#endif
|
44
|
+
|
45
|
+
#include "ruby_assert.h"
|
46
|
+
|
47
|
+
#if VM_CHECK_MODE > 0
|
48
|
+
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
|
49
|
+
|
50
|
+
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
|
51
|
+
|
52
|
+
#else
|
53
|
+
#define VM_ASSERT(expr) ((void)0)
|
54
|
+
#define VM_UNREACHABLE(func) UNREACHABLE
|
55
|
+
#endif
|
56
|
+
|
57
|
+
#define RUBY_VM_THREAD_MODEL 2
|
58
|
+
|
59
|
+
#include "ruby/ruby.h"
|
60
|
+
#include "ruby/st.h"
|
61
|
+
|
62
|
+
#include "node.h"
|
63
|
+
#include "vm_debug.h"
|
64
|
+
#include "vm_opts.h"
|
65
|
+
#include "id.h"
|
66
|
+
#include "method.h"
|
67
|
+
#include "ruby_atomic.h"
|
68
|
+
#include "ccan/list/list.h"
|
69
|
+
|
70
|
+
#include "ruby/thread_native.h"
|
71
|
+
#if defined(_WIN32)
|
72
|
+
#include "thread_win32.h"
|
73
|
+
#elif defined(HAVE_PTHREAD_H)
|
74
|
+
#include "thread_pthread.h"
|
75
|
+
#endif
|
76
|
+
|
77
|
+
#include <setjmp.h>
|
78
|
+
#include <signal.h>
|
79
|
+
|
80
|
+
#ifndef NSIG
|
81
|
+
# define NSIG (_SIGMAX + 1) /* For QNX */
|
82
|
+
#endif
|
83
|
+
|
84
|
+
#define RUBY_NSIG NSIG
|
85
|
+
|
86
|
+
#ifdef HAVE_STDARG_PROTOTYPES
|
87
|
+
#include <stdarg.h>
|
88
|
+
#define va_init_list(a,b) va_start((a),(b))
|
89
|
+
#else
|
90
|
+
#include <varargs.h>
|
91
|
+
#define va_init_list(a,b) va_start((a))
|
92
|
+
#endif
|
93
|
+
|
94
|
+
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
|
95
|
+
#define USE_SIGALTSTACK
|
96
|
+
#endif
|
97
|
+
|
98
|
+
/*****************/
|
99
|
+
/* configuration */
|
100
|
+
/*****************/
|
101
|
+
|
102
|
+
/* gcc ver. check */
|
103
|
+
#if defined(__GNUC__) && __GNUC__ >= 2
|
104
|
+
|
105
|
+
#if OPT_TOKEN_THREADED_CODE
|
106
|
+
#if OPT_DIRECT_THREADED_CODE
|
107
|
+
#undef OPT_DIRECT_THREADED_CODE
|
108
|
+
#endif
|
109
|
+
#endif
|
110
|
+
|
111
|
+
#else /* defined(__GNUC__) && __GNUC__ >= 2 */
|
112
|
+
|
113
|
+
/* disable threaded code options */
|
114
|
+
#if OPT_DIRECT_THREADED_CODE
|
115
|
+
#undef OPT_DIRECT_THREADED_CODE
|
116
|
+
#endif
|
117
|
+
#if OPT_TOKEN_THREADED_CODE
|
118
|
+
#undef OPT_TOKEN_THREADED_CODE
|
119
|
+
#endif
|
120
|
+
#endif
|
121
|
+
|
122
|
+
/* call threaded code */
|
123
|
+
#if OPT_CALL_THREADED_CODE
|
124
|
+
#if OPT_DIRECT_THREADED_CODE
|
125
|
+
#undef OPT_DIRECT_THREADED_CODE
|
126
|
+
#endif /* OPT_DIRECT_THREADED_CODE */
|
127
|
+
#if OPT_STACK_CACHING
|
128
|
+
#undef OPT_STACK_CACHING
|
129
|
+
#endif /* OPT_STACK_CACHING */
|
130
|
+
#endif /* OPT_CALL_THREADED_CODE */
|
131
|
+
|
132
|
+
typedef unsigned long rb_num_t;
|
133
|
+
|
134
|
+
enum ruby_tag_type {
|
135
|
+
RUBY_TAG_NONE = 0x0,
|
136
|
+
RUBY_TAG_RETURN = 0x1,
|
137
|
+
RUBY_TAG_BREAK = 0x2,
|
138
|
+
RUBY_TAG_NEXT = 0x3,
|
139
|
+
RUBY_TAG_RETRY = 0x4,
|
140
|
+
RUBY_TAG_REDO = 0x5,
|
141
|
+
RUBY_TAG_RAISE = 0x6,
|
142
|
+
RUBY_TAG_THROW = 0x7,
|
143
|
+
RUBY_TAG_FATAL = 0x8,
|
144
|
+
RUBY_TAG_MASK = 0xf
|
145
|
+
};
|
146
|
+
|
147
|
+
#define TAG_NONE RUBY_TAG_NONE
|
148
|
+
#define TAG_RETURN RUBY_TAG_RETURN
|
149
|
+
#define TAG_BREAK RUBY_TAG_BREAK
|
150
|
+
#define TAG_NEXT RUBY_TAG_NEXT
|
151
|
+
#define TAG_RETRY RUBY_TAG_RETRY
|
152
|
+
#define TAG_REDO RUBY_TAG_REDO
|
153
|
+
#define TAG_RAISE RUBY_TAG_RAISE
|
154
|
+
#define TAG_THROW RUBY_TAG_THROW
|
155
|
+
#define TAG_FATAL RUBY_TAG_FATAL
|
156
|
+
#define TAG_MASK RUBY_TAG_MASK
|
157
|
+
|
158
|
+
enum ruby_vm_throw_flags {
|
159
|
+
VM_THROW_NO_ESCAPE_FLAG = 0x8000,
|
160
|
+
VM_THROW_LEVEL_SHIFT = 16,
|
161
|
+
VM_THROW_STATE_MASK = 0xff
|
162
|
+
};
|
163
|
+
|
164
|
+
/* forward declarations */
|
165
|
+
struct rb_thread_struct;
|
166
|
+
struct rb_control_frame_struct;
|
167
|
+
|
168
|
+
/* iseq data type */
|
169
|
+
typedef struct rb_compile_option_struct rb_compile_option_t;
|
170
|
+
|
171
|
+
struct iseq_inline_cache_entry {
|
172
|
+
rb_serial_t ic_serial;
|
173
|
+
const rb_cref_t *ic_cref;
|
174
|
+
union {
|
175
|
+
size_t index;
|
176
|
+
VALUE value;
|
177
|
+
} ic_value;
|
178
|
+
};
|
179
|
+
|
180
|
+
union iseq_inline_storage_entry {
|
181
|
+
struct {
|
182
|
+
struct rb_thread_struct *running_thread;
|
183
|
+
VALUE value;
|
184
|
+
} once;
|
185
|
+
struct iseq_inline_cache_entry cache;
|
186
|
+
};
|
187
|
+
|
188
|
+
enum method_missing_reason {
|
189
|
+
MISSING_NOENTRY = 0x00,
|
190
|
+
MISSING_PRIVATE = 0x01,
|
191
|
+
MISSING_PROTECTED = 0x02,
|
192
|
+
MISSING_FCALL = 0x04,
|
193
|
+
MISSING_VCALL = 0x08,
|
194
|
+
MISSING_SUPER = 0x10,
|
195
|
+
MISSING_MISSING = 0x20,
|
196
|
+
MISSING_NONE = 0x40
|
197
|
+
};
|
198
|
+
|
199
|
+
struct rb_call_info {
|
200
|
+
/* fixed at compile time */
|
201
|
+
ID mid;
|
202
|
+
unsigned int flag;
|
203
|
+
int orig_argc;
|
204
|
+
};
|
205
|
+
|
206
|
+
struct rb_call_info_kw_arg {
|
207
|
+
int keyword_len;
|
208
|
+
VALUE keywords[1];
|
209
|
+
};
|
210
|
+
|
211
|
+
struct rb_call_info_with_kwarg {
|
212
|
+
struct rb_call_info ci;
|
213
|
+
struct rb_call_info_kw_arg *kw_arg;
|
214
|
+
};
|
215
|
+
|
216
|
+
struct rb_calling_info {
|
217
|
+
VALUE block_handler;
|
218
|
+
VALUE recv;
|
219
|
+
int argc;
|
220
|
+
};
|
221
|
+
|
222
|
+
struct rb_call_cache;
|
223
|
+
struct rb_execution_context_struct;
|
224
|
+
typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
225
|
+
|
226
|
+
struct rb_call_cache {
|
227
|
+
/* inline cache: keys */
|
228
|
+
rb_serial_t method_state;
|
229
|
+
rb_serial_t class_serial;
|
230
|
+
|
231
|
+
/* inline cache: values */
|
232
|
+
const rb_callable_method_entry_t *me;
|
233
|
+
|
234
|
+
vm_call_handler call;
|
235
|
+
|
236
|
+
union {
|
237
|
+
unsigned int index; /* used by ivar */
|
238
|
+
enum method_missing_reason method_missing_reason; /* used by method_missing */
|
239
|
+
int inc_sp; /* used by cfunc */
|
240
|
+
} aux;
|
241
|
+
};
|
242
|
+
|
243
|
+
#if 1
|
244
|
+
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
|
245
|
+
#else
|
246
|
+
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
|
247
|
+
#endif
|
248
|
+
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
|
249
|
+
|
250
|
+
typedef struct rb_iseq_location_struct {
|
251
|
+
VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
|
252
|
+
VALUE base_label; /* String */
|
253
|
+
VALUE label; /* String */
|
254
|
+
VALUE first_lineno; /* TODO: may be unsigned short */
|
255
|
+
rb_code_range_t code_range;
|
256
|
+
} rb_iseq_location_t;
|
257
|
+
|
258
|
+
#define PATHOBJ_PATH 0
|
259
|
+
#define PATHOBJ_REALPATH 1
|
260
|
+
|
261
|
+
static inline VALUE
|
262
|
+
pathobj_path(VALUE pathobj)
|
263
|
+
{
|
264
|
+
if (RB_TYPE_P(pathobj, T_STRING)) {
|
265
|
+
return pathobj;
|
266
|
+
}
|
267
|
+
else {
|
268
|
+
VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
|
269
|
+
return RARRAY_AREF(pathobj, PATHOBJ_PATH);
|
270
|
+
}
|
271
|
+
}
|
272
|
+
|
273
|
+
static inline VALUE
|
274
|
+
pathobj_realpath(VALUE pathobj)
|
275
|
+
{
|
276
|
+
if (RB_TYPE_P(pathobj, T_STRING)) {
|
277
|
+
return pathobj;
|
278
|
+
}
|
279
|
+
else {
|
280
|
+
VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
|
281
|
+
return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
|
282
|
+
}
|
283
|
+
}
|
284
|
+
|
285
|
+
struct rb_iseq_constant_body {
|
286
|
+
enum iseq_type {
|
287
|
+
ISEQ_TYPE_TOP,
|
288
|
+
ISEQ_TYPE_METHOD,
|
289
|
+
ISEQ_TYPE_BLOCK,
|
290
|
+
ISEQ_TYPE_CLASS,
|
291
|
+
ISEQ_TYPE_RESCUE,
|
292
|
+
ISEQ_TYPE_ENSURE,
|
293
|
+
ISEQ_TYPE_EVAL,
|
294
|
+
ISEQ_TYPE_MAIN,
|
295
|
+
ISEQ_TYPE_DEFINED_GUARD
|
296
|
+
} type; /* instruction sequence type */
|
297
|
+
|
298
|
+
unsigned int iseq_size;
|
299
|
+
const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
|
300
|
+
|
301
|
+
/**
|
302
|
+
* parameter information
|
303
|
+
*
|
304
|
+
* def m(a1, a2, ..., aM, # mandatory
|
305
|
+
* b1=(...), b2=(...), ..., bN=(...), # optional
|
306
|
+
* *c, # rest
|
307
|
+
* d1, d2, ..., dO, # post
|
308
|
+
* e1:(...), e2:(...), ..., eK:(...), # keyword
|
309
|
+
* **f, # keyword_rest
|
310
|
+
* &g) # block
|
311
|
+
* =>
|
312
|
+
*
|
313
|
+
* lead_num = M
|
314
|
+
* opt_num = N
|
315
|
+
* rest_start = M+N
|
316
|
+
* post_start = M+N+(*1)
|
317
|
+
* post_num = O
|
318
|
+
* keyword_num = K
|
319
|
+
* block_start = M+N+(*1)+O+K
|
320
|
+
* keyword_bits = M+N+(*1)+O+K+(&1)
|
321
|
+
* size = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
|
322
|
+
*/
|
323
|
+
|
324
|
+
struct {
|
325
|
+
struct {
|
326
|
+
unsigned int has_lead : 1;
|
327
|
+
unsigned int has_opt : 1;
|
328
|
+
unsigned int has_rest : 1;
|
329
|
+
unsigned int has_post : 1;
|
330
|
+
unsigned int has_kw : 1;
|
331
|
+
unsigned int has_kwrest : 1;
|
332
|
+
unsigned int has_block : 1;
|
333
|
+
|
334
|
+
unsigned int ambiguous_param0 : 1; /* {|a|} */
|
335
|
+
} flags;
|
336
|
+
|
337
|
+
unsigned int size;
|
338
|
+
|
339
|
+
int lead_num;
|
340
|
+
int opt_num;
|
341
|
+
int rest_start;
|
342
|
+
int post_start;
|
343
|
+
int post_num;
|
344
|
+
int block_start;
|
345
|
+
|
346
|
+
const VALUE *opt_table; /* (opt_num + 1) entries. */
|
347
|
+
/* opt_num and opt_table:
|
348
|
+
*
|
349
|
+
* def foo o1=e1, o2=e2, ..., oN=eN
|
350
|
+
* #=>
|
351
|
+
* # prologue code
|
352
|
+
* A1: e1
|
353
|
+
* A2: e2
|
354
|
+
* ...
|
355
|
+
* AN: eN
|
356
|
+
* AL: body
|
357
|
+
* opt_num = N
|
358
|
+
* opt_table = [A1, A2, ..., AN, AL]
|
359
|
+
*/
|
360
|
+
|
361
|
+
const struct rb_iseq_param_keyword {
|
362
|
+
int num;
|
363
|
+
int required_num;
|
364
|
+
int bits_start;
|
365
|
+
int rest_start;
|
366
|
+
const ID *table;
|
367
|
+
const VALUE *default_values;
|
368
|
+
} *keyword;
|
369
|
+
} param;
|
370
|
+
|
371
|
+
rb_iseq_location_t location;
|
372
|
+
|
373
|
+
/* insn info, must be freed */
|
374
|
+
const struct iseq_insn_info_entry *insns_info;
|
375
|
+
|
376
|
+
const ID *local_table; /* must free */
|
377
|
+
|
378
|
+
/* catch table */
|
379
|
+
const struct iseq_catch_table *catch_table;
|
380
|
+
|
381
|
+
/* for child iseq */
|
382
|
+
const struct rb_iseq_struct *parent_iseq;
|
383
|
+
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
|
384
|
+
|
385
|
+
union iseq_inline_storage_entry *is_entries;
|
386
|
+
struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
|
387
|
+
* struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
|
388
|
+
* So that:
|
389
|
+
* struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
|
390
|
+
*/
|
391
|
+
struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */
|
392
|
+
|
393
|
+
VALUE mark_ary; /* Array: includes operands which should be GC marked */
|
394
|
+
|
395
|
+
unsigned int local_table_size;
|
396
|
+
unsigned int is_size;
|
397
|
+
unsigned int ci_size;
|
398
|
+
unsigned int ci_kw_size;
|
399
|
+
unsigned int insns_info_size;
|
400
|
+
unsigned int stack_max; /* for stack overflow check */
|
401
|
+
};
|
402
|
+
|
403
|
+
/* T_IMEMO/iseq */
|
404
|
+
/* typedef rb_iseq_t is in method.h */
|
405
|
+
struct rb_iseq_struct {
|
406
|
+
VALUE flags;
|
407
|
+
VALUE reserved1;
|
408
|
+
struct rb_iseq_constant_body *body;
|
409
|
+
|
410
|
+
union { /* 4, 5 words */
|
411
|
+
struct iseq_compile_data *compile_data; /* used at compile time */
|
412
|
+
|
413
|
+
struct {
|
414
|
+
VALUE obj;
|
415
|
+
int index;
|
416
|
+
} loader;
|
417
|
+
|
418
|
+
rb_event_flag_t trace_events;
|
419
|
+
} aux;
|
420
|
+
};
|
421
|
+
|
422
|
+
#ifndef USE_LAZY_LOAD
|
423
|
+
#define USE_LAZY_LOAD 0
|
424
|
+
#endif
|
425
|
+
|
426
|
+
#if USE_LAZY_LOAD
|
427
|
+
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
|
428
|
+
#endif
|
429
|
+
|
430
|
+
static inline const rb_iseq_t *
|
431
|
+
rb_iseq_check(const rb_iseq_t *iseq)
|
432
|
+
{
|
433
|
+
#if USE_LAZY_LOAD
|
434
|
+
if (iseq->body == NULL) {
|
435
|
+
rb_iseq_complete((rb_iseq_t *)iseq);
|
436
|
+
}
|
437
|
+
#endif
|
438
|
+
return iseq;
|
439
|
+
}
|
440
|
+
|
441
|
+
enum ruby_special_exceptions {
|
442
|
+
ruby_error_reenter,
|
443
|
+
ruby_error_nomemory,
|
444
|
+
ruby_error_sysstack,
|
445
|
+
ruby_error_stackfatal,
|
446
|
+
ruby_error_stream_closed,
|
447
|
+
ruby_special_error_count
|
448
|
+
};
|
449
|
+
|
450
|
+
enum ruby_basic_operators {
|
451
|
+
BOP_PLUS,
|
452
|
+
BOP_MINUS,
|
453
|
+
BOP_MULT,
|
454
|
+
BOP_DIV,
|
455
|
+
BOP_MOD,
|
456
|
+
BOP_EQ,
|
457
|
+
BOP_EQQ,
|
458
|
+
BOP_LT,
|
459
|
+
BOP_LE,
|
460
|
+
BOP_LTLT,
|
461
|
+
BOP_AREF,
|
462
|
+
BOP_ASET,
|
463
|
+
BOP_LENGTH,
|
464
|
+
BOP_SIZE,
|
465
|
+
BOP_EMPTY_P,
|
466
|
+
BOP_SUCC,
|
467
|
+
BOP_GT,
|
468
|
+
BOP_GE,
|
469
|
+
BOP_NOT,
|
470
|
+
BOP_NEQ,
|
471
|
+
BOP_MATCH,
|
472
|
+
BOP_FREEZE,
|
473
|
+
BOP_UMINUS,
|
474
|
+
BOP_MAX,
|
475
|
+
BOP_MIN,
|
476
|
+
|
477
|
+
BOP_LAST_
|
478
|
+
};
|
479
|
+
|
480
|
+
#define GetVMPtr(obj, ptr) \
|
481
|
+
GetCoreDataFromValue((obj), rb_vm_t, (ptr))
|
482
|
+
|
483
|
+
struct rb_vm_struct;
|
484
|
+
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
|
485
|
+
|
486
|
+
typedef struct rb_at_exit_list {
|
487
|
+
rb_vm_at_exit_func *func;
|
488
|
+
struct rb_at_exit_list *next;
|
489
|
+
} rb_at_exit_list;
|
490
|
+
|
491
|
+
struct rb_objspace;
|
492
|
+
struct rb_objspace *rb_objspace_alloc(void);
|
493
|
+
void rb_objspace_free(struct rb_objspace *);
|
494
|
+
|
495
|
+
typedef struct rb_hook_list_struct {
|
496
|
+
struct rb_event_hook_struct *hooks;
|
497
|
+
rb_event_flag_t events;
|
498
|
+
int need_clean;
|
499
|
+
} rb_hook_list_t;
|
500
|
+
|
501
|
+
typedef struct rb_vm_struct {
|
502
|
+
VALUE self;
|
503
|
+
|
504
|
+
rb_global_vm_lock_t gvl;
|
505
|
+
rb_nativethread_lock_t thread_destruct_lock;
|
506
|
+
|
507
|
+
struct rb_thread_struct *main_thread;
|
508
|
+
struct rb_thread_struct *running_thread;
|
509
|
+
|
510
|
+
struct list_head waiting_fds; /* <=> struct waiting_fd */
|
511
|
+
struct list_head living_threads;
|
512
|
+
size_t living_thread_num;
|
513
|
+
VALUE thgroup_default;
|
514
|
+
|
515
|
+
unsigned int running: 1;
|
516
|
+
unsigned int thread_abort_on_exception: 1;
|
517
|
+
unsigned int thread_report_on_exception: 1;
|
518
|
+
int trace_running;
|
519
|
+
volatile int sleeper;
|
520
|
+
|
521
|
+
/* object management */
|
522
|
+
VALUE mark_object_ary;
|
523
|
+
const VALUE special_exceptions[ruby_special_error_count];
|
524
|
+
|
525
|
+
/* load */
|
526
|
+
VALUE top_self;
|
527
|
+
VALUE load_path;
|
528
|
+
VALUE load_path_snapshot;
|
529
|
+
VALUE load_path_check_cache;
|
530
|
+
VALUE expanded_load_path;
|
531
|
+
VALUE loaded_features;
|
532
|
+
VALUE loaded_features_snapshot;
|
533
|
+
struct st_table *loaded_features_index;
|
534
|
+
struct st_table *loading_table;
|
535
|
+
|
536
|
+
/* signal */
|
537
|
+
struct {
|
538
|
+
VALUE cmd[RUBY_NSIG];
|
539
|
+
unsigned char safe[RUBY_NSIG];
|
540
|
+
} trap_list;
|
541
|
+
|
542
|
+
/* hook */
|
543
|
+
rb_hook_list_t event_hooks;
|
544
|
+
|
545
|
+
/* relation table of ensure - rollback for callcc */
|
546
|
+
struct st_table *ensure_rollback_table;
|
547
|
+
|
548
|
+
/* postponed_job */
|
549
|
+
struct rb_postponed_job_struct *postponed_job_buffer;
|
550
|
+
int postponed_job_index;
|
551
|
+
|
552
|
+
int src_encoding_index;
|
553
|
+
|
554
|
+
VALUE verbose, debug, orig_progname, progname;
|
555
|
+
VALUE coverages;
|
556
|
+
int coverage_mode;
|
557
|
+
|
558
|
+
VALUE defined_module_hash;
|
559
|
+
|
560
|
+
struct rb_objspace *objspace;
|
561
|
+
|
562
|
+
rb_at_exit_list *at_exit;
|
563
|
+
|
564
|
+
VALUE *defined_strings;
|
565
|
+
st_table *frozen_strings;
|
566
|
+
|
567
|
+
/* params */
|
568
|
+
struct { /* size in byte */
|
569
|
+
size_t thread_vm_stack_size;
|
570
|
+
size_t thread_machine_stack_size;
|
571
|
+
size_t fiber_vm_stack_size;
|
572
|
+
size_t fiber_machine_stack_size;
|
573
|
+
} default_params;
|
574
|
+
|
575
|
+
short redefined_flag[BOP_LAST_];
|
576
|
+
} rb_vm_t;
|
577
|
+
|
578
|
+
/* default values */
|
579
|
+
|
580
|
+
#define RUBY_VM_SIZE_ALIGN 4096
|
581
|
+
|
582
|
+
#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
583
|
+
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
584
|
+
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
|
585
|
+
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
586
|
+
|
587
|
+
#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
588
|
+
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
|
589
|
+
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
|
590
|
+
#if defined(__powerpc64__)
|
591
|
+
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
|
592
|
+
#else
|
593
|
+
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
|
594
|
+
#endif
|
595
|
+
|
596
|
+
/* optimize insn */
|
597
|
+
#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
|
598
|
+
#define FLOAT_REDEFINED_OP_FLAG (1 << 1)
|
599
|
+
#define STRING_REDEFINED_OP_FLAG (1 << 2)
|
600
|
+
#define ARRAY_REDEFINED_OP_FLAG (1 << 3)
|
601
|
+
#define HASH_REDEFINED_OP_FLAG (1 << 4)
|
602
|
+
/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
|
603
|
+
#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
|
604
|
+
#define TIME_REDEFINED_OP_FLAG (1 << 7)
|
605
|
+
#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
|
606
|
+
#define NIL_REDEFINED_OP_FLAG (1 << 9)
|
607
|
+
#define TRUE_REDEFINED_OP_FLAG (1 << 10)
|
608
|
+
#define FALSE_REDEFINED_OP_FLAG (1 << 11)
|
609
|
+
|
610
|
+
#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
|
611
|
+
|
612
|
+
#ifndef VM_DEBUG_BP_CHECK
|
613
|
+
#define VM_DEBUG_BP_CHECK 0
|
614
|
+
#endif
|
615
|
+
|
616
|
+
#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
|
617
|
+
#define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
|
618
|
+
#endif
|
619
|
+
|
620
|
+
struct rb_captured_block {
|
621
|
+
VALUE self;
|
622
|
+
const VALUE *ep;
|
623
|
+
union {
|
624
|
+
const rb_iseq_t *iseq;
|
625
|
+
const struct vm_ifunc *ifunc;
|
626
|
+
VALUE val;
|
627
|
+
} code;
|
628
|
+
};
|
629
|
+
|
630
|
+
enum rb_block_handler_type {
|
631
|
+
block_handler_type_iseq,
|
632
|
+
block_handler_type_ifunc,
|
633
|
+
block_handler_type_symbol,
|
634
|
+
block_handler_type_proc
|
635
|
+
};
|
636
|
+
|
637
|
+
enum rb_block_type {
|
638
|
+
block_type_iseq,
|
639
|
+
block_type_ifunc,
|
640
|
+
block_type_symbol,
|
641
|
+
block_type_proc
|
642
|
+
};
|
643
|
+
|
644
|
+
struct rb_block {
|
645
|
+
union {
|
646
|
+
struct rb_captured_block captured;
|
647
|
+
VALUE symbol;
|
648
|
+
VALUE proc;
|
649
|
+
} as;
|
650
|
+
enum rb_block_type type;
|
651
|
+
};
|
652
|
+
|
653
|
+
typedef struct rb_control_frame_struct {
|
654
|
+
const VALUE *pc; /* cfp[0] */
|
655
|
+
VALUE *sp; /* cfp[1] */
|
656
|
+
const rb_iseq_t *iseq; /* cfp[2] */
|
657
|
+
VALUE self; /* cfp[3] / block[0] */
|
658
|
+
const VALUE *ep; /* cfp[4] / block[1] */
|
659
|
+
const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */
|
660
|
+
|
661
|
+
#if VM_DEBUG_BP_CHECK
|
662
|
+
VALUE *bp_check; /* cfp[6] */
|
663
|
+
#endif
|
664
|
+
} rb_control_frame_t;
|
665
|
+
|
666
|
+
extern const rb_data_type_t ruby_threadptr_data_type;
|
667
|
+
|
668
|
+
static inline struct rb_thread_struct *
|
669
|
+
rb_thread_ptr(VALUE thval)
|
670
|
+
{
|
671
|
+
return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
|
672
|
+
}
|
673
|
+
|
674
|
+
enum rb_thread_status {
|
675
|
+
THREAD_RUNNABLE,
|
676
|
+
THREAD_STOPPED,
|
677
|
+
THREAD_STOPPED_FOREVER,
|
678
|
+
THREAD_KILLED
|
679
|
+
};
|
680
|
+
|
681
|
+
typedef RUBY_JMP_BUF rb_jmpbuf_t;
|
682
|
+
|
683
|
+
/*
|
684
|
+
the members which are written in EC_PUSH_TAG() should be placed at
|
685
|
+
the beginning and the end, so that entire region is accessible.
|
686
|
+
*/
|
687
|
+
struct rb_vm_tag {
|
688
|
+
VALUE tag;
|
689
|
+
VALUE retval;
|
690
|
+
rb_jmpbuf_t buf;
|
691
|
+
struct rb_vm_tag *prev;
|
692
|
+
enum ruby_tag_type state;
|
693
|
+
};
|
694
|
+
|
695
|
+
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
|
696
|
+
STATIC_ASSERT(rb_vm_tag_buf_end,
|
697
|
+
offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
|
698
|
+
sizeof(struct rb_vm_tag));
|
699
|
+
|
700
|
+
struct rb_vm_protect_tag {
|
701
|
+
struct rb_vm_protect_tag *prev;
|
702
|
+
};
|
703
|
+
|
704
|
+
struct rb_unblock_callback {
|
705
|
+
rb_unblock_function_t *func;
|
706
|
+
void *arg;
|
707
|
+
};
|
708
|
+
|
709
|
+
struct rb_mutex_struct;
|
710
|
+
|
711
|
+
typedef struct rb_thread_list_struct{
|
712
|
+
struct rb_thread_list_struct *next;
|
713
|
+
struct rb_thread_struct *th;
|
714
|
+
} rb_thread_list_t;
|
715
|
+
|
716
|
+
typedef struct rb_ensure_entry {
|
717
|
+
VALUE marker;
|
718
|
+
VALUE (*e_proc)(ANYARGS);
|
719
|
+
VALUE data2;
|
720
|
+
} rb_ensure_entry_t;
|
721
|
+
|
722
|
+
typedef struct rb_ensure_list {
|
723
|
+
struct rb_ensure_list *next;
|
724
|
+
struct rb_ensure_entry entry;
|
725
|
+
} rb_ensure_list_t;
|
726
|
+
|
727
|
+
typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
|
728
|
+
|
729
|
+
typedef struct rb_fiber_struct rb_fiber_t;
|
730
|
+
|
731
|
+
typedef struct rb_execution_context_struct {
|
732
|
+
/* execution information */
|
733
|
+
VALUE *vm_stack; /* must free, must mark */
|
734
|
+
size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
|
735
|
+
rb_control_frame_t *cfp;
|
736
|
+
|
737
|
+
struct rb_vm_tag *tag;
|
738
|
+
struct rb_vm_protect_tag *protect_tag;
|
739
|
+
int safe_level;
|
740
|
+
int raised_flag;
|
741
|
+
|
742
|
+
/* interrupt flags */
|
743
|
+
rb_atomic_t interrupt_flag;
|
744
|
+
unsigned long interrupt_mask;
|
745
|
+
|
746
|
+
rb_fiber_t *fiber_ptr;
|
747
|
+
struct rb_thread_struct *thread_ptr;
|
748
|
+
|
749
|
+
/* storage (ec (fiber) local) */
|
750
|
+
st_table *local_storage;
|
751
|
+
VALUE local_storage_recursive_hash;
|
752
|
+
VALUE local_storage_recursive_hash_for_trace;
|
753
|
+
|
754
|
+
/* eval env */
|
755
|
+
const VALUE *root_lep;
|
756
|
+
VALUE root_svar;
|
757
|
+
|
758
|
+
/* ensure & callcc */
|
759
|
+
rb_ensure_list_t *ensure_list;
|
760
|
+
|
761
|
+
/* trace information */
|
762
|
+
struct rb_trace_arg_struct *trace_arg;
|
763
|
+
|
764
|
+
/* temporary places */
|
765
|
+
VALUE errinfo;
|
766
|
+
VALUE passed_block_handler; /* for rb_iterate */
|
767
|
+
const rb_callable_method_entry_t *passed_bmethod_me; /* for bmethod */
|
768
|
+
enum method_missing_reason method_missing_reason;
|
769
|
+
|
770
|
+
/* for GC */
|
771
|
+
struct {
|
772
|
+
VALUE *stack_start;
|
773
|
+
VALUE *stack_end;
|
774
|
+
size_t stack_maxsize;
|
775
|
+
#ifdef __ia64
|
776
|
+
VALUE *register_stack_start;
|
777
|
+
VALUE *register_stack_end;
|
778
|
+
size_t register_stack_maxsize;
|
779
|
+
#endif
|
780
|
+
jmp_buf regs;
|
781
|
+
} machine;
|
782
|
+
} rb_execution_context_t;
|
783
|
+
|
784
|
+
void ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
|
785
|
+
|
786
|
+
typedef struct rb_thread_struct {
|
787
|
+
struct list_node vmlt_node;
|
788
|
+
VALUE self;
|
789
|
+
rb_vm_t *vm;
|
790
|
+
|
791
|
+
rb_execution_context_t *ec;
|
792
|
+
|
793
|
+
VALUE last_status; /* $? */
|
794
|
+
|
795
|
+
/* for cfunc */
|
796
|
+
struct rb_calling_info *calling;
|
797
|
+
|
798
|
+
/* for load(true) */
|
799
|
+
VALUE top_self;
|
800
|
+
VALUE top_wrapper;
|
801
|
+
|
802
|
+
/* thread control */
|
803
|
+
rb_nativethread_id_t thread_id;
|
804
|
+
#ifdef NON_SCALAR_THREAD_ID
|
805
|
+
rb_thread_id_string_t thread_id_string;
|
806
|
+
#endif
|
807
|
+
enum rb_thread_status status;
|
808
|
+
int to_kill;
|
809
|
+
int priority;
|
810
|
+
|
811
|
+
native_thread_data_t native_thread_data;
|
812
|
+
void *blocking_region_buffer;
|
813
|
+
|
814
|
+
VALUE thgroup;
|
815
|
+
VALUE value;
|
816
|
+
|
817
|
+
/* temporary place of retval on OPT_CALL_THREADED_CODE */
|
818
|
+
#if OPT_CALL_THREADED_CODE
|
819
|
+
VALUE retval;
|
820
|
+
#endif
|
821
|
+
|
822
|
+
/* async errinfo queue */
|
823
|
+
VALUE pending_interrupt_queue;
|
824
|
+
VALUE pending_interrupt_mask_stack;
|
825
|
+
int pending_interrupt_queue_checked;
|
826
|
+
|
827
|
+
/* interrupt management */
|
828
|
+
rb_nativethread_lock_t interrupt_lock;
|
829
|
+
struct rb_unblock_callback unblock;
|
830
|
+
VALUE locking_mutex;
|
831
|
+
struct rb_mutex_struct *keeping_mutexes;
|
832
|
+
|
833
|
+
rb_thread_list_t *join_list;
|
834
|
+
|
835
|
+
VALUE first_proc;
|
836
|
+
VALUE first_args;
|
837
|
+
VALUE (*first_func)(ANYARGS);
|
838
|
+
|
839
|
+
/* statistics data for profiler */
|
840
|
+
VALUE stat_insn_usage;
|
841
|
+
|
842
|
+
/* fiber */
|
843
|
+
rb_fiber_t *root_fiber;
|
844
|
+
rb_jmpbuf_t root_jmpbuf;
|
845
|
+
|
846
|
+
/* misc */
|
847
|
+
unsigned int abort_on_exception: 1;
|
848
|
+
unsigned int report_on_exception: 1;
|
849
|
+
#ifdef USE_SIGALTSTACK
|
850
|
+
void *altstack;
|
851
|
+
#endif
|
852
|
+
uint32_t running_time_us; /* 12500..800000 */
|
853
|
+
VALUE name;
|
854
|
+
} rb_thread_t;
|
855
|
+
|
856
|
+
typedef enum {
|
857
|
+
VM_DEFINECLASS_TYPE_CLASS = 0x00,
|
858
|
+
VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
|
859
|
+
VM_DEFINECLASS_TYPE_MODULE = 0x02,
|
860
|
+
/* 0x03..0x06 is reserved */
|
861
|
+
VM_DEFINECLASS_TYPE_MASK = 0x07
|
862
|
+
} rb_vm_defineclass_type_t;
|
863
|
+
|
864
|
+
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
|
865
|
+
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
|
866
|
+
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
|
867
|
+
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
|
868
|
+
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
|
869
|
+
((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
|
870
|
+
|
871
|
+
/* iseq.c */
|
872
|
+
RUBY_SYMBOL_EXPORT_BEGIN
|
873
|
+
|
874
|
+
/* node -> iseq */
|
875
|
+
rb_iseq_t *rb_iseq_new (const NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
|
876
|
+
rb_iseq_t *rb_iseq_new_top (const NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
|
877
|
+
rb_iseq_t *rb_iseq_new_main (const NODE *node, VALUE path, VALUE realpath, const rb_iseq_t *parent);
|
878
|
+
rb_iseq_t *rb_iseq_new_with_opt(const NODE *node, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
|
879
|
+
const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
|
880
|
+
|
881
|
+
/* src -> iseq */
|
882
|
+
rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
|
883
|
+
rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
|
884
|
+
rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, const struct rb_block *base_block, VALUE opt);
|
885
|
+
|
886
|
+
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
|
887
|
+
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
|
888
|
+
const char *ruby_node_name(int node);
|
889
|
+
|
890
|
+
VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
|
891
|
+
|
892
|
+
RUBY_EXTERN VALUE rb_cISeq;
|
893
|
+
RUBY_EXTERN VALUE rb_cRubyVM;
|
894
|
+
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
|
895
|
+
RUBY_SYMBOL_EXPORT_END
|
896
|
+
|
897
|
+
#define GetProcPtr(obj, ptr) \
|
898
|
+
GetCoreDataFromValue((obj), rb_proc_t, (ptr))
|
899
|
+
|
900
|
+
typedef struct {
|
901
|
+
const struct rb_block block;
|
902
|
+
int8_t safe_level; /* 0..1 */
|
903
|
+
int8_t is_from_method; /* bool */
|
904
|
+
int8_t is_lambda; /* bool */
|
905
|
+
} rb_proc_t;
|
906
|
+
|
907
|
+
typedef struct {
|
908
|
+
VALUE flags; /* imemo header */
|
909
|
+
const rb_iseq_t *iseq;
|
910
|
+
const VALUE *ep;
|
911
|
+
const VALUE *env;
|
912
|
+
unsigned int env_size;
|
913
|
+
} rb_env_t;
|
914
|
+
|
915
|
+
extern const rb_data_type_t ruby_binding_data_type;
|
916
|
+
|
917
|
+
#define GetBindingPtr(obj, ptr) \
|
918
|
+
GetCoreDataFromValue((obj), rb_binding_t, (ptr))
|
919
|
+
|
920
|
+
typedef struct {
|
921
|
+
const struct rb_block block;
|
922
|
+
const VALUE pathobj;
|
923
|
+
unsigned short first_lineno;
|
924
|
+
} rb_binding_t;
|
925
|
+
|
926
|
+
/* used by compile time and send insn */
|
927
|
+
|
928
|
+
enum vm_check_match_type {
|
929
|
+
VM_CHECKMATCH_TYPE_WHEN = 1,
|
930
|
+
VM_CHECKMATCH_TYPE_CASE = 2,
|
931
|
+
VM_CHECKMATCH_TYPE_RESCUE = 3
|
932
|
+
};
|
933
|
+
|
934
|
+
#define VM_CHECKMATCH_TYPE_MASK 0x03
|
935
|
+
#define VM_CHECKMATCH_ARRAY 0x04
|
936
|
+
|
937
|
+
enum vm_call_flag_bits {
|
938
|
+
VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
|
939
|
+
VM_CALL_ARGS_BLOCKARG_bit, /* m(&block) */
|
940
|
+
VM_CALL_ARGS_BLOCKARG_BLOCKPARAM_bit, /* m(&block) and block is a passed block parameter */
|
941
|
+
VM_CALL_FCALL_bit, /* m(...) */
|
942
|
+
VM_CALL_VCALL_bit, /* m */
|
943
|
+
VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
|
944
|
+
VM_CALL_BLOCKISEQ_bit, /* has blockiseq */
|
945
|
+
VM_CALL_KWARG_bit, /* has kwarg */
|
946
|
+
VM_CALL_KW_SPLAT_bit, /* m(**opts) */
|
947
|
+
VM_CALL_TAILCALL_bit, /* located at tail position */
|
948
|
+
VM_CALL_SUPER_bit, /* super */
|
949
|
+
VM_CALL_OPT_SEND_bit, /* internal flag */
|
950
|
+
VM_CALL__END
|
951
|
+
};
|
952
|
+
|
953
|
+
#define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit)
|
954
|
+
#define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
|
955
|
+
#define VM_CALL_ARGS_BLOCKARG_BLOCKPARAM (0x01 << VM_CALL_ARGS_BLOCKARG_BLOCKPARAM_bit)
|
956
|
+
#define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
|
957
|
+
#define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
|
958
|
+
#define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
|
959
|
+
#define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit)
|
960
|
+
#define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
|
961
|
+
#define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
|
962
|
+
#define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
|
963
|
+
#define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit)
|
964
|
+
#define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
|
965
|
+
|
966
|
+
enum vm_special_object_type {
|
967
|
+
VM_SPECIAL_OBJECT_VMCORE = 1,
|
968
|
+
VM_SPECIAL_OBJECT_CBASE,
|
969
|
+
VM_SPECIAL_OBJECT_CONST_BASE
|
970
|
+
};
|
971
|
+
|
972
|
+
enum vm_svar_index {
|
973
|
+
VM_SVAR_LASTLINE = 0, /* $_ */
|
974
|
+
VM_SVAR_BACKREF = 1, /* $~ */
|
975
|
+
|
976
|
+
VM_SVAR_EXTRA_START = 2,
|
977
|
+
VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
|
978
|
+
};
|
979
|
+
|
980
|
+
/* inline cache */
|
981
|
+
typedef struct iseq_inline_cache_entry *IC;
|
982
|
+
typedef struct rb_call_info *CALL_INFO;
|
983
|
+
typedef struct rb_call_cache *CALL_CACHE;
|
984
|
+
|
985
|
+
void rb_vm_change_state(void);
|
986
|
+
|
987
|
+
typedef VALUE CDHASH;
|
988
|
+
|
989
|
+
#ifndef FUNC_FASTCALL
|
990
|
+
#define FUNC_FASTCALL(x) x
|
991
|
+
#endif
|
992
|
+
|
993
|
+
typedef rb_control_frame_t *
|
994
|
+
(FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
|
995
|
+
|
996
|
+
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
|
997
|
+
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
|
998
|
+
|
999
|
+
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
|
1000
|
+
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
|
1001
|
+
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
|
1002
|
+
|
1003
|
+
enum {
|
1004
|
+
/* Frame/Environment flag bits:
|
1005
|
+
* MMMM MMMM MMMM MMMM ____ __FF FFFF EEEX (LSB)
|
1006
|
+
*
|
1007
|
+
* X : tag for GC marking (It seems as Fixnum)
|
1008
|
+
* EEE : 3 bits Env flags
|
1009
|
+
* FF..: 6 bits Frame flags
|
1010
|
+
* MM..: 16 bits frame magic (to check frame corruption)
|
1011
|
+
*/
|
1012
|
+
|
1013
|
+
/* frame types */
|
1014
|
+
VM_FRAME_MAGIC_METHOD = 0x11110001,
|
1015
|
+
VM_FRAME_MAGIC_BLOCK = 0x22220001,
|
1016
|
+
VM_FRAME_MAGIC_CLASS = 0x33330001,
|
1017
|
+
VM_FRAME_MAGIC_TOP = 0x44440001,
|
1018
|
+
VM_FRAME_MAGIC_CFUNC = 0x55550001,
|
1019
|
+
VM_FRAME_MAGIC_IFUNC = 0x66660001,
|
1020
|
+
VM_FRAME_MAGIC_EVAL = 0x77770001,
|
1021
|
+
VM_FRAME_MAGIC_RESCUE = 0x88880001,
|
1022
|
+
VM_FRAME_MAGIC_DUMMY = 0x99990001,
|
1023
|
+
|
1024
|
+
VM_FRAME_MAGIC_MASK = 0xffff0001,
|
1025
|
+
|
1026
|
+
/* frame flag */
|
1027
|
+
VM_FRAME_FLAG_PASSED = 0x0010,
|
1028
|
+
VM_FRAME_FLAG_FINISH = 0x0020,
|
1029
|
+
VM_FRAME_FLAG_BMETHOD = 0x0040,
|
1030
|
+
VM_FRAME_FLAG_CFRAME = 0x0080,
|
1031
|
+
VM_FRAME_FLAG_LAMBDA = 0x0100,
|
1032
|
+
VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
|
1033
|
+
|
1034
|
+
/* env flag */
|
1035
|
+
VM_ENV_FLAG_LOCAL = 0x0002,
|
1036
|
+
VM_ENV_FLAG_ESCAPED = 0x0004,
|
1037
|
+
VM_ENV_FLAG_WB_REQUIRED = 0x0008
|
1038
|
+
};
|
1039
|
+
|
1040
|
+
#define VM_ENV_DATA_SIZE ( 3)
|
1041
|
+
|
1042
|
+
#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
|
1043
|
+
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
|
1044
|
+
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
|
1045
|
+
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
|
1046
|
+
#define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */
|
1047
|
+
|
1048
|
+
#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
|
1049
|
+
|
1050
|
+
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
|
1051
|
+
|
1052
|
+
static inline void
|
1053
|
+
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
|
1054
|
+
{
|
1055
|
+
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
1056
|
+
VM_ASSERT(FIXNUM_P(flags));
|
1057
|
+
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
|
1058
|
+
}
|
1059
|
+
|
1060
|
+
static inline void
|
1061
|
+
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
|
1062
|
+
{
|
1063
|
+
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
1064
|
+
VM_ASSERT(FIXNUM_P(flags));
|
1065
|
+
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
|
1066
|
+
}
|
1067
|
+
|
1068
|
+
static inline unsigned long
|
1069
|
+
VM_ENV_FLAGS(const VALUE *ep, long flag)
|
1070
|
+
{
|
1071
|
+
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
|
1072
|
+
VM_ASSERT(FIXNUM_P(flags));
|
1073
|
+
return flags & flag;
|
1074
|
+
}
|
1075
|
+
|
1076
|
+
static inline unsigned long
|
1077
|
+
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
|
1078
|
+
{
|
1079
|
+
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
|
1080
|
+
}
|
1081
|
+
|
1082
|
+
static inline int
|
1083
|
+
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
|
1084
|
+
{
|
1085
|
+
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
|
1086
|
+
}
|
1087
|
+
|
1088
|
+
static inline int
|
1089
|
+
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
|
1090
|
+
{
|
1091
|
+
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
|
1092
|
+
}
|
1093
|
+
|
1094
|
+
static inline int
|
1095
|
+
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
|
1096
|
+
{
|
1097
|
+
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
|
1098
|
+
}
|
1099
|
+
|
1100
|
+
static inline int
|
1101
|
+
rb_obj_is_iseq(VALUE iseq)
|
1102
|
+
{
|
1103
|
+
return imemo_type_p(iseq, imemo_iseq);
|
1104
|
+
}
|
1105
|
+
|
1106
|
+
#if VM_CHECK_MODE > 0
|
1107
|
+
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
|
1108
|
+
#endif
|
1109
|
+
|
1110
|
+
static inline int
|
1111
|
+
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
|
1112
|
+
{
|
1113
|
+
int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
|
1114
|
+
VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
|
1115
|
+
return cframe_p;
|
1116
|
+
}
|
1117
|
+
|
1118
|
+
static inline int
|
1119
|
+
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
|
1120
|
+
{
|
1121
|
+
return !VM_FRAME_CFRAME_P(cfp);
|
1122
|
+
}
|
1123
|
+
|
1124
|
+
#define RUBYVM_CFUNC_FRAME_P(cfp) \
|
1125
|
+
(VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
|
1126
|
+
|
1127
|
+
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
|
1128
|
+
#define VM_BLOCK_HANDLER_NONE 0
|
1129
|
+
|
1130
|
+
static inline int
|
1131
|
+
VM_ENV_LOCAL_P(const VALUE *ep)
|
1132
|
+
{
|
1133
|
+
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
|
1134
|
+
}
|
1135
|
+
|
1136
|
+
static inline const VALUE *
|
1137
|
+
VM_ENV_PREV_EP(const VALUE *ep)
|
1138
|
+
{
|
1139
|
+
VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
|
1140
|
+
return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
|
1141
|
+
}
|
1142
|
+
|
1143
|
+
static inline VALUE
|
1144
|
+
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
|
1145
|
+
{
|
1146
|
+
VM_ASSERT(VM_ENV_LOCAL_P(ep));
|
1147
|
+
return ep[VM_ENV_DATA_INDEX_SPECVAL];
|
1148
|
+
}
|
1149
|
+
|
1150
|
+
#if VM_CHECK_MODE > 0
|
1151
|
+
int rb_vm_ep_in_heap_p(const VALUE *ep);
|
1152
|
+
#endif
|
1153
|
+
|
1154
|
+
static inline int
|
1155
|
+
VM_ENV_ESCAPED_P(const VALUE *ep)
|
1156
|
+
{
|
1157
|
+
VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
|
1158
|
+
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
|
1159
|
+
}
|
1160
|
+
|
1161
|
+
#if VM_CHECK_MODE > 0
|
1162
|
+
static inline int
|
1163
|
+
vm_assert_env(VALUE obj)
|
1164
|
+
{
|
1165
|
+
VM_ASSERT(imemo_type_p(obj, imemo_env));
|
1166
|
+
return 1;
|
1167
|
+
}
|
1168
|
+
#endif
|
1169
|
+
|
1170
|
+
static inline VALUE
|
1171
|
+
VM_ENV_ENVVAL(const VALUE *ep)
|
1172
|
+
{
|
1173
|
+
VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
|
1174
|
+
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
|
1175
|
+
VM_ASSERT(vm_assert_env(envval));
|
1176
|
+
return envval;
|
1177
|
+
}
|
1178
|
+
|
1179
|
+
static inline const rb_env_t *
|
1180
|
+
VM_ENV_ENVVAL_PTR(const VALUE *ep)
|
1181
|
+
{
|
1182
|
+
return (const rb_env_t *)VM_ENV_ENVVAL(ep);
|
1183
|
+
}
|
1184
|
+
|
1185
|
+
static inline VALUE
|
1186
|
+
VM_ENV_PROCVAL(const VALUE *ep)
|
1187
|
+
{
|
1188
|
+
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
|
1189
|
+
VM_ASSERT(VM_ENV_LOCAL_P(ep));
|
1190
|
+
VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE);
|
1191
|
+
|
1192
|
+
return ep[VM_ENV_DATA_INDEX_ENV_PROC];
|
1193
|
+
}
|
1194
|
+
|
1195
|
+
static inline const rb_env_t *
|
1196
|
+
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
|
1197
|
+
{
|
1198
|
+
rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
|
1199
|
+
env->env_size = env_size;
|
1200
|
+
env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
|
1201
|
+
return env;
|
1202
|
+
}
|
1203
|
+
|
1204
|
+
static inline void
|
1205
|
+
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
|
1206
|
+
{
|
1207
|
+
*((VALUE *)ptr) = v;
|
1208
|
+
}
|
1209
|
+
|
1210
|
+
static inline void
|
1211
|
+
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
|
1212
|
+
{
|
1213
|
+
VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
|
1214
|
+
VM_FORCE_WRITE(ptr, special_const_value);
|
1215
|
+
}
|
1216
|
+
|
1217
|
+
static inline void
|
1218
|
+
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
|
1219
|
+
{
|
1220
|
+
VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
|
1221
|
+
VM_FORCE_WRITE(&ep[index], v);
|
1222
|
+
}
|
1223
|
+
|
1224
|
+
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
|
1225
|
+
const VALUE *rb_vm_proc_local_ep(VALUE proc);
|
1226
|
+
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
|
1227
|
+
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
|
1228
|
+
|
1229
|
+
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
|
1230
|
+
|
1231
|
+
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
|
1232
|
+
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
|
1233
|
+
|
1234
|
+
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
|
1235
|
+
((void *)(ecfp) > (void *)(cfp))
|
1236
|
+
|
1237
|
+
static inline const rb_control_frame_t *
|
1238
|
+
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
|
1239
|
+
{
|
1240
|
+
return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
|
1241
|
+
}
|
1242
|
+
|
1243
|
+
static inline int
|
1244
|
+
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
|
1245
|
+
{
|
1246
|
+
return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
|
1247
|
+
}

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
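A block handler packs a pointer and a two-bit type tag into a single VALUE: rb_captured_block is at least 4-byte aligned, so the low two bits are free; 01 marks an iseq block and 11 an ifunc block (symbols and procs, further down, are passed as their own untagged VALUEs). Judging from the open-coded (void *)(block_handler & ~0x03) in VM_BH_IFUNC_P, the tagging macros plausibly reduce to the following; this is a hedged reconstruction, not the header's definition:

/* Sketch (not part of the diff): assumed expansion of the tagging macros. */
#define TAGGED_PTR_SET_SKETCH(p, tag)  ((VALUE)(p) | (VALUE)(tag))       /* attach the type tag */
#define TAGGED_PTR_REF_SKETCH(v, mask) ((void *)((v) & ~(VALUE)(mask)))  /* strip it back off   */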

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}
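Consumers normally switch on vm_block_handler_type and unwrap with the matching VM_BH_TO_* accessor (VM_BH_TO_SYMBOL and VM_BH_TO_PROC appear further down); this is how a yield decides between running an iseq, a C ifunc, a Symbol#to_proc shortcut, or a full Proc. A sketch of the idiom, with a hypothetical dispatch function:

/* Sketch (not part of the diff): the standard four-way dispatch. */
static void
dispatch_block_sketch(VALUE block_handler)
{
    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:
        (void)VM_BH_TO_ISEQ_BLOCK(block_handler);  /* captured block backed by an iseq */
        break;
      case block_handler_type_ifunc:
        (void)VM_BH_TO_IFUNC_BLOCK(block_handler); /* captured block backed by a C func */
        break;
      case block_handler_type_symbol:
        (void)VM_BH_TO_SYMBOL(block_handler);      /* the Symbol VALUE itself */
        break;
      case block_handler_type_proc:
        (void)VM_BH_TO_PROC(block_handler);        /* the Proc VALUE itself */
        break;
    }
}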

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
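Note the mutual recursion above: a block of type proc delegates to the wrapped Proc's own block, so vm_block_iseq, vm_block_ep and vm_block_self all bottom out at an iseq/ifunc capture (or NULL / Qundef for symbol blocks). Together with rb_iseq_path, declared below, that suffices to recover a Proc's source file; a hedged sketch with a hypothetical helper:

/* Sketch (not part of the diff): resolve a Proc to its defining file. */
static VALUE
proc_path_sketch(VALUE procval)
{
    const rb_iseq_t *iseq = vm_proc_iseq(procval);
    return iseq ? rb_iseq_path(iseq) : Qnil; /* NULL for ifunc/symbol-backed blocks */
}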

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);

/* for debug */
extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
void rb_vm_bugreport(const void *);
NORETURN(void rb_bug_context(const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
void rb_vm_gvl_destroy(rb_vm_t *vm);
VALUE rb_vm_call(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                 const VALUE *argv, const rb_callable_method_entry_t *me);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(void);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    list_head_init(&vm->waiting_fds);
    list_head_init(&vm->living_threads);
    vm->living_thread_num = 0;
}

static inline void
rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
{
    list_add_tail(&vm->living_threads, &th->vmlt_node);
    vm->living_thread_num++;
}

static inline void
rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
{
    list_del(&th->vmlt_node);
    vm->living_thread_num--;
}
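Thread bookkeeping uses the vendored ccan intrusive list (ccan/list/list.h in this gem): every rb_thread_t embeds a vmlt_node and rb_vm_t owns the head, so insert and remove are O(1) pointer splices with no allocation. Iteration goes through ccan's list_for_each(head, iter, member); a sketch with a hypothetical counter:

/* Sketch (not part of the diff): walk the intrusive living-threads list. */
static inline int
count_living_threads_sketch(rb_vm_t *vm)
{
    rb_thread_t *th;
    int n = 0;
    list_for_each(&vm->living_threads, th, vmlt_node) {
        n++;
    }
    return n; /* expected to equal vm->living_thread_num */
}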

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
    (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
     !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
     ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
    if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
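The value stack and the control-frame stack share one vm_stack region and grow toward each other (sp climbs from the bottom, cfp descends from the top), so "overflow" is simply sp plus the required margin crossing cfp. RUBY_CONST_ASSERT costs nothing at run time: 1/!!(expr) becomes a division by zero the compiler flags when expr is a false compile-time constant, and folds to 1 otherwise. A typical call site reserves room before pushing arguments; a sketch:

/* Sketch (not part of the diff): reserve argc slots plus the receiver
 * before pushing; vm_stackoverflow() raises the prebuilt sysstack_error. */
CHECK_VM_STACK_OVERFLOW(ec->cfp, argc + 1);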

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2
RUBY_SYMBOL_EXPORT_BEGIN

extern rb_vm_t *ruby_current_vm_ptr;
extern rb_execution_context_t *ruby_current_execution_context_ptr;
extern rb_event_flag_t ruby_vm_event_flags;
extern rb_event_flag_t ruby_vm_event_enabled_flags;

RUBY_SYMBOL_EXPORT_END

#define GET_VM() rb_current_vm()
#define GET_THREAD() rb_current_thread()
#define GET_EC() rb_current_execution_context()

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

static inline rb_execution_context_t *
rb_current_execution_context(void)
{
    return ruby_current_execution_context_ptr;
}

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_vm_t *
rb_current_vm(void)
{
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
    return ruby_current_vm_ptr;
}
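Under thread model 2 (one native thread executes Ruby at a time, serialized by the GVL), the "current" VM and execution context are plain process globals rather than thread-local lookups; the assertion in rb_current_vm only cross-checks that the two globals agree. Internal code picks up its context like this (a sketch):

/* Sketch (not part of the diff): the usual context-access chain. */
rb_execution_context_t *ec = GET_EC();  /* ruby_current_execution_context_ptr */
rb_thread_t *th = rb_ec_thread_ptr(ec); /* ec -> owning thread */
rb_vm_t *vm = rb_ec_vm_ptr(ec);         /* thread -> VM (NULL-safe) */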

#define rb_thread_set_current_raw(th) (void)(ruby_current_execution_context_ptr = (th)->ec)
#define rb_thread_set_current(th) do { \
    if ((th)->vm->running_thread != (th)) { \
        (th)->running_time_us = 0; \
    } \
    rb_thread_set_current_raw(th); \
    (th)->vm->running_thread = (th); \
} while (0)

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)     ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                     (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
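interrupt_flag is a bitset that other threads (or signal handlers) set atomically, while interrupt_mask lets a critical section temporarily block categories. RUBY_VM_INTERRUPTED reports only the two kinds that surface to Ruby code as exceptions (pending interrupts and traps); RUBY_VM_INTERRUPTED_ANY reports everything unmasked. A worked example with illustrative values:

/* Worked example (not part of the diff):
 *   interrupt_flag = TIMER_INTERRUPT_MASK | TRAP_INTERRUPT_MASK = 0x09
 *   interrupt_mask = 0
 *
 *   RUBY_VM_INTERRUPTED_ANY(ec) -> 0x09 & ~0x00        = 0x09  (timer + trap)
 *   RUBY_VM_INTERRUPTED(ec)     -> 0x09 & ~0x00 & 0x0A = 0x08  (trap only)
 *
 * Setting interrupt_mask = TRAP_INTERRUPT_MASK would hide the 0x08 bit
 * from both macros until the mask is cleared. */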

VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
void rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
void rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}

/* tracer */
struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, flag_, vm_flags_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    if (UNLIKELY(vm_flags_ & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)

static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, const rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events == ruby_vm_event_flags);
    VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events & flag);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;
    rb_exec_event_hooks(&trace_arg, pop_p);
}

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 1)
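The two macro layers keep tracing near zero-cost when disabled: EXEC_EVENT_HOOK_ORIG tests the global ruby_vm_event_flags first, and only then evaluates the remaining arguments (which can involve method-entry lookups) and builds the rb_trace_arg_struct. A call site in the interpreter looks roughly like the following sketch, where recv, mid and klass stand in for the real operands:

/* Sketch (not part of the diff): fire a :call event for the current frame. */
EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, recv, mid, mid, klass, Qundef);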

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_reset_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */