debase-ruby_core_source 0.7.5 → 0.7.6
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/CONTRIBUTING.md +2 -1
- data/Rakefile +3 -2
- data/lib/debase/ruby_core_source/ruby-2.2.1/addr2line.h +21 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ccan/build_assert/build_assert.h +40 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ccan/check_type/check_type.h +63 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ccan/container_of/container_of.h +142 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ccan/list/list.h +635 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ccan/str/str.h +16 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/constant.h +43 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/dln.h +51 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/eval_intern.h +275 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/gc.h +107 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/id.h +210 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/insns.inc +107 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/insns_info.inc +752 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/internal.h +1185 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/iseq.h +158 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/known_errors.inc +746 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/method.h +144 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/node.h +544 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/node_name.inc +212 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/opt_sc.inc +734 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/optinsn.inc +83 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/optunifs.inc +124 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/parse.h +203 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/probes_helper.h +67 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/regenc.h +234 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/regint.h +972 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/regparse.h +363 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/revision.h +1 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/ruby_atomic.h +170 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/siphash.h +48 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/symbol.h +87 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/thread_pthread.h +54 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/thread_win32.h +36 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/timev.h +42 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/transcode_data.h +123 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/version.h +52 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm.inc +3353 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm_core.h +1161 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm_debug.h +37 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm_exec.h +182 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm_insnhelper.h +233 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vm_opts.h +56 -0
- data/lib/debase/ruby_core_source/ruby-2.2.1/vmtc.inc +105 -0
- data/lib/debase/ruby_core_source/version.rb +1 -1
- metadata +45 -2
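The bulk of this release is the new `ruby-2.2.1/` header set listed above: debase-ruby_core_source vendors MRI's private C headers (`vm_core.h`, `iseq.h`, and friends) so that debugger extensions can compile against VM internals that a normal Ruby installation does not ship. As a rough illustration of how a consumer wires this in, here is a minimal `extconf.rb` sketch in the style of the `debase` gem; it assumes the `Debase::RubyCoreSource.create_makefile_with_core` helper that `debase` itself calls, and `my_extension` plus the specific header checks are illustrative only:

```ruby
# extconf.rb -- minimal sketch, not the canonical debase build script.
# Assumes Debase::RubyCoreSource.create_makefile_with_core, the helper the
# debase gem uses; "my_extension" is a hypothetical extension name.
require "mkmf"
require "debase/ruby_core_source"

# The block is evaluated against candidate header directories (e.g. the
# vendored ruby-2.2.1/ set added in this release) until the private headers
# the extension needs all resolve.
hdrs = proc {
  have_header("vm_core.h") and
    have_header("iseq.h")
}

# Adds the matching ruby-X.Y.Z headers to the include path and writes the
# Makefile for the extension.
Debase::RubyCoreSource.create_makefile_with_core(hdrs, "my_extension")
```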
data/lib/debase/ruby_core_source/ruby-2.2.1/vm_core.h (new file):

```diff
@@ -0,0 +1,1161 @@
+/**********************************************************************
+
+  vm_core.h -
+
+  $Author: naruse $
+  created at: 04/01/01 19:41:38 JST
+
+  Copyright (C) 2004-2007 Koichi Sasada
+
+**********************************************************************/
+
+#ifndef RUBY_VM_CORE_H
+#define RUBY_VM_CORE_H
+
+#define RUBY_VM_THREAD_MODEL 2
+
+#include "ruby/ruby.h"
+#include "ruby/st.h"
+
+#include "node.h"
+#include "vm_debug.h"
+#include "vm_opts.h"
+#include "id.h"
+#include "method.h"
+#include "ruby_atomic.h"
+#include "ccan/list/list.h"
+
+#include "ruby/thread_native.h"
+#if defined(_WIN32)
+#include "thread_win32.h"
+#elif defined(HAVE_PTHREAD_H)
+#include "thread_pthread.h"
+#endif
+
+#ifndef ENABLE_VM_OBJSPACE
+#ifdef _WIN32
+/*
+ * TODO: object space independent st_table.
+ * socklist needs st_table in rb_w32_sysinit(), before object space
+ * initialization.
+ * It is too early now to change st_hash_type, since it breaks binary
+ * compatibility.
+ */
+#define ENABLE_VM_OBJSPACE 0
+#else
+#define ENABLE_VM_OBJSPACE 1
+#endif
+#endif
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef NSIG
+# define NSIG (_SIGMAX + 1)       /* For QNX */
+#endif
+
+#define RUBY_NSIG NSIG
+
+#ifdef HAVE_STDARG_PROTOTYPES
+#include <stdarg.h>
+#define va_init_list(a,b) va_start((a),(b))
+#else
+#include <varargs.h>
+#define va_init_list(a,b) va_start((a))
+#endif
+
+#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+#define USE_SIGALTSTACK
+#endif
+
+/*****************/
+/* configuration */
+/*****************/
+
+/* gcc ver. check */
+#if defined(__GNUC__) && __GNUC__ >= 2
+
+#if OPT_TOKEN_THREADED_CODE
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+#endif
+
+#else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+/* disable threaded code options */
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+#if OPT_TOKEN_THREADED_CODE
+#undef OPT_TOKEN_THREADED_CODE
+#endif
+#endif
+
+#ifdef __native_client__
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+
+/* call threaded code */
+#if OPT_CALL_THREADED_CODE
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif /* OPT_DIRECT_THREADED_CODE */
+#if OPT_STACK_CACHING
+#undef OPT_STACK_CACHING
+#endif /* OPT_STACK_CACHING */
+#endif /* OPT_CALL_THREADED_CODE */
+
+typedef unsigned long rb_num_t;
+
+/* iseq data type */
+
+struct iseq_compile_data_ensure_node_stack;
+
+typedef struct rb_compile_option_struct rb_compile_option_t;
+
+
+struct iseq_inline_cache_entry {
+    rb_serial_t ic_serial;
+    union {
+        size_t index;
+        VALUE value;
+    } ic_value;
+};
+
+union iseq_inline_storage_entry {
+    struct {
+        struct rb_thread_struct *running_thread;
+        VALUE value;
+    } once;
+    struct iseq_inline_cache_entry cache;
+};
+
+/* to avoid warning */
+struct rb_thread_struct;
+struct rb_control_frame_struct;
+
+typedef struct rb_call_info_kw_arg_struct {
+    int keyword_len;
+    VALUE keywords[1];
+} rb_call_info_kw_arg_t;
+
+/* rb_call_info_t contains calling information including inline cache */
+typedef struct rb_call_info_struct {
+    /* fixed at compile time */
+    ID mid;
+
+    unsigned int flag;
+    int orig_argc;
+    rb_iseq_t *blockiseq;
+    rb_call_info_kw_arg_t *kw_arg;
+
+    /* inline cache: keys */
+    rb_serial_t method_state;
+    rb_serial_t class_serial;
+    VALUE klass;
+
+    /* inline cache: values */
+    const rb_method_entry_t *me;
+    VALUE defined_class;
+
+    /* temporary values for method calling */
+    struct rb_block_struct *blockptr;
+    VALUE recv;
+    int argc;
+    union {
+        int opt_pc; /* used by iseq */
+        int index; /* used by ivar */
+        int missing_reason; /* used by method_missing */
+        int inc_sp; /* used by cfunc */
+    } aux;
+
+    VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_call_info_struct *ci);
+} rb_call_info_t;
+
+#if 1
+#define GetCoreDataFromValue(obj, type, ptr) do { \
+    (ptr) = (type*)DATA_PTR(obj); \
+} while (0)
+#else
+#define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct((obj), type, (ptr))
+#endif
+
+#define GetISeqPtr(obj, ptr) \
+  GetCoreDataFromValue((obj), rb_iseq_t, (ptr))
+
+typedef struct rb_iseq_location_struct {
+    const VALUE path;
+    const VALUE absolute_path;
+    const VALUE base_label;
+    const VALUE label;
+    VALUE first_lineno; /* TODO: may be unsigned short */
+} rb_iseq_location_t;
+
+struct rb_iseq_struct;
+
+struct rb_iseq_struct {
+    /***************/
+    /* static data */
+    /***************/
+
+    enum iseq_type {
+        ISEQ_TYPE_TOP,
+        ISEQ_TYPE_METHOD,
+        ISEQ_TYPE_BLOCK,
+        ISEQ_TYPE_CLASS,
+        ISEQ_TYPE_RESCUE,
+        ISEQ_TYPE_ENSURE,
+        ISEQ_TYPE_EVAL,
+        ISEQ_TYPE_MAIN,
+        ISEQ_TYPE_DEFINED_GUARD
+    } type;              /* instruction sequence type */
+#if defined(WORDS_BIGENDIAN) && (SIZEOF_VALUE > SIZEOF_INT)
+    char dummy[SIZEOF_VALUE - SIZEOF_INT]; /* [Bug #10037][ruby-core:63721] */
+#endif
+    int stack_max; /* for stack overflow check */
+
+    rb_iseq_location_t location;
+
+    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
+    unsigned int iseq_size;
+    unsigned int line_info_size;
+
+    const VALUE mark_ary;     /* Array: includes operands which should be GC marked */
+    const VALUE coverage;     /* coverage array */
+
+    /* insn info, must be freed */
+    struct iseq_line_info_entry *line_info_table;
+
+    ID *local_table;          /* must free */
+    int local_table_size;
+
+    /* sizeof(vars) + 1 */
+    int local_size;
+
+    union iseq_inline_storage_entry *is_entries;
+    int is_size;
+
+    int callinfo_size;
+    rb_call_info_t *callinfo_entries;
+
+    /**
+     * parameter information
+     *
+     *  def m(a1, a2, ..., aM,                    # mandatory
+     *        b1=(...), b2=(...), ..., bN=(...),  # optional
+     *        *c,                                 # rest
+     *        d1, d2, ..., dO,                    # post
+     *        e1:(...), e2:(...), ..., eK:(...),  # keyword
+     *        **f,                                # keyword_rest
+     *        &g)                                 # block
+     * =>
+     *
+     *  lead_num     = M
+     *  opt_num      = N
+     *  rest_start   = M+N
+     *  post_start   = M+N+(*1)
+     *  post_num     = O
+     *  keyword_num  = K
+     *  block_start  = M+N+(*1)+O+K
+     *  keyword_bits = M+N+(*1)+O+K+(&1)
+     *  size         = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
+     */
+
+    struct {
+        struct {
+            unsigned int has_lead   : 1;
+            unsigned int has_opt    : 1;
+            unsigned int has_rest   : 1;
+            unsigned int has_post   : 1;
+            unsigned int has_kw     : 1;
+            unsigned int has_kwrest : 1;
+            unsigned int has_block  : 1;
+
+            unsigned int ambiguous_param0 : 1; /* {|a|} */
+        } flags;
+
+        int size;
+
+        int lead_num;
+        int opt_num;
+        int rest_start;
+        int post_start;
+        int post_num;
+        int block_start;
+
+        VALUE *opt_table; /* (opt_num + 1) entries. */
+        /* opt_num and opt_table:
+         *
+         *  def foo o1=e1, o2=e2, ..., oN=eN
+         *  #=>
+         *   # prologue code
+         *   A1: e1
+         *   A2: e2
+         *   ...
+         *   AN: eN
+         *   AL: body
+         *  opt_num = N
+         *  opt_table = [A1, A2, ..., AN, AL]
+         */
+
+        struct rb_iseq_param_keyword {
+            int num;
+            int required_num;
+            int bits_start;
+            int rest_start;
+            ID *table;
+            VALUE *default_values;
+        } *keyword;
+    } param;
+
+    /* catch table */
+    struct iseq_catch_table *catch_table;
+
+    /* for child iseq */
+    struct rb_iseq_struct *parent_iseq;
+    struct rb_iseq_struct *local_iseq;
+
+    /****************/
+    /* dynamic data */
+    /****************/
+
+    VALUE self;
+    const VALUE orig; /* non-NULL if its data have origin */
+
+    /* block inlining */
+    /*
+     * NODE *node;
+     * void *special_block_builder;
+     * void *cached_special_block_builder;
+     * VALUE cached_special_block;
+     */
+
+    /* klass/module nest information stack (cref) */
+    NODE * const cref_stack;
+    const VALUE klass;
+
+    /* misc */
+    ID defined_method_id; /* for define_method */
+    rb_num_t flip_cnt;
+
+    /* used at compile time */
+    struct iseq_compile_data *compile_data;
+
+    /* original iseq, before encoding
+     * used for debug/dump (TODO: union with compile_data) */
+    VALUE *iseq;
+};
+
+enum ruby_special_exceptions {
+    ruby_error_reenter,
+    ruby_error_nomemory,
+    ruby_error_sysstack,
+    ruby_error_closed_stream,
+    ruby_special_error_count
+};
+
+enum ruby_basic_operators {
+    BOP_PLUS,
+    BOP_MINUS,
+    BOP_MULT,
+    BOP_DIV,
+    BOP_MOD,
+    BOP_EQ,
+    BOP_EQQ,
+    BOP_LT,
+    BOP_LE,
+    BOP_LTLT,
+    BOP_AREF,
+    BOP_ASET,
+    BOP_LENGTH,
+    BOP_SIZE,
+    BOP_EMPTY_P,
+    BOP_SUCC,
+    BOP_GT,
+    BOP_GE,
+    BOP_NOT,
+    BOP_NEQ,
+    BOP_MATCH,
+    BOP_FREEZE,
+
+    BOP_LAST_
+};
+
+#define GetVMPtr(obj, ptr) \
+  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
+
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+struct rb_objspace;
+void rb_objspace_free(struct rb_objspace *);
+#endif
+
+typedef struct rb_hook_list_struct {
+    struct rb_event_hook_struct *hooks;
+    rb_event_flag_t events;
+    int need_clean;
+} rb_hook_list_t;
+
+typedef struct rb_vm_struct {
+    VALUE self;
+
+    rb_global_vm_lock_t gvl;
+    rb_nativethread_lock_t thread_destruct_lock;
+
+    struct rb_thread_struct *main_thread;
+    struct rb_thread_struct *running_thread;
+
+    struct list_head living_threads;
+    size_t living_thread_num;
+    VALUE thgroup_default;
+
+    int running;
+    int thread_abort_on_exception;
+    int trace_running;
+    volatile int sleeper;
+
+    /* object management */
+    VALUE mark_object_ary;
+
+    const VALUE special_exceptions[ruby_special_error_count];
+
+    /* load */
+    VALUE top_self;
+    VALUE load_path;
+    VALUE load_path_snapshot;
+    VALUE load_path_check_cache;
+    VALUE expanded_load_path;
+    VALUE loaded_features;
+    VALUE loaded_features_snapshot;
+    struct st_table *loaded_features_index;
+    struct st_table *loading_table;
+
+    /* signal */
+    struct {
+        VALUE cmd;
+        int safe;
+    } trap_list[RUBY_NSIG];
+
+    /* hook */
+    rb_hook_list_t event_hooks;
+
+    /* relation table of ensure - rollback for callcc */
+    struct st_table *ensure_rollback_table;
+
+    /* postponed_job */
+    struct rb_postponed_job_struct *postponed_job_buffer;
+    int postponed_job_index;
+
+    int src_encoding_index;
+
+    VALUE verbose, debug, orig_progname, progname;
+    VALUE coverages;
+
+    struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
+
+    VALUE defined_module_hash;
+
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+    struct rb_objspace *objspace;
+#endif
+
+    /*
+     * @shyouhei notes that this is not for storing normal Ruby
+     * objects so do *NOT* mark this when you GC.
+     */
+    struct RArray at_exit;
+
+    VALUE *defined_strings;
+    st_table *frozen_strings;
+
+    /* params */
+    struct { /* size in byte */
+        size_t thread_vm_stack_size;
+        size_t thread_machine_stack_size;
+        size_t fiber_vm_stack_size;
+        size_t fiber_machine_stack_size;
+    } default_params;
+
+    short redefined_flag[BOP_LAST_];
+} rb_vm_t;
+
+/* default values */
+
+#define RUBY_VM_SIZE_ALIGN 4096
+
+#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
+#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
+#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
+#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
+
+#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
+#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
+#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
+#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
+
+/* optimize insn */
+#define FIXNUM_REDEFINED_OP_FLAG (1 << 0)
+#define FLOAT_REDEFINED_OP_FLAG  (1 << 1)
+#define STRING_REDEFINED_OP_FLAG (1 << 2)
+#define ARRAY_REDEFINED_OP_FLAG  (1 << 3)
+#define HASH_REDEFINED_OP_FLAG   (1 << 4)
+#define BIGNUM_REDEFINED_OP_FLAG (1 << 5)
+#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
+#define TIME_REDEFINED_OP_FLAG   (1 << 7)
+#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
+
+#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
+
+#ifndef VM_DEBUG_BP_CHECK
+#define VM_DEBUG_BP_CHECK 0
+#endif
+
+#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
+#define VM_DEBUG_VERIFY_METHOD_CACHE 0
+#endif
+
+typedef struct rb_control_frame_struct {
+    VALUE *pc;                  /* cfp[0] */
+    VALUE *sp;                  /* cfp[1] */
+    rb_iseq_t *iseq;            /* cfp[2] */
+    VALUE flag;                 /* cfp[3] */
+    VALUE self;                 /* cfp[4] / block[0] */
+    VALUE klass;                /* cfp[5] / block[1] */
+    VALUE *ep;                  /* cfp[6] / block[2] */
+    rb_iseq_t *block_iseq;      /* cfp[7] / block[3] */
+    VALUE proc;                 /* cfp[8] / block[4] */
+    const rb_method_entry_t *me;/* cfp[9] */
+
+#if VM_DEBUG_BP_CHECK
+    VALUE *bp_check;            /* cfp[10] */
+#endif
+} rb_control_frame_t;
+
+typedef struct rb_block_struct {
+    VALUE self;                 /* share with method frame if it's only block */
+    VALUE klass;                /* share with method frame if it's only block */
+    VALUE *ep;                  /* share with method frame if it's only block */
+    rb_iseq_t *iseq;
+    VALUE proc;
+} rb_block_t;
+
+extern const rb_data_type_t ruby_threadptr_data_type;
+
+#define GetThreadPtr(obj, ptr) \
+    TypedData_Get_Struct((obj), rb_thread_t, &ruby_threadptr_data_type, (ptr))
+
+enum rb_thread_status {
+    THREAD_RUNNABLE,
+    THREAD_STOPPED,
+    THREAD_STOPPED_FOREVER,
+    THREAD_KILLED
+};
+
+typedef RUBY_JMP_BUF rb_jmpbuf_t;
+
+/*
+  the members which are written in TH_PUSH_TAG() should be placed at
+  the beginning and the end, so that entire region is accessible.
+*/
+struct rb_vm_tag {
+    VALUE tag;
+    VALUE retval;
+    rb_jmpbuf_t buf;
+    struct rb_vm_tag *prev;
+};
+
+struct rb_vm_protect_tag {
+    struct rb_vm_protect_tag *prev;
+};
+
+struct rb_unblock_callback {
+    rb_unblock_function_t *func;
+    void *arg;
+};
+
+struct rb_mutex_struct;
+
+struct rb_thread_struct;
+typedef struct rb_thread_list_struct{
+    struct rb_thread_list_struct *next;
+    struct rb_thread_struct *th;
+} rb_thread_list_t;
+
+
+typedef struct rb_ensure_entry {
+    VALUE marker;
+    VALUE (*e_proc)(ANYARGS);
+    VALUE data2;
+} rb_ensure_entry_t;
+
+typedef struct rb_ensure_list {
+    struct rb_ensure_list *next;
+    struct rb_ensure_entry entry;
+} rb_ensure_list_t;
+
+typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
+
+typedef struct rb_fiber_struct rb_fiber_t;
+
+typedef struct rb_thread_struct {
+    struct list_node vmlt_node;
+    VALUE self;
+    rb_vm_t *vm;
+
+    /* execution information */
+    VALUE *stack;               /* must free, must mark */
+    size_t stack_size;          /* size in word (byte size / sizeof(VALUE)) */
+    rb_control_frame_t *cfp;
+    int safe_level;
+    int raised_flag;
+    VALUE last_status; /* $? */
+
+    /* passing state */
+    int state;
+
+    int waiting_fd;
+
+    /* for rb_iterate */
+    const rb_block_t *passed_block;
+
+    /* for bmethod */
+    const rb_method_entry_t *passed_bmethod_me;
+
+    /* for cfunc */
+    rb_call_info_t *passed_ci;
+
+    /* for load(true) */
+    VALUE top_self;
+    VALUE top_wrapper;
+
+    /* eval env */
+    rb_block_t *base_block;
+
+    VALUE *root_lep;
+    VALUE root_svar;
+
+    /* thread control */
+    rb_nativethread_id_t thread_id;
+#ifdef NON_SCALAR_THREAD_ID
+    rb_thread_id_string_t thread_id_string;
+#endif
+    enum rb_thread_status status;
+    int to_kill;
+    int priority;
+    int mark_stack_len;
+
+    native_thread_data_t native_thread_data;
+    void *blocking_region_buffer;
+
+    VALUE thgroup;
+    VALUE value;
+
+    /* temporary place of errinfo */
+    VALUE errinfo;
+
+    /* temporary place of retval on OPT_CALL_THREADED_CODE */
+#if OPT_CALL_THREADED_CODE
+    VALUE retval;
+#endif
+
+    /* async errinfo queue */
+    VALUE pending_interrupt_queue;
+    VALUE pending_interrupt_mask_stack;
+    int pending_interrupt_queue_checked;
+
+    rb_atomic_t interrupt_flag;
+    unsigned long interrupt_mask;
+    rb_nativethread_lock_t interrupt_lock;
+    rb_nativethread_cond_t interrupt_cond;
+    struct rb_unblock_callback unblock;
+    VALUE locking_mutex;
+    struct rb_mutex_struct *keeping_mutexes;
+
+    struct rb_vm_tag *tag;
+    struct rb_vm_protect_tag *protect_tag;
+
+    /*! Thread-local state of evaluation context.
+     *
+     *  If negative, this thread is evaluating the main program.
+     *  If positive, this thread is evaluating a program under Kernel::eval
+     *  family.
+     */
+    int parse_in_eval;
+
+    /*! Thread-local state of compiling context.
+     *
+     * If non-zero, the parser does not automatically print error messages to
+     * stderr. */
+    int mild_compile_error;
+
+    /* storage */
+    st_table *local_storage;
+    VALUE local_storage_recursive_hash;
+    VALUE local_storage_recursive_hash_for_trace;
+
+    rb_thread_list_t *join_list;
+
+    VALUE first_proc;
+    VALUE first_args;
+    VALUE (*first_func)(ANYARGS);
+
+    /* for GC */
+    struct {
+        VALUE *stack_start;
+        VALUE *stack_end;
+        size_t stack_maxsize;
+#ifdef __ia64
+        VALUE *register_stack_start;
+        VALUE *register_stack_end;
+        size_t register_stack_maxsize;
+#endif
+        jmp_buf regs;
+    } machine;
+
+    /* statistics data for profiler */
+    VALUE stat_insn_usage;
+
+    /* tracer */
+    rb_hook_list_t event_hooks;
+    struct rb_trace_arg_struct *trace_arg; /* trace information */
+
+    /* fiber */
+    rb_fiber_t *fiber;
+    rb_fiber_t *root_fiber;
+    rb_jmpbuf_t root_jmpbuf;
+
+    /* ensure & callcc */
+    rb_ensure_list_t *ensure_list;
+
+    /* misc */
+    int method_missing_reason;
+    int abort_on_exception;
+#ifdef USE_SIGALTSTACK
+    void *altstack;
+#endif
+    unsigned long running_time_us;
+} rb_thread_t;
+
+typedef enum {
+    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
+    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
+    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
+    /* 0x03..0x06 is reserved */
+    VM_DEFINECLASS_TYPE_MASK            = 0x07
+} rb_vm_defineclass_type_t;
+
+#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
+#define VM_DEFINECLASS_FLAG_SCOPED         0x08
+#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
+#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
+#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
+    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
+
+/* iseq.c */
+RUBY_SYMBOL_EXPORT_BEGIN
+
+/* node -> iseq */
+VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, enum iseq_type);
+VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE path, VALUE absolute_path, VALUE parent);
+VALUE rb_iseq_new_main(NODE *node, VALUE path, VALUE absolute_path);
+VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
+VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, const rb_compile_option_t*);
+
+/* src -> iseq */
+VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
+VALUE rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block);
+VALUE rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt);
+
+VALUE rb_iseq_disasm(VALUE self);
+int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
+const char *ruby_node_name(int node);
+
+RUBY_EXTERN VALUE rb_cISeq;
+RUBY_EXTERN VALUE rb_cRubyVM;
+RUBY_EXTERN VALUE rb_cEnv;
+RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+RUBY_SYMBOL_EXPORT_END
+
+#define GetProcPtr(obj, ptr) \
+  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
+
+typedef struct {
+    rb_block_t block;
+
+    VALUE envval;               /* for GC mark */
+    VALUE blockprocval;
+    int8_t safe_level;          /* 0..3 */
+    int8_t is_from_method;      /* bool */
+    int8_t is_lambda;           /* bool */
+} rb_proc_t;
+
+#define GetEnvPtr(obj, ptr) \
+  GetCoreDataFromValue((obj), rb_env_t, (ptr))
+
+typedef struct {
+    int env_size;
+    int local_size;
+    VALUE prev_envval;          /* for GC mark */
+    rb_block_t block;
+    VALUE env[1];               /* flexible array */
+} rb_env_t;
+
+extern const rb_data_type_t ruby_binding_data_type;
+
+#define GetBindingPtr(obj, ptr) \
+  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
+
+typedef struct {
+    VALUE env;
+    VALUE path;
+    VALUE blockprocval;         /* for GC mark */
+    unsigned short first_lineno;
+} rb_binding_t;
+
+/* used by compile time and send insn */
+
+enum vm_check_match_type {
+    VM_CHECKMATCH_TYPE_WHEN = 1,
+    VM_CHECKMATCH_TYPE_CASE = 2,
+    VM_CHECKMATCH_TYPE_RESCUE = 3
+};
+
+#define VM_CHECKMATCH_TYPE_MASK   0x03
+#define VM_CHECKMATCH_ARRAY       0x04
+
+#define VM_CALL_ARGS_SPLAT      (0x01 << 1) /* m(*args) */
+#define VM_CALL_ARGS_BLOCKARG   (0x01 << 2) /* m(&block) */
+#define VM_CALL_FCALL           (0x01 << 3) /* m(...) */
+#define VM_CALL_VCALL           (0x01 << 4) /* m */
+#define VM_CALL_TAILCALL        (0x01 << 5) /* located at tail position */
+#define VM_CALL_SUPER           (0x01 << 6) /* super */
+#define VM_CALL_OPT_SEND        (0x01 << 7) /* internal flag */
+#define VM_CALL_ARGS_SIMPLE     (0x01 << 8) /* (ci->flag & (SPLAT|BLOCKARG)) && ci->blockiseq == NULL && ci->kw_arg == NULL */
+
+enum vm_special_object_type {
+    VM_SPECIAL_OBJECT_VMCORE = 1,
+    VM_SPECIAL_OBJECT_CBASE,
+    VM_SPECIAL_OBJECT_CONST_BASE
+};
+
+#define VM_FRAME_MAGIC_METHOD 0x11
+#define VM_FRAME_MAGIC_BLOCK  0x21
+#define VM_FRAME_MAGIC_CLASS  0x31
+#define VM_FRAME_MAGIC_TOP    0x41
+#define VM_FRAME_MAGIC_CFUNC  0x61
+#define VM_FRAME_MAGIC_PROC   0x71
+#define VM_FRAME_MAGIC_IFUNC  0x81
+#define VM_FRAME_MAGIC_EVAL   0x91
+#define VM_FRAME_MAGIC_LAMBDA 0xa1
+#define VM_FRAME_MAGIC_RESCUE 0xb1
+#define VM_FRAME_MAGIC_MASK_BITS 8
+#define VM_FRAME_MAGIC_MASK   (~(~(VALUE)0<<VM_FRAME_MAGIC_MASK_BITS))
+
+#define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
+
+/* other frame flag */
+#define VM_FRAME_FLAG_PASSED  0x0100
+#define VM_FRAME_FLAG_FINISH  0x0200
+#define VM_FRAME_FLAG_BMETHOD 0x0400
+#define VM_FRAME_TYPE_FINISH_P(cfp)  (((cfp)->flag & VM_FRAME_FLAG_FINISH) != 0)
+#define VM_FRAME_TYPE_BMETHOD_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_BMETHOD) != 0)
+
+#define RUBYVM_CFUNC_FRAME_P(cfp) \
+  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+/* inline cache */
+typedef struct iseq_inline_cache_entry *IC;
+typedef rb_call_info_t *CALL_INFO;
+
+void rb_vm_change_state(void);
+
+typedef VALUE CDHASH;
+
+#ifndef FUNC_FASTCALL
+#define FUNC_FASTCALL(x) x
+#endif
+
+typedef rb_control_frame_t *
+  (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
+
+#define GC_GUARDED_PTR(p)     ((VALUE)((VALUE)(p) | 0x01))
+#define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
+#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
+
+/*
+ * block frame:
+ *  ep[ 0]: prev frame
+ *  ep[-1]: CREF (for *_eval)
+ *
+ * method frame:
+ *  ep[ 0]: block pointer (ptr | VM_ENVVAL_BLOCK_PTR_FLAG)
+ */
+
+#define VM_ENVVAL_BLOCK_PTR_FLAG    0x02
+#define VM_ENVVAL_BLOCK_PTR(v)      (GC_GUARDED_PTR(v) | VM_ENVVAL_BLOCK_PTR_FLAG)
+#define VM_ENVVAL_BLOCK_PTR_P(v)    ((v) & VM_ENVVAL_BLOCK_PTR_FLAG)
+#define VM_ENVVAL_PREV_EP_PTR(v)    ((VALUE)GC_GUARDED_PTR(v))
+#define VM_ENVVAL_PREV_EP_PTR_P(v)  (!(VM_ENVVAL_BLOCK_PTR_P(v)))
+
+#define VM_EP_PREV_EP(ep)   ((VALUE *)GC_GUARDED_PTR_REF((ep)[0]))
+#define VM_EP_BLOCK_PTR(ep) ((rb_block_t *)GC_GUARDED_PTR_REF((ep)[0]))
+#define VM_EP_LEP_P(ep)     VM_ENVVAL_BLOCK_PTR_P((ep)[0])
+
+VALUE *rb_vm_ep_local_ep(VALUE *ep);
+rb_block_t *rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp);
+
+#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
+#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
+#define RUBY_VM_END_CONTROL_FRAME(th) \
+  ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
+#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+  ((void *)(ecfp) > (void *)(cfp))
+#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
+  (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
+
+#define RUBY_VM_IFUNC_P(ptr)        (BUILTIN_TYPE(ptr) == T_NODE)
+#define RUBY_VM_NORMAL_ISEQ_P(ptr) \
+  ((ptr) && !RUBY_VM_IFUNC_P(ptr))
+
+#define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
+#define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
+  ((rb_control_frame_t *)((VALUE *)(b) - 4))
+/* magic number `4' is depend on rb_control_frame_t layout. */
+
+/* VM related object allocate functions */
+VALUE rb_thread_alloc(VALUE klass);
+VALUE rb_proc_wrap(VALUE klass, rb_proc_t *); /* may use with rb_proc_alloc */
+VALUE rb_binding_alloc(VALUE klass);
+
+/* for debug */
+extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
+extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *_pc);
+extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);
+
+#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
+#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
+void rb_vm_bugreport(const void *);
+NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
+
+/* functions about thread/vm execution */
+RUBY_SYMBOL_EXPORT_BEGIN
+VALUE rb_iseq_eval(VALUE iseqval);
+VALUE rb_iseq_eval_main(VALUE iseqval);
+RUBY_SYMBOL_EXPORT_END
+int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
+
+VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
+                        int argc, const VALUE *argv, const rb_block_t *blockptr);
+VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const rb_block_t *block, VALUE klass, int8_t is_lambda);
+VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+VALUE rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp);
+VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
+VALUE rb_vm_env_local_variables(VALUE envval);
+VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
+void rb_vm_inc_const_missing_count(void);
+void rb_vm_gvl_destroy(rb_vm_t *vm);
+VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
+                 const VALUE *argv, const rb_method_entry_t *me,
+                 VALUE defined_class);
+void rb_unlink_method_entry(rb_method_entry_t *me);
+void rb_gc_mark_unlinked_live_method_entries(void *pvm);
+
+void rb_thread_start_timer_thread(void);
+void rb_thread_stop_timer_thread(int);
+void rb_thread_reset_timer_thread(void);
+void rb_thread_wakeup_timer_thread(void);
+
+static inline void
+rb_vm_living_threads_init(rb_vm_t *vm)
+{
+    list_head_init(&vm->living_threads);
+    vm->living_thread_num = 0;
+}
+
+static inline void
+rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
+{
+    list_add_tail(&vm->living_threads, &th->vmlt_node);
+    vm->living_thread_num++;
+}
+
+static inline void
+rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
+{
+    list_del(&th->vmlt_node);
+    vm->living_thread_num--;
+}
+
+int ruby_thread_has_gvl_p(void);
+typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_thread_t *th, const rb_control_frame_t *cfp);
+rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_thread_t *th, const rb_control_frame_t *cfp);
+int rb_vm_get_sourceline(const rb_control_frame_t *);
+VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
+void rb_vm_stack_to_heap(rb_thread_t *th);
+void ruby_thread_init_stack(rb_thread_t *th);
+int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp);
+void rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
+
+void rb_vm_register_special_exception(enum ruby_special_exceptions sp, VALUE exception_class, const char *mesg);
+
+void rb_gc_mark_machine_stack(rb_thread_t *th);
+
+int rb_autoloading_value(VALUE mod, ID id, VALUE* value);
+
+void rb_vm_rewrite_cref_stack(NODE *node, VALUE old_klass, VALUE new_klass, NODE **new_cref_ptr);
+
+#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
+#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
+    (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
+     !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
+     ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
+#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
+    if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
+#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
+    WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
+#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
+    WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
+
+/* for thread */
+
+#if RUBY_VM_THREAD_MODEL == 2
+extern rb_thread_t *ruby_current_thread;
+extern rb_vm_t *ruby_current_vm;
+extern rb_event_flag_t ruby_vm_event_flags;
+
+#define GET_VM() ruby_current_vm
+
+#ifndef OPT_CALL_CFUNC_WITHOUT_FRAME
+#define OPT_CALL_CFUNC_WITHOUT_FRAME 0
+#endif
+
+static inline rb_thread_t *
+GET_THREAD(void)
+{
+    rb_thread_t *th = ruby_current_thread;
+#if OPT_CALL_CFUNC_WITHOUT_FRAME
+    if (UNLIKELY(th->passed_ci != 0)) {
+        void vm_call_cfunc_push_frame(rb_thread_t *th);
+        vm_call_cfunc_push_frame(th);
+    }
+#endif
+    return th;
+}
+
+#define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+#define rb_thread_set_current(th) do { \
+    if ((th)->vm->running_thread != (th)) { \
+        (th)->running_time_us = 0; \
+    } \
+    rb_thread_set_current_raw(th); \
+    (th)->vm->running_thread = (th); \
+} while (0)
+
+#else
+#error "unsupported thread model"
+#endif
+
+enum {
+    TIMER_INTERRUPT_MASK         = 0x01,
+    PENDING_INTERRUPT_MASK       = 0x02,
+    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
+    TRAP_INTERRUPT_MASK          = 0x08
+};
+
+#define RUBY_VM_SET_TIMER_INTERRUPT(th)         ATOMIC_OR((th)->interrupt_flag, TIMER_INTERRUPT_MASK)
+#define RUBY_VM_SET_INTERRUPT(th)               ATOMIC_OR((th)->interrupt_flag, PENDING_INTERRUPT_MASK)
+#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
+#define RUBY_VM_SET_TRAP_INTERRUPT(th)          ATOMIC_OR((th)->interrupt_flag, TRAP_INTERRUPT_MASK)
+#define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & ~(th)->interrupt_mask & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
+#define RUBY_VM_INTERRUPTED_ANY(th) ((th)->interrupt_flag & ~(th)->interrupt_mask)
+
+int rb_signal_buff_size(void);
+void rb_signal_exec(rb_thread_t *th, int sig);
+void rb_threadptr_check_signal(rb_thread_t *mth);
+void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+void rb_threadptr_signal_exit(rb_thread_t *th);
+void rb_threadptr_execute_interrupts(rb_thread_t *, int);
+void rb_threadptr_interrupt(rb_thread_t *th);
+void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
+void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
+void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
+int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
+
+#define RUBY_VM_CHECK_INTS_BLOCKING(th) do { \
+        if (UNLIKELY(!rb_threadptr_pending_interrupt_empty_p(th))) { \
+            th->pending_interrupt_queue_checked = 0; \
+            RUBY_VM_SET_INTERRUPT(th); \
+            rb_threadptr_execute_interrupts(th, 1); \
+        } \
+        else if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
+            rb_threadptr_execute_interrupts(th, 1); \
+        } \
+    } while (0)
+
+#define RUBY_VM_CHECK_INTS(th) do { \
+        if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
+            rb_threadptr_execute_interrupts(th, 0); \
+        } \
+    } while (0)
+
+/* tracer */
+struct rb_trace_arg_struct {
+    rb_event_flag_t event;
+    rb_thread_t *th;
+    rb_control_frame_t *cfp;
+    VALUE self;
+    ID id;
+    VALUE klass;
+    VALUE data;
+
+    int klass_solved;
+
+    /* calc from cfp */
+    int lineno;
+    VALUE path;
+};
+
+void rb_threadptr_exec_event_hooks(struct rb_trace_arg_struct *trace_arg);
+void rb_threadptr_exec_event_hooks_and_pop_frame(struct rb_trace_arg_struct *trace_arg);
+
+#define EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, pop_p_) do { \
+    if (UNLIKELY(ruby_vm_event_flags & (flag_))) { \
+        if (((th)->event_hooks.events | (th)->vm->event_hooks.events) & (flag_)) { \
+            struct rb_trace_arg_struct trace_arg; \
+            trace_arg.event = (flag_); \
+            trace_arg.th = (th_); \
+            trace_arg.cfp = (trace_arg.th)->cfp; \
+            trace_arg.self = (self_); \
+            trace_arg.id = (id_); \
+            trace_arg.klass = (klass_); \
+            trace_arg.data = (data_); \
+            trace_arg.path = Qundef; \
+            trace_arg.klass_solved = 0; \
+            if (pop_p_) rb_threadptr_exec_event_hooks_and_pop_frame(&trace_arg); \
+            else rb_threadptr_exec_event_hooks(&trace_arg); \
+        } \
+    } \
+} while (0)
+
+#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_) \
+  EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 0)
+
+#define EXEC_EVENT_HOOK_AND_POP_FRAME(th_, flag_, self_, id_, klass_, data_) \
+  EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 1)
+
+RUBY_SYMBOL_EXPORT_BEGIN
+
+int rb_thread_check_trap_pending(void);
+
+extern VALUE rb_get_coverages(void);
+extern void rb_set_coverages(VALUE);
+extern void rb_reset_coverages(void);
+
+void rb_postponed_job_flush(rb_vm_t *vm);
+
+RUBY_SYMBOL_EXPORT_END
+
+#endif /* RUBY_VM_CORE_H */
```