debugger-ruby_core_source 1.2.4 → 1.3.0
- data/CHANGELOG.md +3 -0
- data/debugger-ruby_core_source.gemspec +1 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/addr2line.h +21 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/constant.h +36 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/dln.h +51 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/encdb.h +170 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/eval_intern.h +260 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/gc.h +101 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/id.h +171 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/insns.inc +189 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/insns_info.inc +731 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/internal.h +889 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/iseq.h +136 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/known_errors.inc +731 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/method.h +142 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/node.h +543 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/node_name.inc +212 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/opt_sc.inc +710 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/optinsn.inc +83 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/optunifs.inc +121 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/parse.h +183 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/probes_helper.h +67 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/regenc.h +223 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/regint.h +911 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/regparse.h +363 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/revision.h +1 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/ruby_atomic.h +165 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/siphash.h +48 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/thread_native.h +23 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/thread_pthread.h +56 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/thread_win32.h +45 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/timev.h +42 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/transcode_data.h +123 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/transdb.h +193 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/version.h +52 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm.inc +3243 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_core.h +1043 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_debug.h +37 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_exec.h +182 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_insnhelper.h +273 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_opts.h +56 -0
- data/lib/debugger/ruby_core_source/ruby-2.1.0/vmtc.inc +102 -0
- data/lib/debugger/ruby_core_source/version.rb +1 -1
- metadata +44 -3
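The ruby-2.1.0 directory added above vendors CRuby's private headers (vm_core.h, iseq.h, method.h, and friends), which native debugger extensions compile against. For context only (not part of this changeset), the sketch below shows the typical extconf.rb pattern for consuming this gem; the Debugger::RubyCoreSource.create_makefile_with_core helper and its argument order are recalled from the gem's README and should be treated as an assumption to verify there.

# extconf.rb of a native debugger extension (illustrative sketch, hypothetical extension name)
require "debugger/ruby_core_source"
require "mkmf"

# Headers the extension needs from the vendored core-source set;
# on Ruby 2.1.0 they resolve to the ruby-2.1.0 directory added in this release.
hdrs = proc {
  have_header("vm_core.h") and
    have_header("iseq.h") and
    have_header("method.h")
}

# Assumed helper: locates (or fetches) the header set matching the running Ruby,
# adds it to the include path, and writes the Makefile for the extension.
Debugger::RubyCoreSource.create_makefile_with_core(hdrs, "my_debugger_ext")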
data/lib/debugger/ruby_core_source/ruby-2.1.0/vm_core.h
@@ -0,0 +1,1043 @@
+/**********************************************************************
+
+  vm_core.h -
+
+  $Author: charliesome $
+  created at: 04/01/01 19:41:38 JST
+
+  Copyright (C) 2004-2007 Koichi Sasada
+
+**********************************************************************/
+
+#ifndef RUBY_VM_CORE_H
+#define RUBY_VM_CORE_H
+
+#define RUBY_VM_THREAD_MODEL 2
+
+#include "ruby/ruby.h"
+#include "ruby/st.h"
+
+#include "node.h"
+#include "vm_debug.h"
+#include "vm_opts.h"
+#include "id.h"
+#include "method.h"
+#include "ruby_atomic.h"
+
+#include "thread_native.h"
+
+#ifndef ENABLE_VM_OBJSPACE
+#ifdef _WIN32
+/*
+ * TODO: object space independent st_table.
+ * socklist needs st_table in rb_w32_sysinit(), before object space
+ * initialization.
+ * It is too early now to change st_hash_type, since it breaks binary
+ * compatibility.
+ */
+#define ENABLE_VM_OBJSPACE 0
+#else
+#define ENABLE_VM_OBJSPACE 1
+#endif
+#endif
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef NSIG
+# define NSIG (_SIGMAX + 1) /* For QNX */
+#endif
+
+#define RUBY_NSIG NSIG
+
+#ifdef HAVE_STDARG_PROTOTYPES
+#include <stdarg.h>
+#define va_init_list(a,b) va_start((a),(b))
+#else
+#include <varargs.h>
+#define va_init_list(a,b) va_start((a))
+#endif
+
+#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+#define USE_SIGALTSTACK
+#endif
+
+/*****************/
+/* configuration */
+/*****************/
+
+/* gcc ver. check */
+#if defined(__GNUC__) && __GNUC__ >= 2
+
+#if OPT_TOKEN_THREADED_CODE
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+#endif
+
+#else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+/* disable threaded code options */
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+#if OPT_TOKEN_THREADED_CODE
+#undef OPT_TOKEN_THREADED_CODE
+#endif
+#endif
+
+#ifdef __native_client__
+#undef OPT_DIRECT_THREADED_CODE
+#endif
+
+/* call threaded code */
+#if OPT_CALL_THREADED_CODE
+#if OPT_DIRECT_THREADED_CODE
+#undef OPT_DIRECT_THREADED_CODE
+#endif /* OPT_DIRECT_THREADED_CODE */
+#if OPT_STACK_CACHING
+#undef OPT_STACK_CACHING
+#endif /* OPT_STACK_CACHING */
+#endif /* OPT_CALL_THREADED_CODE */
+
+/* likely */
+#if __GNUC__ >= 3
+#define LIKELY(x) (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+#else /* __GNUC__ >= 3 */
+#define LIKELY(x) (x)
+#define UNLIKELY(x) (x)
+#endif /* __GNUC__ >= 3 */
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#if __has_attribute(unused)
+#define UNINITIALIZED_VAR(x) x __attribute__((unused))
+#elif defined(__GNUC__) && __GNUC__ >= 3
+#define UNINITIALIZED_VAR(x) x = x
+#else
+#define UNINITIALIZED_VAR(x) x
+#endif
+
+typedef unsigned long rb_num_t;
+
+/* iseq data type */
+
+struct iseq_compile_data_ensure_node_stack;
+
+typedef struct rb_compile_option_struct rb_compile_option_t;
+
+
+struct iseq_inline_cache_entry {
+    rb_serial_t ic_serial;
+    union {
+        size_t index;
+        VALUE value;
+    } ic_value;
+};
+
+union iseq_inline_storage_entry {
+    struct {
+        struct rb_thread_struct *running_thread;
+        VALUE value;
+        VALUE done;
+    } once;
+    struct iseq_inline_cache_entry cache;
+};
+
+/* to avoid warning */
+struct rb_thread_struct;
+struct rb_control_frame_struct;
+
+/* rb_call_info_t contains calling information including inline cache */
+typedef struct rb_call_info_struct {
+    /* fixed at compile time */
+    ID mid;
+    VALUE flag;
+    int orig_argc;
+    rb_iseq_t *blockiseq;
+
+    /* inline cache: keys */
+    rb_serial_t method_state;
+    rb_serial_t class_serial;
+    VALUE klass;
+
+    /* inline cache: values */
+    const rb_method_entry_t *me;
+    VALUE defined_class;
+
+    /* temporary values for method calling */
+    int argc;
+    struct rb_block_struct *blockptr;
+    VALUE recv;
+    union {
+        int opt_pc; /* used by iseq */
+        long index; /* used by ivar */
+        int missing_reason; /* used by method_missing */
+        int inc_sp; /* used by cfunc */
+    } aux;
+
+    VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_call_info_struct *ci);
+} rb_call_info_t;
+
+#if 1
+#define GetCoreDataFromValue(obj, type, ptr) do { \
+    (ptr) = (type*)DATA_PTR(obj); \
+} while (0)
+#else
+#define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct((obj), type, (ptr))
+#endif
+
+#define GetISeqPtr(obj, ptr) \
+    GetCoreDataFromValue((obj), rb_iseq_t, (ptr))
+
+typedef struct rb_iseq_location_struct {
+    const VALUE path;
+    const VALUE absolute_path;
+    const VALUE base_label;
+    const VALUE label;
+    size_t first_lineno;
+} rb_iseq_location_t;
+
+struct rb_iseq_struct;
+
+struct rb_iseq_struct {
+    /***************/
+    /* static data */
+    /***************/
+
+    enum iseq_type {
+        ISEQ_TYPE_TOP,
+        ISEQ_TYPE_METHOD,
+        ISEQ_TYPE_BLOCK,
+        ISEQ_TYPE_CLASS,
+        ISEQ_TYPE_RESCUE,
+        ISEQ_TYPE_ENSURE,
+        ISEQ_TYPE_EVAL,
+        ISEQ_TYPE_MAIN,
+        ISEQ_TYPE_DEFINED_GUARD
+    } type; /* instruction sequence type */
+
+    rb_iseq_location_t location;
+
+    VALUE *iseq; /* iseq (insn number and operands) */
+    VALUE *iseq_encoded; /* encoded iseq */
+    unsigned long iseq_size;
+    const VALUE mark_ary; /* Array: includes operands which should be GC marked */
+    const VALUE coverage; /* coverage array */
+
+    /* insn info, must be freed */
+    struct iseq_line_info_entry *line_info_table;
+    size_t line_info_size;
+
+    ID *local_table; /* must free */
+    int local_table_size;
+
+    /* sizeof(vars) + 1 */
+    int local_size;
+
+    union iseq_inline_storage_entry *is_entries;
+    int is_size;
+
+    rb_call_info_t *callinfo_entries;
+    int callinfo_size;
+
+    /**
+     * argument information
+     *
+     *  def m(a1, a2, ..., aM, # mandatory
+     *        b1=(...), b2=(...), ..., bN=(...), # optional
+     *        *c, # rest
+     *        d1, d2, ..., dO, # post
+     *        e1:(...), e2:(...), ..., eK:(...), # keyword
+     *        **f, # keyword rest
+     *        &g) # block
+     * =>
+     *
+     *  argc = M // or 0 if no mandatory arg
+     *  arg_opts = N+1 // or 0 if no optional arg
+     *  arg_rest = M+N // or -1 if no rest arg
+     *  arg_opt_table = [ (arg_opts entries) ]
+     *  arg_post_start = M+N+(*1) // or 0 if no post arguments
+     *  arg_post_len = O // or 0 if no post arguments
+     *  arg_keywords = K // or 0 if no keyword arg
+     *  arg_block = M+N+(*1)+O+K // or -1 if no block arg
+     *  arg_keyword = M+N+(*1)+O+K+(&1) // or -1 if no keyword arg/rest
+     *  arg_simple = 0 if not simple arguments.
+     *             = 1 if no opt, rest, post, block.
+     *             = 2 if ambiguous block parameter ({|a|}).
+     *  arg_size = M+N+O+(*1)+K+(&1)+(**1) argument size.
+     */
+
+    int argc;
+    int arg_simple;
+    int arg_rest;
+    int arg_block;
+    int arg_opts;
+    int arg_post_len;
+    int arg_post_start;
+    int arg_size;
+    VALUE *arg_opt_table;
+    int arg_keyword;
+    int arg_keyword_check; /* if this is true, raise an ArgumentError when unknown keyword argument is passed */
+    int arg_keywords;
+    int arg_keyword_required;
+    ID *arg_keyword_table;
+
+    size_t stack_max; /* for stack overflow check */
+
+    /* catch table */
+    struct iseq_catch_table_entry *catch_table;
+    int catch_table_size;
+
+    /* for child iseq */
+    struct rb_iseq_struct *parent_iseq;
+    struct rb_iseq_struct *local_iseq;
+
+    /****************/
+    /* dynamic data */
+    /****************/
+
+    VALUE self;
+    const VALUE orig; /* non-NULL if its data have origin */
+
+    /* block inlining */
+    /*
+     * NODE *node;
+     * void *special_block_builder;
+     * void *cached_special_block_builder;
+     * VALUE cached_special_block;
+     */
+
+    /* klass/module nest information stack (cref) */
+    NODE * const cref_stack;
+    const VALUE klass;
+
+    /* misc */
+    ID defined_method_id; /* for define_method */
+    rb_num_t flip_cnt;
+
+    /* used at compile time */
+    struct iseq_compile_data *compile_data;
+};
+
+enum ruby_special_exceptions {
+    ruby_error_reenter,
+    ruby_error_nomemory,
+    ruby_error_sysstack,
+    ruby_error_closed_stream,
+    ruby_special_error_count
+};
+
+#define GetVMPtr(obj, ptr) \
+    GetCoreDataFromValue((obj), rb_vm_t, (ptr))
+
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+struct rb_objspace;
+void rb_objspace_free(struct rb_objspace *);
+#endif
+
+typedef struct rb_hook_list_struct {
+    struct rb_event_hook_struct *hooks;
+    rb_event_flag_t events;
+    int need_clean;
+} rb_hook_list_t;
+
+typedef struct rb_vm_struct {
+    VALUE self;
+
+    rb_global_vm_lock_t gvl;
+    rb_nativethread_lock_t thread_destruct_lock;
+
+    struct rb_thread_struct *main_thread;
+    struct rb_thread_struct *running_thread;
+
+    st_table *living_threads;
+    VALUE thgroup_default;
+
+    int running;
+    int thread_abort_on_exception;
+    int trace_running;
+    volatile int sleeper;
+
+    /* object management */
+    VALUE mark_object_ary;
+
+    VALUE special_exceptions[ruby_special_error_count];
+
+    /* load */
+    VALUE top_self;
+    VALUE load_path;
+    VALUE load_path_snapshot;
+    VALUE load_path_check_cache;
+    VALUE expanded_load_path;
+    VALUE loaded_features;
+    VALUE loaded_features_snapshot;
+    struct st_table *loaded_features_index;
+    struct st_table *loading_table;
+
+    /* signal */
+    struct {
+        VALUE cmd;
+        int safe;
+    } trap_list[RUBY_NSIG];
+
+    /* hook */
+    rb_hook_list_t event_hooks;
+
+    /* relation table of ensure - rollback for callcc */
+    struct st_table *ensure_rollback_table;
+
+    /* postponed_job */
+    struct rb_postponed_job_struct *postponed_job_buffer;
+    int postponed_job_index;
+
+    int src_encoding_index;
+
+    VALUE verbose, debug, orig_progname, progname;
+    VALUE coverages;
+
+    struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
+
+    VALUE defined_module_hash;
+
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+    struct rb_objspace *objspace;
+#endif
+
+    /*
+     * @shyouhei notes that this is not for storing normal Ruby
+     * objects so do *NOT* mark this when you GC.
+     */
+    struct RArray at_exit;
+
+    VALUE *defined_strings;
+
+    /* params */
+    struct { /* size in byte */
+        size_t thread_vm_stack_size;
+        size_t thread_machine_stack_size;
+        size_t fiber_vm_stack_size;
+        size_t fiber_machine_stack_size;
+    } default_params;
+} rb_vm_t;
+
+/* default values */
+
+#define RUBY_VM_SIZE_ALIGN 4096
+
+#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
+#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
+#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
+#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
+
+#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
+#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
+#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
+#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
+
+#ifndef VM_DEBUG_BP_CHECK
+#define VM_DEBUG_BP_CHECK 0
+#endif
+
+typedef struct rb_control_frame_struct {
+    VALUE *pc; /* cfp[0] */
+    VALUE *sp; /* cfp[1] */
+    rb_iseq_t *iseq; /* cfp[2] */
+    VALUE flag; /* cfp[3] */
+    VALUE self; /* cfp[4] / block[0] */
+    VALUE klass; /* cfp[5] / block[1] */
+    VALUE *ep; /* cfp[6] / block[2] */
+    rb_iseq_t *block_iseq; /* cfp[7] / block[3] */
+    VALUE proc; /* cfp[8] / block[4] */
+    const rb_method_entry_t *me;/* cfp[9] */
+
+#if VM_DEBUG_BP_CHECK
+    VALUE *bp_check; /* cfp[10] */
+#endif
+} rb_control_frame_t;
+
+typedef struct rb_block_struct {
+    VALUE self; /* share with method frame if it's only block */
+    VALUE klass; /* share with method frame if it's only block */
+    VALUE *ep; /* share with method frame if it's only block */
+    rb_iseq_t *iseq;
+    VALUE proc;
+} rb_block_t;
+
+extern const rb_data_type_t ruby_threadptr_data_type;
+
+#define GetThreadPtr(obj, ptr) \
+    TypedData_Get_Struct((obj), rb_thread_t, &ruby_threadptr_data_type, (ptr))
+
+enum rb_thread_status {
+    THREAD_RUNNABLE,
+    THREAD_STOPPED,
+    THREAD_STOPPED_FOREVER,
+    THREAD_KILLED
+};
+
+typedef RUBY_JMP_BUF rb_jmpbuf_t;
+
+/*
+  the members which are written in TH_PUSH_TAG() should be placed at
+  the beginning and the end, so that entire region is accessible.
+*/
+struct rb_vm_tag {
+    VALUE tag;
+    VALUE retval;
+    rb_jmpbuf_t buf;
+    struct rb_vm_tag *prev;
+};
+
+struct rb_vm_protect_tag {
+    struct rb_vm_protect_tag *prev;
+};
+
+struct rb_unblock_callback {
+    rb_unblock_function_t *func;
+    void *arg;
+};
+
+struct rb_mutex_struct;
+
+struct rb_thread_struct;
+typedef struct rb_thread_list_struct{
+    struct rb_thread_list_struct *next;
+    struct rb_thread_struct *th;
+} rb_thread_list_t;
+
+
+typedef struct rb_ensure_entry {
+    VALUE marker;
+    VALUE (*e_proc)(ANYARGS);
+    VALUE data2;
+} rb_ensure_entry_t;
+
+typedef struct rb_ensure_list {
+    struct rb_ensure_list *next;
+    struct rb_ensure_entry entry;
+} rb_ensure_list_t;
+
+typedef struct rb_thread_struct {
+    VALUE self;
+    rb_vm_t *vm;
+
+    /* execution information */
+    VALUE *stack; /* must free, must mark */
+    size_t stack_size; /* size in word (byte size / sizeof(VALUE)) */
+    rb_control_frame_t *cfp;
+    int safe_level;
+    int raised_flag;
+    VALUE last_status; /* $? */
+
+    /* passing state */
+    int state;
+
+    int waiting_fd;
+
+    /* for rb_iterate */
+    const rb_block_t *passed_block;
+
+    /* for bmethod */
+    const rb_method_entry_t *passed_me;
+
+    /* for cfunc */
+    rb_call_info_t *passed_ci;
+
+    /* for load(true) */
+    VALUE top_self;
+    VALUE top_wrapper;
+
+    /* eval env */
+    rb_block_t *base_block;
+
+    VALUE *root_lep;
+    VALUE root_svar;
+
+    /* thread control */
+    rb_nativethread_id_t thread_id;
+    enum rb_thread_status status;
+    int to_kill;
+    int priority;
+
+    native_thread_data_t native_thread_data;
+    void *blocking_region_buffer;
+
+    VALUE thgroup;
+    VALUE value;
+
+    /* temporary place of errinfo */
+    VALUE errinfo;
+
+    /* temporary place of retval on OPT_CALL_THREADED_CODE */
+#if OPT_CALL_THREADED_CODE
+    VALUE retval;
+#endif
+
+    /* async errinfo queue */
+    VALUE pending_interrupt_queue;
+    int pending_interrupt_queue_checked;
+    VALUE pending_interrupt_mask_stack;
+
+    rb_atomic_t interrupt_flag;
+    unsigned long interrupt_mask;
+    rb_nativethread_lock_t interrupt_lock;
+    rb_nativethread_cond_t interrupt_cond;
+    struct rb_unblock_callback unblock;
+    VALUE locking_mutex;
+    struct rb_mutex_struct *keeping_mutexes;
+
+    struct rb_vm_tag *tag;
+    struct rb_vm_protect_tag *protect_tag;
+
+    /*! Thread-local state of evaluation context.
+     *
+     * If negative, this thread is evaluating the main program.
+     * If positive, this thread is evaluating a program under Kernel::eval
+     * family.
+     */
+    int parse_in_eval;
+
+    /*! Thread-local state of compiling context.
+     *
+     * If non-zero, the parser does not automatically print error messages to
+     * stderr. */
+    int mild_compile_error;
+
+    /* storage */
+    st_table *local_storage;
+
+    rb_thread_list_t *join_list;
+
+    VALUE first_proc;
+    VALUE first_args;
+    VALUE (*first_func)(ANYARGS);
+
+    /* for GC */
+    VALUE *machine_stack_start;
+    VALUE *machine_stack_end;
+    size_t machine_stack_maxsize;
+#ifdef __ia64
+    VALUE *machine_register_stack_start;
+    VALUE *machine_register_stack_end;
+    size_t machine_register_stack_maxsize;
+#endif
+    jmp_buf machine_regs;
+    int mark_stack_len;
+
+    /* statistics data for profiler */
+    VALUE stat_insn_usage;
+
+    /* tracer */
+    rb_hook_list_t event_hooks;
+    struct rb_trace_arg_struct *trace_arg; /* trace information */
+
+    /* fiber */
+    VALUE fiber;
+    VALUE root_fiber;
+    rb_jmpbuf_t root_jmpbuf;
+
+    /* ensure & callcc */
+    rb_ensure_list_t *ensure_list;
+
+    /* misc */
+    int method_missing_reason;
+    int abort_on_exception;
+#ifdef USE_SIGALTSTACK
+    void *altstack;
+#endif
+    unsigned long running_time_us;
+} rb_thread_t;
+
+typedef enum {
+    VM_DEFINECLASS_TYPE_CLASS = 0x00,
+    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
+    VM_DEFINECLASS_TYPE_MODULE = 0x02,
+    /* 0x03..0x06 is reserved */
+    VM_DEFINECLASS_TYPE_MASK = 0x07
+} rb_vm_defineclass_type_t;
+
+#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
+#define VM_DEFINECLASS_FLAG_SCOPED 0x08
+#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
+#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
+#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
+    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
+
+/* iseq.c */
+RUBY_SYMBOL_EXPORT_BEGIN
+
+/* node -> iseq */
+VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, enum iseq_type);
+VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE path, VALUE absolute_path, VALUE parent);
+VALUE rb_iseq_new_main(NODE *node, VALUE path, VALUE absolute_path);
+VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
+VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, const rb_compile_option_t*);
+
+/* src -> iseq */
+VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
+VALUE rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block);
+VALUE rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt);
+
+VALUE rb_iseq_disasm(VALUE self);
+int rb_iseq_disasm_insn(VALUE str, VALUE *iseqval, size_t pos, rb_iseq_t *iseq, VALUE child);
+const char *ruby_node_name(int node);
+
+RUBY_EXTERN VALUE rb_cISeq;
+RUBY_EXTERN VALUE rb_cRubyVM;
+RUBY_EXTERN VALUE rb_cEnv;
+RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+RUBY_SYMBOL_EXPORT_END
+
+#define GetProcPtr(obj, ptr) \
+    GetCoreDataFromValue((obj), rb_proc_t, (ptr))
+
+typedef struct {
+    rb_block_t block;
+
+    VALUE envval; /* for GC mark */
+    VALUE blockprocval;
+    int safe_level;
+    int is_from_method;
+    int is_lambda;
+} rb_proc_t;
+
+#define GetEnvPtr(obj, ptr) \
+    GetCoreDataFromValue((obj), rb_env_t, (ptr))
+
+typedef struct {
+    VALUE *env;
+    int env_size;
+    int local_size;
+    VALUE prev_envval; /* for GC mark */
+    rb_block_t block;
+} rb_env_t;
+
+extern const rb_data_type_t ruby_binding_data_type;
+
+#define GetBindingPtr(obj, ptr) \
+    GetCoreDataFromValue((obj), rb_binding_t, (ptr))
+
+typedef struct {
+    VALUE env;
+    VALUE path;
+    unsigned short first_lineno;
+} rb_binding_t;
+
+/* used by compile time and send insn */
+
+enum vm_check_match_type {
+    VM_CHECKMATCH_TYPE_WHEN = 1,
+    VM_CHECKMATCH_TYPE_CASE = 2,
+    VM_CHECKMATCH_TYPE_RESCUE = 3
+};
+
+#define VM_CHECKMATCH_TYPE_MASK 0x03
+#define VM_CHECKMATCH_ARRAY 0x04
+
+#define VM_CALL_ARGS_SPLAT (0x01 << 1) /* m(*args) */
+#define VM_CALL_ARGS_BLOCKARG (0x01 << 2) /* m(&block) */
+#define VM_CALL_FCALL (0x01 << 3) /* m(...) */
+#define VM_CALL_VCALL (0x01 << 4) /* m */
+#define VM_CALL_TAILCALL (0x01 << 5) /* located at tail position */
+#define VM_CALL_SUPER (0x01 << 6) /* super */
+#define VM_CALL_OPT_SEND (0x01 << 7) /* internal flag */
+#define VM_CALL_ARGS_SKIP_SETUP (0x01 << 8) /* (flag & (SPLAT|BLOCKARG)) && blockiseq == 0 */
+
+enum vm_special_object_type {
+    VM_SPECIAL_OBJECT_VMCORE = 1,
+    VM_SPECIAL_OBJECT_CBASE,
+    VM_SPECIAL_OBJECT_CONST_BASE
+};
+
+#define VM_FRAME_MAGIC_METHOD 0x11
+#define VM_FRAME_MAGIC_BLOCK 0x21
+#define VM_FRAME_MAGIC_CLASS 0x31
+#define VM_FRAME_MAGIC_TOP 0x41
+#define VM_FRAME_MAGIC_CFUNC 0x61
+#define VM_FRAME_MAGIC_PROC 0x71
+#define VM_FRAME_MAGIC_IFUNC 0x81
+#define VM_FRAME_MAGIC_EVAL 0x91
+#define VM_FRAME_MAGIC_LAMBDA 0xa1
+#define VM_FRAME_MAGIC_MASK_BITS 8
+#define VM_FRAME_MAGIC_MASK (~(~0<<VM_FRAME_MAGIC_MASK_BITS))
+
+#define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
+
+/* other frame flag */
+#define VM_FRAME_FLAG_PASSED 0x0100
+#define VM_FRAME_FLAG_FINISH 0x0200
+#define VM_FRAME_TYPE_FINISH_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_FINISH) != 0)
+
+#define RUBYVM_CFUNC_FRAME_P(cfp) \
+    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+/* inline cache */
+typedef struct iseq_inline_cache_entry *IC;
+typedef rb_call_info_t *CALL_INFO;
+
+void rb_vm_change_state(void);
+
+typedef VALUE CDHASH;
+
+#ifndef FUNC_FASTCALL
+#define FUNC_FASTCALL(x) x
+#endif
+
+typedef rb_control_frame_t *
+    (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
+
+#define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
+#define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
+#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
+
+/*
+ * block frame:
+ *  ep[ 0]: prev frame
+ *  ep[-1]: CREF (for *_eval)
+ *
+ * method frame:
+ *  ep[ 0]: block pointer (ptr | VM_ENVVAL_BLOCK_PTR_FLAG)
+ */
+
+#define VM_ENVVAL_BLOCK_PTR_FLAG 0x02
+#define VM_ENVVAL_BLOCK_PTR(v) (GC_GUARDED_PTR(v) | VM_ENVVAL_BLOCK_PTR_FLAG)
+#define VM_ENVVAL_BLOCK_PTR_P(v) ((v) & VM_ENVVAL_BLOCK_PTR_FLAG)
+#define VM_ENVVAL_PREV_EP_PTR(v) ((VALUE)GC_GUARDED_PTR(v))
+#define VM_ENVVAL_PREV_EP_PTR_P(v) (!(VM_ENVVAL_BLOCK_PTR_P(v)))
+
+#define VM_EP_PREV_EP(ep) ((VALUE *)GC_GUARDED_PTR_REF((ep)[0]))
+#define VM_EP_BLOCK_PTR(ep) ((rb_block_t *)GC_GUARDED_PTR_REF((ep)[0]))
+#define VM_EP_LEP_P(ep) VM_ENVVAL_BLOCK_PTR_P((ep)[0])
+
+VALUE *rb_vm_ep_local_ep(VALUE *ep);
+rb_block_t *rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp);
+
+#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
+#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
+#define RUBY_VM_END_CONTROL_FRAME(th) \
+    ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
+#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+    ((void *)(ecfp) > (void *)(cfp))
+#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
+    (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
+
+#define RUBY_VM_IFUNC_P(ptr) (BUILTIN_TYPE(ptr) == T_NODE)
+#define RUBY_VM_NORMAL_ISEQ_P(ptr) \
+    ((ptr) && !RUBY_VM_IFUNC_P(ptr))
+
+#define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
+#define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
+    ((rb_control_frame_t *)((VALUE *)(b) - 4))
+/* magic number `4' is depend on rb_control_frame_t layout. */
+
+/* VM related object allocate functions */
+VALUE rb_thread_alloc(VALUE klass);
+VALUE rb_proc_alloc(VALUE klass);
+
+/* for debug */
+extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
+extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *_pc);
+extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);
+
+#define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
+#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
+void rb_vm_bugreport(void);
+
+/* functions about thread/vm execution */
+RUBY_SYMBOL_EXPORT_BEGIN
+VALUE rb_iseq_eval(VALUE iseqval);
+VALUE rb_iseq_eval_main(VALUE iseqval);
+RUBY_SYMBOL_EXPORT_END
+int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
+
+VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
+                        int argc, const VALUE *argv, const rb_block_t *blockptr);
+VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
+VALUE rb_binding_new_with_cfp(rb_thread_t *th, const rb_control_frame_t *src_cfp);
+VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
+void rb_vm_inc_const_missing_count(void);
+void rb_vm_gvl_destroy(rb_vm_t *vm);
+VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
+                 const VALUE *argv, const rb_method_entry_t *me,
+                 VALUE defined_class);
+void rb_unlink_method_entry(rb_method_entry_t *me);
+void rb_gc_mark_unlinked_live_method_entries(void *pvm);
+
+void rb_thread_start_timer_thread(void);
+void rb_thread_stop_timer_thread(int);
+void rb_thread_reset_timer_thread(void);
+void rb_thread_wakeup_timer_thread(void);
+
+int ruby_thread_has_gvl_p(void);
+typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp);
+rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp);
+int rb_vm_get_sourceline(const rb_control_frame_t *);
+VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
+void rb_vm_stack_to_heap(rb_thread_t *th);
+void ruby_thread_init_stack(rb_thread_t *th);
+int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp);
+
+void rb_gc_mark_machine_stack(rb_thread_t *th);
+
+int rb_autoloading_value(VALUE mod, ID id, VALUE* value);
+
+#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
+#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
+    (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
+     !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
+     ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
+#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
+    if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
+#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
+    WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
+#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
+    WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
+
+/* for thread */
+
+#if RUBY_VM_THREAD_MODEL == 2
+extern rb_thread_t *ruby_current_thread;
+extern rb_vm_t *ruby_current_vm;
+extern rb_event_flag_t ruby_vm_event_flags;
+
+#define GET_VM() ruby_current_vm
+
+#ifndef OPT_CALL_CFUNC_WITHOUT_FRAME
+#define OPT_CALL_CFUNC_WITHOUT_FRAME 0
+#endif
+
+static inline rb_thread_t *
+GET_THREAD(void)
+{
+    rb_thread_t *th = ruby_current_thread;
+#if OPT_CALL_CFUNC_WITHOUT_FRAME
+    if (UNLIKELY(th->passed_ci != 0)) {
+        void vm_call_cfunc_push_frame(rb_thread_t *th);
+        vm_call_cfunc_push_frame(th);
+    }
+#endif
+    return th;
+}
+
+#define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+#define rb_thread_set_current(th) do { \
+    if ((th)->vm->running_thread != (th)) { \
+        (th)->running_time_us = 0; \
+    } \
+    rb_thread_set_current_raw(th); \
+    (th)->vm->running_thread = (th); \
+} while (0)
+
+#else
+#error "unsupported thread model"
+#endif
+
+enum {
+    TIMER_INTERRUPT_MASK = 0x01,
+    PENDING_INTERRUPT_MASK = 0x02,
+    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
+    TRAP_INTERRUPT_MASK = 0x08
+};
+
+#define RUBY_VM_SET_TIMER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TIMER_INTERRUPT_MASK)
+#define RUBY_VM_SET_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, PENDING_INTERRUPT_MASK)
+#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
+#define RUBY_VM_SET_TRAP_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TRAP_INTERRUPT_MASK)
+#define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & ~(th)->interrupt_mask & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
+#define RUBY_VM_INTERRUPTED_ANY(th) ((th)->interrupt_flag & ~(th)->interrupt_mask)
+
+int rb_signal_buff_size(void);
+void rb_signal_exec(rb_thread_t *th, int sig);
+void rb_threadptr_check_signal(rb_thread_t *mth);
+void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+void rb_threadptr_signal_exit(rb_thread_t *th);
+void rb_threadptr_execute_interrupts(rb_thread_t *, int);
+void rb_threadptr_interrupt(rb_thread_t *th);
+void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
+void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
+void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
+int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
+
+#define RUBY_VM_CHECK_INTS_BLOCKING(th) do { \
+    if (UNLIKELY(!rb_threadptr_pending_interrupt_empty_p(th))) { \
+        th->pending_interrupt_queue_checked = 0; \
+        RUBY_VM_SET_INTERRUPT(th); \
+        rb_threadptr_execute_interrupts(th, 1); \
+    } \
+    else if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
+        rb_threadptr_execute_interrupts(th, 1); \
+    } \
+} while (0)
+
+#define RUBY_VM_CHECK_INTS(th) do { \
+    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
+        rb_threadptr_execute_interrupts(th, 0); \
+    } \
+} while (0)
+
+/* tracer */
+struct rb_trace_arg_struct {
+    rb_event_flag_t event;
+    rb_thread_t *th;
+    rb_control_frame_t *cfp;
+    VALUE self;
+    ID id;
+    VALUE klass;
+    VALUE data;
+
+    int klass_solved;
+
+    /* calc from cfp */
+    int lineno;
+    VALUE path;
+};
+
+void rb_threadptr_exec_event_hooks(struct rb_trace_arg_struct *trace_arg);
+void rb_threadptr_exec_event_hooks_and_pop_frame(struct rb_trace_arg_struct *trace_arg);
+
+#define EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, pop_p_) do { \
+    if (UNLIKELY(ruby_vm_event_flags & (flag_))) { \
+        if (((th)->event_hooks.events | (th)->vm->event_hooks.events) & (flag_)) { \
+            struct rb_trace_arg_struct trace_arg; \
+            trace_arg.event = (flag_); \
+            trace_arg.th = (th_); \
+            trace_arg.cfp = (trace_arg.th)->cfp; \
+            trace_arg.self = (self_); \
+            trace_arg.id = (id_); \
+            trace_arg.klass = (klass_); \
+            trace_arg.data = (data_); \
+            trace_arg.path = Qundef; \
+            trace_arg.klass_solved = 0; \
+            if (pop_p_) rb_threadptr_exec_event_hooks_and_pop_frame(&trace_arg); \
+            else rb_threadptr_exec_event_hooks(&trace_arg); \
+        } \
+    } \
+} while (0)
+
+#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_) \
+    EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 0)
+
+#define EXEC_EVENT_HOOK_AND_POP_FRAME(th_, flag_, self_, id_, klass_, data_) \
+    EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 1)
+
+RUBY_SYMBOL_EXPORT_BEGIN
+
+int rb_thread_check_trap_pending(void);
+
+extern VALUE rb_get_coverages(void);
+extern void rb_set_coverages(VALUE);
+extern void rb_reset_coverages(void);
+
+void rb_postponed_job_flush(rb_vm_t *vm);
+
+RUBY_SYMBOL_EXPORT_END
+
+#endif /* RUBY_VM_CORE_H */