debugger-ruby_core_source 1.3.6 → 1.3.7

Files changed (43)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/README.md +1 -1
  4. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/addr2line.h +21 -0
  5. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/constant.h +34 -0
  6. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/debug.h +41 -0
  7. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/dln.h +50 -0
  8. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/encdb.h +167 -0
  9. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/eval_intern.h +234 -0
  10. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/gc.h +99 -0
  11. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/id.h +177 -0
  12. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/insns.inc +179 -0
  13. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/insns_info.inc +695 -0
  14. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/internal.h +242 -0
  15. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/iseq.h +126 -0
  16. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/known_errors.inc +731 -0
  17. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/method.h +105 -0
  18. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/node.h +504 -0
  19. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/node_name.inc +208 -0
  20. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/opt_sc.inc +670 -0
  21. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/optinsn.inc +30 -0
  22. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/optunifs.inc +116 -0
  23. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/parse.h +186 -0
  24. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/regenc.h +219 -0
  25. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/regint.h +850 -0
  26. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/regparse.h +362 -0
  27. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/revision.h +1 -0
  28. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/ruby_atomic.h +175 -0
  29. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/siphash.h +48 -0
  30. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/thread_pthread.h +51 -0
  31. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/thread_win32.h +40 -0
  32. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/timev.h +21 -0
  33. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/transcode_data.h +117 -0
  34. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/transdb.h +189 -0
  35. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/version.h +52 -0
  36. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm.inc +3054 -0
  37. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm_core.h +763 -0
  38. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm_exec.h +184 -0
  39. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm_insnhelper.h +220 -0
  40. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm_opts.h +51 -0
  41. data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vmtc.inc +97 -0
  42. data/lib/debugger/ruby_core_source/version.rb +1 -1
  43. metadata +40 -2
data/lib/debugger/ruby_core_source/ruby-1.9.3-p551/vm_core.h
@@ -0,0 +1,763 @@
+ /**********************************************************************
+
+ vm_core.h -
+
+ $Author: usa $
+ created at: 04/01/01 19:41:38 JST
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_VM_CORE_H
+ #define RUBY_VM_CORE_H
+
+ #define RUBY_VM_THREAD_MODEL 2
+
+ #include "ruby/ruby.h"
+ #include "ruby/st.h"
+
+ #include "node.h"
+ #include "debug.h"
+ #include "vm_opts.h"
+ #include "id.h"
+ #include "method.h"
+ #include "ruby_atomic.h"
+
+ #if defined(_WIN32)
+ #include "thread_win32.h"
+ #elif defined(HAVE_PTHREAD_H)
+ #include "thread_pthread.h"
+ #else
+ #error "unsupported thread type"
+ #endif
+
+ #ifndef ENABLE_VM_OBJSPACE
+ #ifdef _WIN32
+ /*
+ * TODO: object space independent st_table.
+ * socklist needs st_table in rb_w32_sysinit(), before object space
+ * initialization.
+ * It is too early now to change st_hash_type, since it breaks binary
+ * compatibility.
+ */
+ #define ENABLE_VM_OBJSPACE 0
+ #else
+ #define ENABLE_VM_OBJSPACE 1
+ #endif
+ #endif
+
+ #include <setjmp.h>
+ #include <signal.h>
+
+ #ifndef NSIG
+ # define NSIG (_SIGMAX + 1) /* For QNX */
+ #endif
+
+ #define RUBY_NSIG NSIG
+
+ #ifdef HAVE_STDARG_PROTOTYPES
+ #include <stdarg.h>
+ #define va_init_list(a,b) va_start((a),(b))
+ #else
+ #include <varargs.h>
+ #define va_init_list(a,b) va_start((a))
+ #endif
+
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+ #define USE_SIGALTSTACK
+ #endif
+
+ /*****************/
+ /* configuration */
+ /*****************/
+
+ /* gcc ver. check */
+ #if defined(__GNUC__) && __GNUC__ >= 2
+
+ #if OPT_TOKEN_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #endif
+
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+ /* disable threaded code options */
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #if OPT_TOKEN_THREADED_CODE
+ #undef OPT_TOKEN_THREADED_CODE
+ #endif
+ #endif
+
+ /* call threaded code */
+ #if OPT_CALL_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif /* OPT_DIRECT_THREADED_CODE */
+ #if OPT_STACK_CACHING
+ #undef OPT_STACK_CACHING
+ #endif /* OPT_STACK_CACHING */
+ #endif /* OPT_CALL_THREADED_CODE */
+
+ /* likely */
+ #if __GNUC__ >= 3
+ #define LIKELY(x) (__builtin_expect((x), 1))
+ #define UNLIKELY(x) (__builtin_expect((x), 0))
+ #else /* __GNUC__ >= 3 */
+ #define LIKELY(x) (x)
+ #define UNLIKELY(x) (x)
+ #endif /* __GNUC__ >= 3 */
+
+ #if __GNUC__ >= 3
+ #define UNINITIALIZED_VAR(x) x = x
+ #else
+ #define UNINITIALIZED_VAR(x) x
+ #endif
+
+ typedef unsigned long rb_num_t;
+
+ /* iseq data type */
+
+ struct iseq_compile_data_ensure_node_stack;
+
+ typedef struct rb_compile_option_struct rb_compile_option_t;
+
+ struct iseq_inline_cache_entry {
+ VALUE ic_vmstat;
+ VALUE ic_class;
+ union {
+ VALUE value;
+ rb_method_entry_t *method;
+ long index;
+ } ic_value;
+ };
+
+ #if 1
+ #define GetCoreDataFromValue(obj, type, ptr) do { \
+ (ptr) = (type*)DATA_PTR(obj); \
+ } while (0)
+ #else
+ #define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct((obj), type, (ptr))
+ #endif
+
+ #define GetISeqPtr(obj, ptr) \
+ GetCoreDataFromValue((obj), rb_iseq_t, (ptr))
+
+ struct rb_iseq_struct;
+
+ struct rb_iseq_struct {
+ /***************/
+ /* static data */
+ /***************/
+
+ enum iseq_type {
+ ISEQ_TYPE_TOP,
+ ISEQ_TYPE_METHOD,
+ ISEQ_TYPE_BLOCK,
+ ISEQ_TYPE_CLASS,
+ ISEQ_TYPE_RESCUE,
+ ISEQ_TYPE_ENSURE,
+ ISEQ_TYPE_EVAL,
+ ISEQ_TYPE_MAIN,
+ ISEQ_TYPE_DEFINED_GUARD
+ } type; /* instruction sequence type */
+
+ VALUE name; /* String: iseq name */
+ VALUE filename; /* file information where this sequence from */
+ VALUE filepath; /* real file path or nil */
+ VALUE *iseq; /* iseq (insn number and operands) */
+ VALUE *iseq_encoded; /* encoded iseq */
+ unsigned long iseq_size;
+ VALUE mark_ary; /* Array: includes operands which should be GC marked */
+ VALUE coverage; /* coverage array */
+ unsigned short line_no;
+
+ /* insn info, must be freed */
+ struct iseq_insn_info_entry *insn_info_table;
+ size_t insn_info_size;
+
+ ID *local_table; /* must free */
+ int local_table_size;
+
+ /* method, class frame: sizeof(vars) + 1, block frame: sizeof(vars) */
+ int local_size;
+
+ struct iseq_inline_cache_entry *ic_entries;
+ int ic_size;
+
+ /**
+ * argument information
+ *
+ * def m(a1, a2, ..., aM, # mandatory
+ * b1=(...), b2=(...), ..., bN=(...), # optional
+ * *c, # rest
+ * d1, d2, ..., dO, # post
+ * &e) # block
+ * =>
+ *
+ * argc = M
+ * arg_rest = M+N+1 // or -1 if no rest arg
+ * arg_opts = N+1 // or 0 if no optional arg
+ * arg_opt_table = [ (arg_opts entries) ]
+ * arg_post_len = O // 0 if no post arguments
+ * arg_post_start = M+N+2
+ * arg_block = M+N + 1 + O + 1 // -1 if no block arg
+ * arg_simple = 0 if not simple arguments.
+ * = 1 if no opt, rest, post, block.
+ * = 2 if ambiguous block parameter ({|a|}).
+ * arg_size = argument size.
+ */
+
+ int argc;
+ int arg_simple;
+ int arg_rest;
+ int arg_block;
+ int arg_opts;
+ int arg_post_len;
+ int arg_post_start;
+ int arg_size;
+ VALUE *arg_opt_table;
+
+ size_t stack_max; /* for stack overflow check */
+
+ /* catch table */
+ struct iseq_catch_table_entry *catch_table;
+ int catch_table_size;
+
+ /* for child iseq */
+ struct rb_iseq_struct *parent_iseq;
+ struct rb_iseq_struct *local_iseq;
+
+ /****************/
+ /* dynamic data */
+ /****************/
+
+ VALUE self;
+ VALUE orig; /* non-NULL if its data have origin */
+
+ /* block inlining */
+ /*
+ * NODE *node;
+ * void *special_block_builder;
+ * void *cached_special_block_builder;
+ * VALUE cached_special_block;
+ */
+
+ /* klass/module nest information stack (cref) */
+ NODE *cref_stack;
+ VALUE klass;
+
+ /* misc */
+ ID defined_method_id; /* for define_method */
+ rb_num_t flip_cnt;
+
+ /* used at compile time */
+ struct iseq_compile_data *compile_data;
+ };
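A worked instance of the argument layout documented in the comment above, obtained purely by plugging into its formulas (not verified against the VM): for a hypothetical def m(a, b=1, *c, d, &e), M=1, N=1, O=1, so argc = 1, arg_opts = N+1 = 2, arg_rest = M+N+1 = 3, arg_post_len = O = 1, arg_post_start = M+N+2 = 4, arg_block = M+N+1+O+1 = 5, and arg_simple = 0 because optional, rest, post and block parameters are all present.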
+
+ enum ruby_special_exceptions {
+ ruby_error_reenter,
+ ruby_error_nomemory,
+ ruby_error_sysstack,
+ ruby_error_closed_stream,
+ ruby_special_error_count
+ };
+
+ #define GetVMPtr(obj, ptr) \
+ GetCoreDataFromValue((obj), rb_vm_t, (ptr))
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+ struct rb_objspace;
+ void rb_objspace_free(struct rb_objspace *);
+ #endif
+
+ typedef struct rb_vm_struct {
+ VALUE self;
+
+ rb_global_vm_lock_t gvl;
+
+ struct rb_thread_struct *main_thread;
+ struct rb_thread_struct *running_thread;
+
+ st_table *living_threads;
+ VALUE thgroup_default;
+
+ int running;
+ int inhibit_thread_creation;
+ int thread_abort_on_exception;
+ unsigned long trace_flag;
+ volatile int sleeper;
+
+ /* object management */
+ VALUE mark_object_ary;
+
+ VALUE special_exceptions[ruby_special_error_count];
+
+ /* load */
+ VALUE top_self;
+ VALUE load_path;
+ VALUE loaded_features;
+ struct st_table *loading_table;
+
+ /* signal */
+ struct {
+ VALUE cmd;
+ int safe;
+ } trap_list[RUBY_NSIG];
+
+ /* hook */
+ rb_event_hook_t *event_hooks;
+
+ int src_encoding_index;
+
+ VALUE verbose, debug, progname;
+ VALUE coverages;
+
+ struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+ struct rb_objspace *objspace;
+ #endif
+
+ /*
+ * @shyouhei notes that this is not for storing normal Ruby
+ * objects so do *NOT* mark this when you GC.
+ */
+ struct RArray at_exit;
+ } rb_vm_t;
+
+ typedef struct {
+ VALUE *pc; /* cfp[0] */
+ VALUE *sp; /* cfp[1] */
+ VALUE *bp; /* cfp[2] */
+ rb_iseq_t *iseq; /* cfp[3] */
+ VALUE flag; /* cfp[4] */
+ VALUE self; /* cfp[5] / block[0] */
+ VALUE *lfp; /* cfp[6] / block[1] */
+ VALUE *dfp; /* cfp[7] / block[2] */
+ rb_iseq_t *block_iseq; /* cfp[8] / block[3] */
+ VALUE proc; /* cfp[9] / block[4] */
+ const rb_method_entry_t *me;/* cfp[10] */
+ } rb_control_frame_t;
+
+ typedef struct rb_block_struct {
+ VALUE self; /* share with method frame if it's only block */
+ VALUE *lfp; /* share with method frame if it's only block */
+ VALUE *dfp; /* share with method frame if it's only block */
+ rb_iseq_t *iseq;
+ VALUE proc;
+ } rb_block_t;
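The cfp[5..9] / block[0..4] annotations above mean an rb_block_t is laid out as a window into its control frame; RUBY_VM_GET_BLOCK_PTR_IN_CFP and RUBY_VM_GET_CFP_FROM_BLOCK_PTR, defined further down in this header, convert between the two views. A minimal sketch of that aliasing, illustrative only and not part of this file:

    #include "vm_core.h"

    /* Illustrative only: view a frame's block fields in place. Relies on the
       cfp[5..9] / block[0..4] aliasing noted in the comments above. */
    static rb_block_t *
    block_view_of_frame(rb_control_frame_t *cfp)
    {
        rb_block_t *blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        /* RUBY_VM_GET_CFP_FROM_BLOCK_PTR(blockptr) recovers cfp again */
        return blockptr;
    }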
+
+ extern const rb_data_type_t ruby_threadptr_data_type;
+
+ #define GetThreadPtr(obj, ptr) \
+ TypedData_Get_Struct((obj), rb_thread_t, &ruby_threadptr_data_type, (ptr))
+
+ enum rb_thread_status {
+ THREAD_TO_KILL,
+ THREAD_RUNNABLE,
+ THREAD_STOPPED,
+ THREAD_STOPPED_FOREVER,
+ THREAD_KILLED
+ };
+
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
+
+ struct rb_vm_tag {
+ rb_jmpbuf_t buf;
+ VALUE tag;
+ VALUE retval;
+ struct rb_vm_tag *prev;
+ };
+
+ struct rb_vm_protect_tag {
+ struct rb_vm_protect_tag *prev;
+ };
+
+ struct rb_unblock_callback {
+ rb_unblock_function_t *func;
+ void *arg;
+ };
+
+ struct rb_mutex_struct;
+
+ #ifdef SIGSTKSZ
+ #define ALT_STACK_SIZE (SIGSTKSZ*2)
+ #else
+ #define ALT_STACK_SIZE (4*1024)
+ #endif
+
+ typedef struct rb_thread_struct {
+ VALUE self;
+ rb_vm_t *vm;
+
+ /* execution information */
+ VALUE *stack; /* must free, must mark */
+ unsigned long stack_size;
+ rb_control_frame_t *cfp;
+ int safe_level;
+ int raised_flag;
+ VALUE last_status; /* $? */
+
+ /* passing state */
+ int state;
+
+ int waiting_fd;
+
+ /* for rb_iterate */
+ const rb_block_t *passed_block;
+
+ /* for bmethod */
+ const rb_method_entry_t *passed_me;
+
+ /* for load(true) */
+ VALUE top_self;
+ VALUE top_wrapper;
+
+ /* eval env */
+ rb_block_t *base_block;
+
+ VALUE *local_lfp;
+ VALUE local_svar;
+
+ /* thread control */
+ rb_thread_id_t thread_id;
+ enum rb_thread_status status;
+ int priority;
+
+ native_thread_data_t native_thread_data;
+ void *blocking_region_buffer;
+
+ VALUE thgroup;
+ VALUE value;
+
+ VALUE errinfo;
+ VALUE thrown_errinfo;
+
+ rb_atomic_t interrupt_flag;
+ rb_thread_lock_t interrupt_lock;
+ struct rb_unblock_callback unblock;
+ VALUE locking_mutex;
+ struct rb_mutex_struct *keeping_mutexes;
+
+ struct rb_vm_tag *tag;
+ struct rb_vm_protect_tag *protect_tag;
+
+ int parse_in_eval;
+ int mild_compile_error;
+
+ /* storage */
+ st_table *local_storage;
+
+ struct rb_thread_struct *join_list_next;
+ struct rb_thread_struct *join_list_head;
+
+ VALUE first_proc;
+ VALUE first_args;
+ VALUE (*first_func)(ANYARGS);
+
+ /* for GC */
+ VALUE *machine_stack_start;
+ VALUE *machine_stack_end;
+ size_t machine_stack_maxsize;
+ #ifdef __ia64
+ VALUE *machine_register_stack_start;
+ VALUE *machine_register_stack_end;
+ size_t machine_register_stack_maxsize;
+ #endif
+ jmp_buf machine_regs;
+ int mark_stack_len;
+
+ /* statistics data for profiler */
+ VALUE stat_insn_usage;
+
+ /* tracer */
+ rb_event_hook_t *event_hooks;
+ rb_event_flag_t event_flags;
+ int tracing;
+
+ /* fiber */
+ VALUE fiber;
+ VALUE root_fiber;
+ rb_jmpbuf_t root_jmpbuf;
+
+ /* misc */
+ int method_missing_reason;
+ int abort_on_exception;
+ #ifdef USE_SIGALTSTACK
+ void *altstack;
+ #endif
+ unsigned long running_time_us;
+ } rb_thread_t;
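GetThreadPtr, defined a few lines above, is how C extensions built against these headers unwrap a Ruby Thread object into the rb_thread_t defined here. A minimal sketch, assuming the ruby-1.9.3-p551 headers shipped by this gem are on the include path; the helper itself is hypothetical:

    #include "vm_core.h"

    /* Hypothetical helper: map a Thread VALUE to a human-readable VM status
       using the enum rb_thread_status and rb_thread_t declared above. */
    static const char *
    thread_status_name(VALUE thval)
    {
        rb_thread_t *th;
        GetThreadPtr(thval, th);   /* unwraps the TypedData Thread object */
        switch (th->status) {
          case THREAD_RUNNABLE:        return "runnable";
          case THREAD_STOPPED:         return "stopped";
          case THREAD_STOPPED_FOREVER: return "stopped forever";
          case THREAD_TO_KILL:         return "to kill";
          case THREAD_KILLED:          return "killed";
        }
        return "unknown";
    }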
+
+ /* iseq.c */
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+ VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, enum iseq_type);
+ VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE filename, VALUE filepath, VALUE parent);
+ VALUE rb_iseq_new_main(NODE *node, VALUE filename, VALUE filepath);
+ VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
+ VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, const rb_compile_option_t*);
+ VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
+ VALUE rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE filepath, VALUE line, VALUE opt);
+ VALUE rb_iseq_disasm(VALUE self);
+ int rb_iseq_disasm_insn(VALUE str, VALUE *iseqval, size_t pos, rb_iseq_t *iseq, VALUE child);
+ const char *ruby_node_name(int node);
+ int rb_iseq_first_lineno(rb_iseq_t *iseq);
+
+ RUBY_EXTERN VALUE rb_cISeq;
+ RUBY_EXTERN VALUE rb_cRubyVM;
+ RUBY_EXTERN VALUE rb_cEnv;
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+
+ /* each thread has this size stack : 128KB */
+ #define RUBY_VM_THREAD_STACK_SIZE (128 * 1024)
+
+ #define GetProcPtr(obj, ptr) \
+ GetCoreDataFromValue((obj), rb_proc_t, (ptr))
+
+ typedef struct {
+ rb_block_t block;
+
+ VALUE envval; /* for GC mark */
+ VALUE blockprocval;
+ int safe_level;
+ int is_from_method;
+ int is_lambda;
+ } rb_proc_t;
+
+ #define GetEnvPtr(obj, ptr) \
+ GetCoreDataFromValue((obj), rb_env_t, (ptr))
+
+ typedef struct {
+ VALUE *env;
+ int env_size;
+ int local_size;
+ VALUE prev_envval; /* for GC mark */
+ rb_block_t block;
+ } rb_env_t;
+
+ #define GetBindingPtr(obj, ptr) \
+ GetCoreDataFromValue((obj), rb_binding_t, (ptr))
+
+ typedef struct {
+ VALUE env;
+ VALUE filename;
+ unsigned short line_no;
+ } rb_binding_t;
+
+ /* used by compile time and send insn */
+ #define VM_CALL_ARGS_SPLAT_BIT (0x01 << 1)
+ #define VM_CALL_ARGS_BLOCKARG_BIT (0x01 << 2)
+ #define VM_CALL_FCALL_BIT (0x01 << 3)
+ #define VM_CALL_VCALL_BIT (0x01 << 4)
+ #define VM_CALL_TAILCALL_BIT (0x01 << 5)
+ #define VM_CALL_TAILRECURSION_BIT (0x01 << 6)
+ #define VM_CALL_SUPER_BIT (0x01 << 7)
+ #define VM_CALL_OPT_SEND_BIT (0x01 << 8)
+
+ enum vm_special_object_type {
+ VM_SPECIAL_OBJECT_VMCORE = 1,
+ VM_SPECIAL_OBJECT_CBASE,
+ VM_SPECIAL_OBJECT_CONST_BASE
+ };
+
+ #define VM_FRAME_MAGIC_METHOD 0x11
+ #define VM_FRAME_MAGIC_BLOCK 0x21
+ #define VM_FRAME_MAGIC_CLASS 0x31
+ #define VM_FRAME_MAGIC_TOP 0x41
+ #define VM_FRAME_MAGIC_FINISH 0x51
+ #define VM_FRAME_MAGIC_CFUNC 0x61
+ #define VM_FRAME_MAGIC_PROC 0x71
+ #define VM_FRAME_MAGIC_IFUNC 0x81
+ #define VM_FRAME_MAGIC_EVAL 0x91
+ #define VM_FRAME_MAGIC_LAMBDA 0xa1
+ #define VM_FRAME_MAGIC_MASK_BITS 8
+ #define VM_FRAME_MAGIC_MASK (~(~0<<VM_FRAME_MAGIC_MASK_BITS))
+
+ #define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
+
+ /* other frame flag */
+ #define VM_FRAME_FLAG_PASSED 0x0100
+
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+ /* inline cache */
+ typedef struct iseq_inline_cache_entry *IC;
+
+ void rb_vm_change_state(void);
+
+ typedef VALUE CDHASH;
+
+ #ifndef FUNC_FASTCALL
+ #define FUNC_FASTCALL(x) x
+ #endif
+
+ typedef rb_control_frame_t *
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
+
+ #define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
+ #define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
+ #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
+
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
+ #define RUBY_VM_END_CONTROL_FRAME(th) \
+ ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+ ((void *)(ecfp) > (void *)(cfp))
+ #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
+ (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
+
+ #define RUBY_VM_IFUNC_P(ptr) (BUILTIN_TYPE(ptr) == T_NODE)
+ #define RUBY_VM_NORMAL_ISEQ_P(ptr) \
+ ((ptr) && !RUBY_VM_IFUNC_P(ptr))
+
+ #define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
+ #define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
+ ((rb_control_frame_t *)((VALUE *)(b) - 5))
+
+ /* VM related object allocate functions */
+ VALUE rb_thread_alloc(VALUE klass);
+ VALUE rb_proc_alloc(VALUE klass);
+
+ /* for debug */
+ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
+ void rb_vm_bugreport(void);
+
+ /* functions about thread/vm execution */
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+ VALUE rb_iseq_eval(VALUE iseqval);
+ VALUE rb_iseq_eval_main(VALUE iseqval);
+ void rb_enable_interrupt(void);
+ void rb_disable_interrupt(void);
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+ int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
+
+ VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, const rb_block_t *blockptr);
+ VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+ VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
+ void rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th);
+ void rb_vm_inc_const_missing_count(void);
+ void rb_vm_gvl_destroy(rb_vm_t *vm);
+ VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
+ const VALUE *argv, const rb_method_entry_t *me);
+ void rb_unlink_method_entry(rb_method_entry_t *me);
+ void rb_gc_mark_unlinked_live_method_entries(void *pvm);
+
+ void rb_thread_start_timer_thread(void);
+ void rb_thread_stop_timer_thread(int);
+ void rb_thread_reset_timer_thread(void);
+ void rb_thread_wakeup_timer_thread(void);
+
+ int ruby_thread_has_gvl_p(void);
+ VALUE rb_make_backtrace(void);
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+ int rb_backtrace_each(rb_backtrace_iter_func *iter, void *arg);
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
+ VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
+ void rb_vm_stack_to_heap(rb_thread_t *th);
+ void ruby_thread_init_stack(rb_thread_t *th);
+
+ NOINLINE(void rb_gc_save_machine_context(rb_thread_t *));
+ void rb_gc_mark_machine_stack(rb_thread_t *th);
+
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+ /* for thread */
+
+ #if RUBY_VM_THREAD_MODEL == 2
+ RUBY_EXTERN rb_thread_t *ruby_current_thread;
+ extern rb_vm_t *ruby_current_vm;
+
+ #define GET_VM() ruby_current_vm
+ #define GET_THREAD() ruby_current_thread
+ #define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+ #define rb_thread_set_current(th) do { \
+ if ((th)->vm->running_thread != (th)) { \
+ (th)->vm->running_thread->running_time_us = 0; \
+ } \
+ rb_thread_set_current_raw(th); \
+ (th)->vm->running_thread = (th); \
+ } while (0)
+
+ #else
+ #error "unsupported thread model"
+ #endif
+
+ #define RUBY_VM_SET_TIMER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x01)
+ #define RUBY_VM_SET_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x02)
+ #define RUBY_VM_SET_FINALIZER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x04)
+ #define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & 0x02)
+
+ int rb_signal_buff_size(void);
+ void rb_signal_exec(rb_thread_t *th, int sig);
+ void rb_threadptr_check_signal(rb_thread_t *mth);
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+ void rb_threadptr_signal_exit(rb_thread_t *th);
+ void rb_threadptr_execute_interrupts(rb_thread_t *);
+ void rb_threadptr_interrupt(rb_thread_t *th);
+ void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
+
+ void rb_thread_lock_unlock(rb_thread_lock_t *);
+ void rb_thread_lock_destroy(rb_thread_lock_t *);
+
+ #define RUBY_VM_CHECK_INTS_TH(th) do { \
+ if (UNLIKELY((th)->interrupt_flag)) { \
+ rb_threadptr_execute_interrupts(th); \
+ } \
+ } while (0)
+
+ #define RUBY_VM_CHECK_INTS() \
+ RUBY_VM_CHECK_INTS_TH(GET_THREAD())
+
+ /* tracer */
+ void
+ rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass, int pop_p);
+
+ #define EXEC_EVENT_HOOK_ORIG(th, flag, self, id, klass, pop_p) do { \
+ rb_event_flag_t wait_event__ = (th)->event_flags; \
+ if (UNLIKELY(wait_event__)) { \
+ if (wait_event__ & ((flag) | RUBY_EVENT_VM)) { \
+ rb_threadptr_exec_event_hooks((th), (flag), (self), (id), (klass), (pop_p)); \
+ } \
+ } \
+ } while (0)
+
+ #define EXEC_EVENT_HOOK(th, flag, self, id, klass) \
+ EXEC_EVENT_HOOK_ORIG(th, flag, self, id, klass, 0)
+
+ #define EXEC_EVENT_HOOK_AND_POP_FRAME(th, flag, self, id, klass) \
+ EXEC_EVENT_HOOK_ORIG(th, flag, self, id, klass, 1)
+
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+
+ int rb_thread_check_trap_pending(void);
+
+ extern VALUE rb_get_coverages(void);
+ extern void rb_set_coverages(VALUE);
+ extern void rb_reset_coverages(void);
+
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+
+ #endif /* RUBY_VM_CORE_H */
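The control-frame macros above (RUBY_VM_END_CONTROL_FRAME, RUBY_VM_PREVIOUS_CONTROL_FRAME, RUBY_VM_NORMAL_ISEQ_P) are the pieces a debugger extension uses to walk the Ruby-level stack, which is why this gem ships the header. A rough sketch under the same assumptions as above; the function name is illustrative and the walk must run with the GVL held:

    #include <stdio.h>
    #include "vm_core.h"

    /* Hypothetical helper: print file:line for each Ruby-level frame of the
       current thread, using only declarations from vm_core.h. */
    static void
    dump_ruby_frames(void)
    {
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = th->cfp;
        rb_control_frame_t *end = RUBY_VM_END_CONTROL_FRAME(th);

        while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end)) {
            if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
                fprintf(stderr, "%s:%d\n",
                        RSTRING_PTR(cfp->iseq->filename),
                        rb_vm_get_sourceline(cfp));
            }
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);  /* older frames sit higher */
        }
    }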