debase-ruby_core_source 0.8.5 → 0.8.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +5 -1
  3. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/addr2line.h +21 -0
  4. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/constant.h +36 -0
  5. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/dln.h +51 -0
  6. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/encdb.h +170 -0
  7. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/eval_intern.h +270 -0
  8. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/gc.h +101 -0
  9. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/id.h +171 -0
  10. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/insns.inc +189 -0
  11. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/insns_info.inc +731 -0
  12. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/internal.h +896 -0
  13. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/iseq.h +136 -0
  14. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/known_errors.inc +731 -0
  15. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/method.h +145 -0
  16. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/node.h +543 -0
  17. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/node_name.inc +212 -0
  18. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/opt_sc.inc +710 -0
  19. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/optinsn.inc +83 -0
  20. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/optunifs.inc +121 -0
  21. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/parse.h +183 -0
  22. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/probes_helper.h +67 -0
  23. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/regenc.h +223 -0
  24. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/regint.h +911 -0
  25. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/regparse.h +363 -0
  26. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/revision.h +1 -0
  27. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/ruby_atomic.h +170 -0
  28. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/siphash.h +48 -0
  29. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/thread_native.h +23 -0
  30. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/thread_pthread.h +56 -0
  31. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/thread_win32.h +45 -0
  32. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/timev.h +42 -0
  33. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/transcode_data.h +123 -0
  34. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/transdb.h +190 -0
  35. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/version.h +52 -0
  36. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm.inc +3241 -0
  37. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm_core.h +1057 -0
  38. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm_debug.h +37 -0
  39. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm_exec.h +182 -0
  40. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm_insnhelper.h +273 -0
  41. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vm_opts.h +56 -0
  42. data/lib/debase/ruby_core_source/ruby-2.1.10-p492/vmtc.inc +102 -0
  43. data/lib/debase/ruby_core_source/version.rb +1 -1
  44. metadata +42 -2
@@ -0,0 +1,1057 @@
1
+ /**********************************************************************
2
+
3
+ vm_core.h -
4
+
5
+ $Author: nagachika $
6
+ created at: 04/01/01 19:41:38 JST
7
+
8
+ Copyright (C) 2004-2007 Koichi Sasada
9
+
10
+ **********************************************************************/
11
+
12
+ #ifndef RUBY_VM_CORE_H
13
+ #define RUBY_VM_CORE_H
14
+
15
+ #define RUBY_VM_THREAD_MODEL 2
16
+
17
+ #include "ruby/ruby.h"
18
+ #include "ruby/st.h"
19
+
20
+ #include "node.h"
21
+ #include "vm_debug.h"
22
+ #include "vm_opts.h"
23
+ #include "id.h"
24
+ #include "method.h"
25
+ #include "ruby_atomic.h"
26
+
27
+ #include "thread_native.h"
28
+
29
+ #ifndef ENABLE_VM_OBJSPACE
30
+ #ifdef _WIN32
31
+ /*
32
+ * TODO: object space independent st_table.
33
+ * socklist needs st_table in rb_w32_sysinit(), before object space
34
+ * initialization.
35
+ * It is too early now to change st_hash_type, since it breaks binary
36
+ * compatibility.
37
+ */
38
+ #define ENABLE_VM_OBJSPACE 0
39
+ #else
40
+ #define ENABLE_VM_OBJSPACE 1
41
+ #endif
42
+ #endif
43
+
44
+ #include <setjmp.h>
45
+ #include <signal.h>
46
+
47
+ #ifndef NSIG
48
+ # define NSIG (_SIGMAX + 1) /* For QNX */
49
+ #endif
50
+
51
+ #define RUBY_NSIG NSIG
52
+
53
+ #ifdef HAVE_STDARG_PROTOTYPES
54
+ #include <stdarg.h>
55
+ #define va_init_list(a,b) va_start((a),(b))
56
+ #else
57
+ #include <varargs.h>
58
+ #define va_init_list(a,b) va_start((a))
59
+ #endif
60
+
61
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
62
+ #define USE_SIGALTSTACK
63
+ #endif
64
+
65
+ /*****************/
66
+ /* configuration */
67
+ /*****************/
68
+
69
+ /* gcc ver. check */
70
+ #if defined(__GNUC__) && __GNUC__ >= 2
71
+
72
+ #if OPT_TOKEN_THREADED_CODE
73
+ #if OPT_DIRECT_THREADED_CODE
74
+ #undef OPT_DIRECT_THREADED_CODE
75
+ #endif
76
+ #endif
77
+
78
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
79
+
80
+ /* disable threaded code options */
81
+ #if OPT_DIRECT_THREADED_CODE
82
+ #undef OPT_DIRECT_THREADED_CODE
83
+ #endif
84
+ #if OPT_TOKEN_THREADED_CODE
85
+ #undef OPT_TOKEN_THREADED_CODE
86
+ #endif
87
+ #endif
88
+
89
+ #ifdef __native_client__
90
+ #undef OPT_DIRECT_THREADED_CODE
91
+ #endif
92
+
93
+ /* call threaded code */
94
+ #if OPT_CALL_THREADED_CODE
95
+ #if OPT_DIRECT_THREADED_CODE
96
+ #undef OPT_DIRECT_THREADED_CODE
97
+ #endif /* OPT_DIRECT_THREADED_CODE */
98
+ #if OPT_STACK_CACHING
99
+ #undef OPT_STACK_CACHING
100
+ #endif /* OPT_STACK_CACHING */
101
+ #endif /* OPT_CALL_THREADED_CODE */
102
+
103
+ /* likely */
104
+ #if __GNUC__ >= 3
105
+ #define LIKELY(x) (__builtin_expect((x), 1))
106
+ #define UNLIKELY(x) (__builtin_expect((x), 0))
107
+ #else /* __GNUC__ >= 3 */
108
+ #define LIKELY(x) (x)
109
+ #define UNLIKELY(x) (x)
110
+ #endif /* __GNUC__ >= 3 */
111
+
112
+ #ifndef __has_attribute
113
+ # define __has_attribute(x) 0
114
+ #endif
115
+
116
+ #if __has_attribute(unused)
117
+ #define UNINITIALIZED_VAR(x) x __attribute__((unused))
118
+ #elif defined(__GNUC__) && __GNUC__ >= 3
119
+ #define UNINITIALIZED_VAR(x) x = x
120
+ #else
121
+ #define UNINITIALIZED_VAR(x) x
122
+ #endif
123
+
124
+ typedef unsigned long rb_num_t;
125
+
126
+ /* iseq data type */
127
+
128
+ struct iseq_compile_data_ensure_node_stack;
129
+
130
+ typedef struct rb_compile_option_struct rb_compile_option_t;
131
+
132
+
133
+ struct iseq_inline_cache_entry {
134
+ rb_serial_t ic_serial;
135
+ union {
136
+ size_t index;
137
+ VALUE value;
138
+ } ic_value;
139
+ };
140
+
141
+ union iseq_inline_storage_entry {
142
+ struct {
143
+ struct rb_thread_struct *running_thread;
144
+ VALUE value;
145
+ VALUE done;
146
+ } once;
147
+ struct iseq_inline_cache_entry cache;
148
+ };
149
+
150
+ /* to avoid warning */
151
+ struct rb_thread_struct;
152
+ struct rb_control_frame_struct;
153
+
154
+ /* rb_call_info_t contains calling information including inline cache */
155
+ typedef struct rb_call_info_struct {
156
+ /* fixed at compile time */
157
+ ID mid;
158
+ VALUE flag;
159
+ int orig_argc;
160
+ rb_iseq_t *blockiseq;
161
+
162
+ /* inline cache: keys */
163
+ rb_serial_t method_state;
164
+ rb_serial_t class_serial;
165
+ VALUE klass;
166
+
167
+ /* inline cache: values */
168
+ const rb_method_entry_t *me;
169
+ VALUE defined_class;
170
+
171
+ /* temporary values for method calling */
172
+ int argc;
173
+ struct rb_block_struct *blockptr;
174
+ VALUE recv;
175
+ union {
176
+ int opt_pc; /* used by iseq */
177
+ long index; /* used by ivar */
178
+ int missing_reason; /* used by method_missing */
179
+ int inc_sp; /* used by cfunc */
180
+ } aux;
181
+
182
+ VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_call_info_struct *ci);
183
+ } rb_call_info_t;
184
+
185
+ #if 1
186
+ #define GetCoreDataFromValue(obj, type, ptr) do { \
187
+ (ptr) = (type*)DATA_PTR(obj); \
188
+ } while (0)
189
+ #else
190
+ #define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct((obj), type, (ptr))
191
+ #endif
192
+
193
+ #define GetISeqPtr(obj, ptr) \
194
+ GetCoreDataFromValue((obj), rb_iseq_t, (ptr))
195
+
196
+ typedef struct rb_iseq_location_struct {
197
+ const VALUE path;
198
+ const VALUE absolute_path;
199
+ const VALUE base_label;
200
+ const VALUE label;
201
+ size_t first_lineno;
202
+ } rb_iseq_location_t;
203
+
204
+ struct rb_iseq_struct;
205
+
206
+ struct rb_iseq_struct {
207
+ /***************/
208
+ /* static data */
209
+ /***************/
210
+
211
+ enum iseq_type {
212
+ ISEQ_TYPE_TOP,
213
+ ISEQ_TYPE_METHOD,
214
+ ISEQ_TYPE_BLOCK,
215
+ ISEQ_TYPE_CLASS,
216
+ ISEQ_TYPE_RESCUE,
217
+ ISEQ_TYPE_ENSURE,
218
+ ISEQ_TYPE_EVAL,
219
+ ISEQ_TYPE_MAIN,
220
+ ISEQ_TYPE_DEFINED_GUARD
221
+ } type; /* instruction sequence type */
222
+
223
+ rb_iseq_location_t location;
224
+
225
+ VALUE *iseq; /* iseq (insn number and operands) */
226
+ VALUE *iseq_encoded; /* encoded iseq */
227
+ unsigned long iseq_size;
228
+ const VALUE mark_ary; /* Array: includes operands which should be GC marked */
229
+ const VALUE coverage; /* coverage array */
230
+
231
+ /* insn info, must be freed */
232
+ struct iseq_line_info_entry *line_info_table;
233
+ size_t line_info_size;
234
+
235
+ ID *local_table; /* must free */
236
+ int local_table_size;
237
+
238
+ /* sizeof(vars) + 1 */
239
+ int local_size;
240
+
241
+ union iseq_inline_storage_entry *is_entries;
242
+ int is_size;
243
+
244
+ rb_call_info_t *callinfo_entries;
245
+ int callinfo_size;
246
+
247
+ /**
248
+ * argument information
249
+ *
250
+ * def m(a1, a2, ..., aM, # mandatory
251
+ * b1=(...), b2=(...), ..., bN=(...), # optional
252
+ * *c, # rest
253
+ * d1, d2, ..., dO, # post
254
+ * e1:(...), e2:(...), ..., eK:(...), # keyword
255
+ * **f, # keyword rest
256
+ * &g) # block
257
+ * =>
258
+ *
259
+ * argc = M // or 0 if no mandatory arg
260
+ * arg_opts = N+1 // or 0 if no optional arg
261
+ * arg_rest = M+N // or -1 if no rest arg
262
+ * arg_opt_table = [ (arg_opts entries) ]
263
+ * arg_post_start = M+N+(*1) // or 0 if no post arguments
264
+ * arg_post_len = O // or 0 if no post arguments
265
+ * arg_keywords = K // or 0 if no keyword arg
266
+ * arg_block = M+N+(*1)+O+K // or -1 if no block arg
267
+ * arg_keyword = M+N+(*1)+O+K+(&1) // or -1 if no keyword arg/rest
268
+ * arg_simple = 0 if not simple arguments.
269
+ * = 1 if no opt, rest, post, block.
270
+ * = 2 if ambiguous block parameter ({|a|}).
271
+ * arg_size = M+N+O+(*1)+K+(&1)+(**1) argument size.
272
+ */
273
+
274
+ int argc;
275
+ int arg_simple;
276
+ int arg_rest;
277
+ int arg_block;
278
+ int arg_opts;
279
+ int arg_post_len;
280
+ int arg_post_start;
281
+ int arg_size;
282
+ VALUE *arg_opt_table;
283
+ int arg_keyword;
284
+ int arg_keyword_check; /* if this is true, raise an ArgumentError when unknown keyword argument is passed */
285
+ int arg_keywords;
286
+ int arg_keyword_required;
287
+ ID *arg_keyword_table;
288
+
289
+ size_t stack_max; /* for stack overflow check */
290
+
291
+ /* catch table */
292
+ struct iseq_catch_table_entry *catch_table;
293
+ int catch_table_size;
294
+
295
+ /* for child iseq */
296
+ struct rb_iseq_struct *parent_iseq;
297
+ struct rb_iseq_struct *local_iseq;
298
+
299
+ /****************/
300
+ /* dynamic data */
301
+ /****************/
302
+
303
+ VALUE self;
304
+ const VALUE orig; /* non-NULL if its data have origin */
305
+
306
+ /* block inlining */
307
+ /*
308
+ * NODE *node;
309
+ * void *special_block_builder;
310
+ * void *cached_special_block_builder;
311
+ * VALUE cached_special_block;
312
+ */
313
+
314
+ /* klass/module nest information stack (cref) */
315
+ NODE * const cref_stack;
316
+ const VALUE klass;
317
+
318
+ /* misc */
319
+ ID defined_method_id; /* for define_method */
320
+ rb_num_t flip_cnt;
321
+
322
+ /* used at compile time */
323
+ struct iseq_compile_data *compile_data;
324
+ };
325
+
326
+ enum ruby_special_exceptions {
327
+ ruby_error_reenter,
328
+ ruby_error_nomemory,
329
+ ruby_error_sysstack,
330
+ ruby_error_closed_stream,
331
+ ruby_special_error_count
332
+ };
333
+
334
+ #define GetVMPtr(obj, ptr) \
335
+ GetCoreDataFromValue((obj), rb_vm_t, (ptr))
336
+
337
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
338
+ struct rb_objspace;
339
+ void rb_objspace_free(struct rb_objspace *);
340
+ #endif
341
+
342
+ typedef struct rb_hook_list_struct {
343
+ struct rb_event_hook_struct *hooks;
344
+ rb_event_flag_t events;
345
+ int need_clean;
346
+ } rb_hook_list_t;
347
+
348
+ typedef struct rb_vm_struct {
349
+ VALUE self;
350
+
351
+ rb_global_vm_lock_t gvl;
352
+ rb_nativethread_lock_t thread_destruct_lock;
353
+
354
+ struct rb_thread_struct *main_thread;
355
+ struct rb_thread_struct *running_thread;
356
+
357
+ st_table *living_threads;
358
+ VALUE thgroup_default;
359
+
360
+ int running;
361
+ int thread_abort_on_exception;
362
+ int trace_running;
363
+ volatile int sleeper;
364
+
365
+ /* object management */
366
+ VALUE mark_object_ary;
367
+
368
+ VALUE special_exceptions[ruby_special_error_count];
369
+
370
+ /* load */
371
+ VALUE top_self;
372
+ VALUE load_path;
373
+ VALUE load_path_snapshot;
374
+ VALUE load_path_check_cache;
375
+ VALUE expanded_load_path;
376
+ VALUE loaded_features;
377
+ VALUE loaded_features_snapshot;
378
+ struct st_table *loaded_features_index;
379
+ struct st_table *loading_table;
380
+
381
+ /* signal */
382
+ struct {
383
+ VALUE cmd;
384
+ int safe;
385
+ } trap_list[RUBY_NSIG];
386
+
387
+ /* hook */
388
+ rb_hook_list_t event_hooks;
389
+
390
+ /* relation table of ensure - rollback for callcc */
391
+ struct st_table *ensure_rollback_table;
392
+
393
+ /* postponed_job */
394
+ struct rb_postponed_job_struct *postponed_job_buffer;
395
+ int postponed_job_index;
396
+
397
+ int src_encoding_index;
398
+
399
+ VALUE verbose, debug, orig_progname, progname;
400
+ VALUE coverages;
401
+
402
+ struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
403
+
404
+ VALUE defined_module_hash;
405
+
406
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
407
+ struct rb_objspace *objspace;
408
+ #endif
409
+
410
+ /*
411
+ * @shyouhei notes that this is not for storing normal Ruby
412
+ * objects so do *NOT* mark this when you GC.
413
+ */
414
+ struct RArray at_exit;
415
+
416
+ VALUE *defined_strings;
417
+
418
+ /* params */
419
+ struct { /* size in byte */
420
+ size_t thread_vm_stack_size;
421
+ size_t thread_machine_stack_size;
422
+ size_t fiber_vm_stack_size;
423
+ size_t fiber_machine_stack_size;
424
+ } default_params;
425
+ } rb_vm_t;
426
+
427
+ /* default values */
428
+
429
+ #define RUBY_VM_SIZE_ALIGN 4096
430
+
431
+ #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
432
+ #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
433
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
434
+ #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
435
+
436
+ #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
437
+ #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
438
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
439
+ #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
440
+
441
+ #ifndef VM_DEBUG_BP_CHECK
442
+ #define VM_DEBUG_BP_CHECK 0
443
+ #endif
444
+
445
+ typedef struct rb_control_frame_struct {
446
+ VALUE *pc; /* cfp[0] */
447
+ VALUE *sp; /* cfp[1] */
448
+ rb_iseq_t *iseq; /* cfp[2] */
449
+ VALUE flag; /* cfp[3] */
450
+ VALUE self; /* cfp[4] / block[0] */
451
+ VALUE klass; /* cfp[5] / block[1] */
452
+ VALUE *ep; /* cfp[6] / block[2] */
453
+ rb_iseq_t *block_iseq; /* cfp[7] / block[3] */
454
+ VALUE proc; /* cfp[8] / block[4] */
455
+ const rb_method_entry_t *me;/* cfp[9] */
456
+
457
+ #if VM_DEBUG_BP_CHECK
458
+ VALUE *bp_check; /* cfp[10] */
459
+ #endif
460
+ } rb_control_frame_t;
461
+
462
+ typedef struct rb_block_struct {
463
+ VALUE self; /* share with method frame if it's only block */
464
+ VALUE klass; /* share with method frame if it's only block */
465
+ VALUE *ep; /* share with method frame if it's only block */
466
+ rb_iseq_t *iseq;
467
+ VALUE proc;
468
+ } rb_block_t;
469
+
470
+ extern const rb_data_type_t ruby_threadptr_data_type;
471
+
472
+ #define GetThreadPtr(obj, ptr) \
473
+ TypedData_Get_Struct((obj), rb_thread_t, &ruby_threadptr_data_type, (ptr))
474
+
475
+ enum rb_thread_status {
476
+ THREAD_RUNNABLE,
477
+ THREAD_STOPPED,
478
+ THREAD_STOPPED_FOREVER,
479
+ THREAD_KILLED
480
+ };
481
+
482
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
483
+
484
+ /*
485
+ the members which are written in TH_PUSH_TAG() should be placed at
486
+ the beginning and the end, so that entire region is accessible.
487
+ */
488
+ struct rb_vm_tag {
489
+ VALUE tag;
490
+ VALUE retval;
491
+ rb_jmpbuf_t buf;
492
+ struct rb_vm_tag *prev;
493
+ };
494
+
495
+ struct rb_vm_protect_tag {
496
+ struct rb_vm_protect_tag *prev;
497
+ };
498
+
499
+ struct rb_unblock_callback {
500
+ rb_unblock_function_t *func;
501
+ void *arg;
502
+ };
503
+
504
+ struct rb_mutex_struct;
505
+
506
+ struct rb_thread_struct;
507
+ typedef struct rb_thread_list_struct{
508
+ struct rb_thread_list_struct *next;
509
+ struct rb_thread_struct *th;
510
+ } rb_thread_list_t;
511
+
512
+
513
+ typedef struct rb_ensure_entry {
514
+ VALUE marker;
515
+ VALUE (*e_proc)(ANYARGS);
516
+ VALUE data2;
517
+ } rb_ensure_entry_t;
518
+
519
+ typedef struct rb_ensure_list {
520
+ struct rb_ensure_list *next;
521
+ struct rb_ensure_entry entry;
522
+ } rb_ensure_list_t;
523
+
524
+ typedef struct rb_thread_struct {
525
+ VALUE self;
526
+ rb_vm_t *vm;
527
+
528
+ /* execution information */
529
+ VALUE *stack; /* must free, must mark */
530
+ size_t stack_size; /* size in word (byte size / sizeof(VALUE)) */
531
+ rb_control_frame_t *cfp;
532
+ int safe_level;
533
+ int raised_flag;
534
+ VALUE last_status; /* $? */
535
+
536
+ /* passing state */
537
+ int state;
538
+
539
+ int waiting_fd;
540
+
541
+ /* for rb_iterate */
542
+ const rb_block_t *passed_block;
543
+
544
+ /* for bmethod */
545
+ const rb_method_entry_t *passed_bmethod_me;
546
+
547
+ /* for cfunc */
548
+ rb_call_info_t *passed_ci;
549
+
550
+ /* for load(true) */
551
+ VALUE top_self;
552
+ VALUE top_wrapper;
553
+
554
+ /* eval env */
555
+ rb_block_t *base_block;
556
+
557
+ VALUE *root_lep;
558
+ VALUE root_svar;
559
+
560
+ /* thread control */
561
+ rb_nativethread_id_t thread_id;
562
+ enum rb_thread_status status;
563
+ int to_kill;
564
+ int priority;
565
+
566
+ native_thread_data_t native_thread_data;
567
+ void *blocking_region_buffer;
568
+
569
+ VALUE thgroup;
570
+ VALUE value;
571
+
572
+ /* temporary place of errinfo */
573
+ VALUE errinfo;
574
+
575
+ /* temporary place of retval on OPT_CALL_THREADED_CODE */
576
+ #if OPT_CALL_THREADED_CODE
577
+ VALUE retval;
578
+ #endif
579
+
580
+ /* async errinfo queue */
581
+ VALUE pending_interrupt_queue;
582
+ int pending_interrupt_queue_checked;
583
+ VALUE pending_interrupt_mask_stack;
584
+
585
+ rb_atomic_t interrupt_flag;
586
+ unsigned long interrupt_mask;
587
+ rb_nativethread_lock_t interrupt_lock;
588
+ rb_nativethread_cond_t interrupt_cond;
589
+ struct rb_unblock_callback unblock;
590
+ VALUE locking_mutex;
591
+ struct rb_mutex_struct *keeping_mutexes;
592
+
593
+ struct rb_vm_tag *tag;
594
+ struct rb_vm_protect_tag *protect_tag;
595
+
596
+ /*! Thread-local state of evaluation context.
597
+ *
598
+ * If negative, this thread is evaluating the main program.
599
+ * If positive, this thread is evaluating a program under Kernel::eval
600
+ * family.
601
+ */
602
+ int parse_in_eval;
603
+
604
+ /*! Thread-local state of compiling context.
605
+ *
606
+ * If non-zero, the parser does not automatically print error messages to
607
+ * stderr. */
608
+ int mild_compile_error;
609
+
610
+ /* storage */
611
+ st_table *local_storage;
612
+
613
+ rb_thread_list_t *join_list;
614
+
615
+ VALUE first_proc;
616
+ VALUE first_args;
617
+ VALUE (*first_func)(ANYARGS);
618
+
619
+ /* for GC */
620
+ struct {
621
+ VALUE *stack_start;
622
+ VALUE *stack_end;
623
+ size_t stack_maxsize;
624
+ #ifdef __ia64
625
+ VALUE *register_stack_start;
626
+ VALUE *register_stack_end;
627
+ size_t register_stack_maxsize;
628
+ #endif
629
+ jmp_buf regs;
630
+ } machine;
631
+ int mark_stack_len;
632
+
633
+ /* statistics data for profiler */
634
+ VALUE stat_insn_usage;
635
+
636
+ /* tracer */
637
+ rb_hook_list_t event_hooks;
638
+ struct rb_trace_arg_struct *trace_arg; /* trace information */
639
+
640
+ /* fiber */
641
+ VALUE fiber;
642
+ VALUE root_fiber;
643
+ rb_jmpbuf_t root_jmpbuf;
644
+
645
+ /* ensure & callcc */
646
+ rb_ensure_list_t *ensure_list;
647
+
648
+ /* misc */
649
+ int method_missing_reason;
650
+ int abort_on_exception;
651
+ #ifdef USE_SIGALTSTACK
652
+ void *altstack;
653
+ #endif
654
+ unsigned long running_time_us;
655
+ } rb_thread_t;
656
+
657
+ typedef enum {
658
+ VM_DEFINECLASS_TYPE_CLASS = 0x00,
659
+ VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
660
+ VM_DEFINECLASS_TYPE_MODULE = 0x02,
661
+ /* 0x03..0x06 is reserved */
662
+ VM_DEFINECLASS_TYPE_MASK = 0x07
663
+ } rb_vm_defineclass_type_t;
664
+
665
+ #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
666
+ #define VM_DEFINECLASS_FLAG_SCOPED 0x08
667
+ #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
668
+ #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
669
+ #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
670
+ ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
671
+
672
+ /* iseq.c */
673
+ RUBY_SYMBOL_EXPORT_BEGIN
674
+
675
+ /* node -> iseq */
676
+ VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, enum iseq_type);
677
+ VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE path, VALUE absolute_path, VALUE parent);
678
+ VALUE rb_iseq_new_main(NODE *node, VALUE path, VALUE absolute_path);
679
+ VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
680
+ VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, const rb_compile_option_t*);
681
+
682
+ /* src -> iseq */
683
+ VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
684
+ VALUE rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block);
685
+ VALUE rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt);
686
+
687
+ VALUE rb_iseq_disasm(VALUE self);
688
+ int rb_iseq_disasm_insn(VALUE str, VALUE *iseqval, size_t pos, rb_iseq_t *iseq, VALUE child);
689
+ const char *ruby_node_name(int node);
690
+
691
+ RUBY_EXTERN VALUE rb_cISeq;
692
+ RUBY_EXTERN VALUE rb_cRubyVM;
693
+ RUBY_EXTERN VALUE rb_cEnv;
694
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
695
+ RUBY_SYMBOL_EXPORT_END
696
+
697
+ #define GetProcPtr(obj, ptr) \
698
+ GetCoreDataFromValue((obj), rb_proc_t, (ptr))
699
+
700
+ typedef struct {
701
+ rb_block_t block;
702
+
703
+ VALUE envval; /* for GC mark */
704
+ VALUE blockprocval;
705
+ int safe_level;
706
+ int is_from_method;
707
+ int is_lambda;
708
+ } rb_proc_t;
709
+
710
+ #define GetEnvPtr(obj, ptr) \
711
+ GetCoreDataFromValue((obj), rb_env_t, (ptr))
712
+
713
+ typedef struct {
714
+ VALUE *env;
715
+ int env_size;
716
+ int local_size;
717
+ VALUE prev_envval; /* for GC mark */
718
+ rb_block_t block;
719
+ } rb_env_t;
720
+
721
+ extern const rb_data_type_t ruby_binding_data_type;
722
+
723
+ #define GetBindingPtr(obj, ptr) \
724
+ GetCoreDataFromValue((obj), rb_binding_t, (ptr))
725
+
726
+ typedef struct {
727
+ VALUE env;
728
+ VALUE path;
729
+ VALUE blockprocval; /* for GC mark */
730
+ unsigned short first_lineno;
731
+ } rb_binding_t;
732
+
733
+ /* used by compile time and send insn */
734
+
735
+ enum vm_check_match_type {
736
+ VM_CHECKMATCH_TYPE_WHEN = 1,
737
+ VM_CHECKMATCH_TYPE_CASE = 2,
738
+ VM_CHECKMATCH_TYPE_RESCUE = 3
739
+ };
740
+
741
+ #define VM_CHECKMATCH_TYPE_MASK 0x03
742
+ #define VM_CHECKMATCH_ARRAY 0x04
743
+
744
+ #define VM_CALL_ARGS_SPLAT (0x01 << 1) /* m(*args) */
745
+ #define VM_CALL_ARGS_BLOCKARG (0x01 << 2) /* m(&block) */
746
+ #define VM_CALL_FCALL (0x01 << 3) /* m(...) */
747
+ #define VM_CALL_VCALL (0x01 << 4) /* m */
748
+ #define VM_CALL_TAILCALL (0x01 << 5) /* located at tail position */
749
+ #define VM_CALL_SUPER (0x01 << 6) /* super */
750
+ #define VM_CALL_OPT_SEND (0x01 << 7) /* internal flag */
751
+ #define VM_CALL_ARGS_SKIP_SETUP (0x01 << 8) /* (flag & (SPLAT|BLOCKARG)) && blockiseq == 0 */
752
+
753
+ enum vm_special_object_type {
754
+ VM_SPECIAL_OBJECT_VMCORE = 1,
755
+ VM_SPECIAL_OBJECT_CBASE,
756
+ VM_SPECIAL_OBJECT_CONST_BASE
757
+ };
758
+
759
+ #define VM_FRAME_MAGIC_METHOD 0x11
760
+ #define VM_FRAME_MAGIC_BLOCK 0x21
761
+ #define VM_FRAME_MAGIC_CLASS 0x31
762
+ #define VM_FRAME_MAGIC_TOP 0x41
763
+ #define VM_FRAME_MAGIC_CFUNC 0x61
764
+ #define VM_FRAME_MAGIC_PROC 0x71
765
+ #define VM_FRAME_MAGIC_IFUNC 0x81
766
+ #define VM_FRAME_MAGIC_EVAL 0x91
767
+ #define VM_FRAME_MAGIC_LAMBDA 0xa1
768
+ #define VM_FRAME_MAGIC_RESCUE 0xb1
769
+ #define VM_FRAME_MAGIC_MASK_BITS 8
770
+ #define VM_FRAME_MAGIC_MASK (~(~0<<VM_FRAME_MAGIC_MASK_BITS))
771
+
772
+ #define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
773
+
774
+ /* other frame flag */
775
+ #define VM_FRAME_FLAG_PASSED 0x0100
776
+ #define VM_FRAME_FLAG_FINISH 0x0200
777
+ #define VM_FRAME_FLAG_BMETHOD 0x0400
778
+ #define VM_FRAME_TYPE_FINISH_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_FINISH) != 0)
779
+ #define VM_FRAME_TYPE_BMETHOD_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_BMETHOD) != 0)
780
+
781
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
782
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
783
+
784
+ /* inline cache */
785
+ typedef struct iseq_inline_cache_entry *IC;
786
+ typedef rb_call_info_t *CALL_INFO;
787
+
788
+ void rb_vm_change_state(void);
789
+
790
+ typedef VALUE CDHASH;
791
+
792
+ #ifndef FUNC_FASTCALL
793
+ #define FUNC_FASTCALL(x) x
794
+ #endif
795
+
796
+ typedef rb_control_frame_t *
797
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
798
+
799
+ #define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
800
+ #define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
801
+ #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
802
+
803
+ /*
804
+ * block frame:
805
+ * ep[ 0]: prev frame
806
+ * ep[-1]: CREF (for *_eval)
807
+ *
808
+ * method frame:
809
+ * ep[ 0]: block pointer (ptr | VM_ENVVAL_BLOCK_PTR_FLAG)
810
+ */
811
+
812
+ #define VM_ENVVAL_BLOCK_PTR_FLAG 0x02
813
+ #define VM_ENVVAL_BLOCK_PTR(v) (GC_GUARDED_PTR(v) | VM_ENVVAL_BLOCK_PTR_FLAG)
814
+ #define VM_ENVVAL_BLOCK_PTR_P(v) ((v) & VM_ENVVAL_BLOCK_PTR_FLAG)
815
+ #define VM_ENVVAL_PREV_EP_PTR(v) ((VALUE)GC_GUARDED_PTR(v))
816
+ #define VM_ENVVAL_PREV_EP_PTR_P(v) (!(VM_ENVVAL_BLOCK_PTR_P(v)))
817
+
818
+ #define VM_EP_PREV_EP(ep) ((VALUE *)GC_GUARDED_PTR_REF((ep)[0]))
819
+ #define VM_EP_BLOCK_PTR(ep) ((rb_block_t *)GC_GUARDED_PTR_REF((ep)[0]))
820
+ #define VM_EP_LEP_P(ep) VM_ENVVAL_BLOCK_PTR_P((ep)[0])
821
+
822
+ VALUE *rb_vm_ep_local_ep(VALUE *ep);
823
+ rb_block_t *rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp);
824
+
825
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
826
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
827
+ #define RUBY_VM_END_CONTROL_FRAME(th) \
828
+ ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
829
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
830
+ ((void *)(ecfp) > (void *)(cfp))
831
+ #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
832
+ (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
833
+
834
+ #define RUBY_VM_IFUNC_P(ptr) (BUILTIN_TYPE(ptr) == T_NODE)
835
+ #define RUBY_VM_NORMAL_ISEQ_P(ptr) \
836
+ ((ptr) && !RUBY_VM_IFUNC_P(ptr))
837
+
838
+ #define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
839
+ #define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
840
+ ((rb_control_frame_t *)((VALUE *)(b) - 4))
841
+ /* magic number `4' is depend on rb_control_frame_t layout. */
842
+
843
+ /* VM related object allocate functions */
844
+ VALUE rb_thread_alloc(VALUE klass);
845
+ VALUE rb_proc_alloc(VALUE klass);
846
+ VALUE rb_binding_alloc(VALUE klass);
847
+
848
+ /* for debug */
849
+ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
850
+ extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *_pc);
851
+ extern void rb_vmdebug_debug_print_post(rb_thread_t *th, rb_control_frame_t *cfp);
852
+
853
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
854
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
855
+ void rb_vm_bugreport(void);
856
+
857
+ /* functions about thread/vm execution */
858
+ RUBY_SYMBOL_EXPORT_BEGIN
859
+ VALUE rb_iseq_eval(VALUE iseqval);
860
+ VALUE rb_iseq_eval_main(VALUE iseqval);
861
+ RUBY_SYMBOL_EXPORT_END
862
+ int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
863
+
864
+ VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
865
+ int argc, const VALUE *argv, const rb_block_t *blockptr);
866
+ VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
867
+ VALUE rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp);
868
+ VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
869
+ VALUE rb_binding_new_with_cfp(rb_thread_t *th, const rb_control_frame_t *src_cfp);
870
+ VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
871
+ void rb_vm_inc_const_missing_count(void);
872
+ void rb_vm_gvl_destroy(rb_vm_t *vm);
873
+ VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
874
+ const VALUE *argv, const rb_method_entry_t *me,
875
+ VALUE defined_class);
876
+ void rb_unlink_method_entry(rb_method_entry_t *me);
877
+ void rb_gc_mark_unlinked_live_method_entries(void *pvm);
878
+
879
+ void rb_thread_start_timer_thread(void);
880
+ void rb_thread_stop_timer_thread(int);
881
+ void rb_thread_reset_timer_thread(void);
882
+ void rb_thread_wakeup_timer_thread(void);
883
+
884
+ int ruby_thread_has_gvl_p(void);
885
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
886
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp);
887
+ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp);
888
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
889
+ VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
890
+ void rb_vm_stack_to_heap(rb_thread_t *th);
891
+ void ruby_thread_init_stack(rb_thread_t *th);
892
+ int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp);
893
+ void rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
894
+
895
+ void rb_gc_mark_machine_stack(rb_thread_t *th);
896
+
897
+ int rb_autoloading_value(VALUE mod, ID id, VALUE* value);
898
+
899
+ void rb_vm_rewrite_cref_stack(NODE *node, VALUE old_klass, VALUE new_klass, NODE **new_cref_ptr);
900
+
901
/* The pre-allocated SystemStackError object kept in the VM's
 * special_exceptions table (raised without allocating on overflow). */
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

/* Yields 1 when `expr` is a true compile-time constant; a false `expr`
 * produces a constant division by zero, i.e. a compile-time error. */
#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
/* True when advancing the value stack pointer `sp` by `margin` slots
 * (plus one control frame) would collide with the control-frame area at
 * `cfp`.  The two RUBY_CONST_ASSERTs pin the size assumptions behind
 * the (rb_control_frame_t *) pointer arithmetic. */
#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
    (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
     !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
     ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
/* Statement prefix: the statement that follows runs only on overflow. */
#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
    if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
/* Raise via vm_stackoverflow() when the given sp would overflow. */
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
/* Same check using the frame's own sp. */
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
+
915
/* for thread */

/* Thread model 2 (the only supported model here): one native thread per
 * Ruby thread, serialized by the GVL; the current thread/VM are plain
 * globals rather than thread-local storage. */
#if RUBY_VM_THREAD_MODEL == 2
extern rb_thread_t *ruby_current_thread;
extern rb_vm_t *ruby_current_vm;
/* Global OR of all event flags any hook is interested in; checked first
 * by EXEC_EVENT_HOOK before touching per-thread hook lists. */
extern rb_event_flag_t ruby_vm_event_flags;

#define GET_VM() ruby_current_vm

#ifndef OPT_CALL_CFUNC_WITHOUT_FRAME
#define OPT_CALL_CFUNC_WITHOUT_FRAME 0
#endif

/* Returns the current rb_thread_t.  With the frameless-cfunc
 * optimization enabled, lazily pushes the deferred C-function frame
 * (signalled by th->passed_ci) before handing the thread out. */
static inline rb_thread_t *
GET_THREAD(void)
{
    rb_thread_t *th = ruby_current_thread;
#if OPT_CALL_CFUNC_WITHOUT_FRAME
    if (UNLIKELY(th->passed_ci != 0)) {
        void vm_call_cfunc_push_frame(rb_thread_t *th);
        vm_call_cfunc_push_frame(th);
    }
#endif
    return th;
}

/* Install `th` as the current thread without any bookkeeping. */
#define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
/* Install `th` as the current and VM-running thread, resetting its
 * running-time accumulator when it was not already running. */
#define rb_thread_set_current(th) do { \
    if ((th)->vm->running_thread != (th)) { \
        (th)->running_time_us = 0; \
    } \
    rb_thread_set_current_raw(th); \
    (th)->vm->running_thread = (th); \
} while (0)

#else
#error "unsupported thread model"
#endif
+
954
/* Bit masks for th->interrupt_flag (set atomically below). */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,  /* periodic timer tick */
    PENDING_INTERRUPT_MASK       = 0x02,  /* queued async events (e.g. Thread#raise) */
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,  /* rb_postponed_job_* requests */
    TRAP_INTERRUPT_MASK          = 0x08   /* signal traps */
};

/* Atomically set the corresponding bit on the target thread. */
#define RUBY_VM_SET_TIMER_INTERRUPT(th)		ATOMIC_OR((th)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(th)		ATOMIC_OR((th)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th)	ATOMIC_OR((th)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(th)		ATOMIC_OR((th)->interrupt_flag, TRAP_INTERRUPT_MASK)
/* Pending or trap interrupts that are not masked out by interrupt_mask. */
#define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & ~(th)->interrupt_mask & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
/* Any unmasked interrupt bit at all (timer and postponed jobs included). */
#define RUBY_VM_INTERRUPTED_ANY(th) ((th)->interrupt_flag & ~(th)->interrupt_mask)
+
968
/* Signal delivery and per-thread (threadptr) interrupt machinery;
 * definitions live in the Ruby core outside this header. */
int rb_signal_buff_size(void);
void rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
void rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
/* Pending-interrupt queue: clear, enqueue value `v`, query active state. */
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th);
+
980
/* Interrupt check used around blocking operations: if the pending-
 * interrupt queue is non-empty, mark it unchecked, raise the pending
 * bit, and execute interrupts; otherwise execute only when some
 * unmasked interrupt bit is set.  The second argument 1 marks the
 * blocking variant of rb_threadptr_execute_interrupts. */
#define RUBY_VM_CHECK_INTS_BLOCKING(th) do { \
    if (UNLIKELY(!rb_threadptr_pending_interrupt_empty_p(th))) { \
        th->pending_interrupt_queue_checked = 0; \
        RUBY_VM_SET_INTERRUPT(th); \
        rb_threadptr_execute_interrupts(th, 1); \
    } \
    else if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
        rb_threadptr_execute_interrupts(th, 1); \
    } \
} while (0)

/* Cheap non-blocking interrupt check: run handlers only when an
 * unmasked interrupt bit is already set. */
#define RUBY_VM_CHECK_INTS(th) do { \
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) { \
        rb_threadptr_execute_interrupts(th, 0); \
    } \
} while (0)
+
997
/* tracer */
/* Argument bundle passed to event hooks (built by EXEC_EVENT_HOOK_ORIG
 * below and consumed by rb_threadptr_exec_event_hooks*). */
struct rb_trace_arg_struct {
    rb_event_flag_t event;   /* which RUBY_EVENT_* fired */
    rb_thread_t *th;         /* thread the event occurred on */
    rb_control_frame_t *cfp; /* frame at the time of the event */
    VALUE self;
    ID id;                   /* method id, where applicable */
    VALUE klass;
    VALUE data;

    /* non-zero once `klass` has been resolved for this event */
    int klass_solved;

    /* calc from cfp */
    /* lazily computed from cfp; path starts as Qundef until filled in */
    int lineno;
    VALUE path;
};

/* Run the registered hooks for trace_arg; the _and_pop_frame variant
 * additionally pops the current frame afterwards (used on :return-style
 * events raised while unwinding). */
void rb_threadptr_exec_event_hooks(struct rb_trace_arg_struct *trace_arg);
void rb_threadptr_exec_event_hooks_and_pop_frame(struct rb_trace_arg_struct *trace_arg);
+
1017
/* Fire an event hook: cheap global-flag test first, then check whether
 * any per-thread or per-VM hook subscribed to flag_, and only then
 * build a rb_trace_arg_struct and dispatch.
 *
 * NOTE(review): the inner condition reads `(th)` although the macro
 * parameter is `th_` — this compiles only because every call site's
 * first argument is a variable literally named `th`.  Upstream CRuby
 * carries the same quirk at this version; do NOT "fix" it here, this
 * file must stay byte-compatible with ruby 2.1.10's vm_core.h. */
#define EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, pop_p_) do { \
    if (UNLIKELY(ruby_vm_event_flags & (flag_))) { \
        if (((th)->event_hooks.events | (th)->vm->event_hooks.events) & (flag_)) { \
            struct rb_trace_arg_struct trace_arg; \
            trace_arg.event = (flag_); \
            trace_arg.th = (th_); \
            trace_arg.cfp = (trace_arg.th)->cfp; \
            trace_arg.self = (self_); \
            trace_arg.id = (id_); \
            trace_arg.klass = (klass_); \
            trace_arg.data = (data_); \
            trace_arg.path = Qundef; \
            trace_arg.klass_solved = 0; \
            if (pop_p_) rb_threadptr_exec_event_hooks_and_pop_frame(&trace_arg); \
            else rb_threadptr_exec_event_hooks(&trace_arg); \
        } \
    } \
} while (0)

/* Fire hooks without popping the frame. */
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 0)

/* Fire hooks, then pop the current frame. */
#define EXEC_EVENT_HOOK_AND_POP_FRAME(th_, flag_, self_, id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, klass_, data_, 1)
+
1042
/* Swap out / restore a thread's recursion-detection data (used around
 * code that must not inherit the caller's recursion state). */
VALUE rb_threadptr_reset_recursive_data(rb_thread_t *th);
void rb_threadptr_restore_recursive_data(rb_thread_t *th, VALUE old);

/* Symbols below are exported from the Ruby shared library. */
RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* Process-wide coverage-collection state accessors. */
extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE);
extern void rb_reset_coverages(void);

/* Run all jobs queued via the postponed-job API for `vm`. */
void rb_postponed_job_flush(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */