looksee 3.0.0-universal-java-1.8

Files changed (60)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG +66 -0
  3. data/LICENSE +22 -0
  4. data/README.markdown +175 -0
  5. data/Rakefile +13 -0
  6. data/ext/extconf.rb +19 -0
  7. data/ext/mri/1.9.2/debug.h +36 -0
  8. data/ext/mri/1.9.2/id.h +170 -0
  9. data/ext/mri/1.9.2/method.h +103 -0
  10. data/ext/mri/1.9.2/node.h +483 -0
  11. data/ext/mri/1.9.2/thread_pthread.h +27 -0
  12. data/ext/mri/1.9.2/vm_core.h +707 -0
  13. data/ext/mri/1.9.2/vm_opts.h +51 -0
  14. data/ext/mri/1.9.3/atomic.h +56 -0
  15. data/ext/mri/1.9.3/debug.h +41 -0
  16. data/ext/mri/1.9.3/id.h +175 -0
  17. data/ext/mri/1.9.3/internal.h +227 -0
  18. data/ext/mri/1.9.3/internal_falcon.h +248 -0
  19. data/ext/mri/1.9.3/method.h +105 -0
  20. data/ext/mri/1.9.3/node.h +503 -0
  21. data/ext/mri/1.9.3/thread_pthread.h +51 -0
  22. data/ext/mri/1.9.3/vm_core.h +755 -0
  23. data/ext/mri/1.9.3/vm_opts.h +51 -0
  24. data/ext/mri/2.0.0/internal.h +378 -0
  25. data/ext/mri/2.0.0/method.h +138 -0
  26. data/ext/mri/2.1.0/internal.h +889 -0
  27. data/ext/mri/2.1.0/method.h +142 -0
  28. data/ext/mri/2.2.0/internal.h +1182 -0
  29. data/ext/mri/2.2.0/method.h +141 -0
  30. data/ext/mri/env-1.8.h +27 -0
  31. data/ext/mri/eval_c-1.8.h +27 -0
  32. data/ext/mri/mri.c +309 -0
  33. data/ext/mri/node-1.9.h +35 -0
  34. data/ext/rbx/rbx.c +13 -0
  35. data/lib/looksee.rb +2 -0
  36. data/lib/looksee/JRuby.jar +0 -0
  37. data/lib/looksee/adapter.rb +8 -0
  38. data/lib/looksee/adapter/base.rb +105 -0
  39. data/lib/looksee/adapter/rubinius.rb +84 -0
  40. data/lib/looksee/clean.rb +169 -0
  41. data/lib/looksee/columnizer.rb +73 -0
  42. data/lib/looksee/core_ext.rb +48 -0
  43. data/lib/looksee/editor.rb +64 -0
  44. data/lib/looksee/help.rb +54 -0
  45. data/lib/looksee/inspector.rb +70 -0
  46. data/lib/looksee/lookup_path.rb +95 -0
  47. data/lib/looksee/rbx.bundle +0 -0
  48. data/lib/looksee/version.rb +11 -0
  49. data/spec/looksee/adapter_spec.rb +588 -0
  50. data/spec/looksee/clean_spec.rb +41 -0
  51. data/spec/looksee/columnizer_spec.rb +52 -0
  52. data/spec/looksee/core_ext_spec.rb +17 -0
  53. data/spec/looksee/editor_spec.rb +107 -0
  54. data/spec/looksee/inspector_spec.rb +179 -0
  55. data/spec/looksee/lookup_path_spec.rb +87 -0
  56. data/spec/spec_helper.rb +29 -0
  57. data/spec/support/core_ext.rb +25 -0
  58. data/spec/support/temporary_classes.rb +78 -0
  59. data/spec/support/test_adapter.rb +83 -0
  60. metadata +116 -0

data/ext/mri/1.9.3/thread_pthread.h
@@ -0,0 +1,51 @@
+ /**********************************************************************
+
+   thread_pthread.h -
+
+   $Author: kosaki $
+
+   Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_THREAD_PTHREAD_H
+ #define RUBY_THREAD_PTHREAD_H
+
+ #include <pthread.h>
+ #ifdef HAVE_PTHREAD_NP_H
+ #include <pthread_np.h>
+ #endif
+ typedef pthread_t rb_thread_id_t;
+ typedef pthread_mutex_t rb_thread_lock_t;
+
+ typedef struct rb_thread_cond_struct {
+     pthread_cond_t cond;
+ #ifdef HAVE_CLOCKID_T
+     clockid_t clockid;
+ #endif
+ } rb_thread_cond_t;
+
+ typedef struct native_thread_data_struct {
+     void *signal_thread_list;
+     rb_thread_cond_t sleep_cond;
+ } native_thread_data_t;
+
+ #include <semaphore.h>
+
+ typedef struct rb_global_vm_lock_struct {
+     /* fast path */
+     unsigned long acquired;
+     pthread_mutex_t lock;
+
+     /* slow path */
+     volatile unsigned long waiting;
+     rb_thread_cond_t cond;
+
+     /* yield */
+     rb_thread_cond_t switch_cond;
+     rb_thread_cond_t switch_wait_cond;
+     int need_yield;
+     int wait_yield;
+ } rb_global_vm_lock_t;
+
+ #endif /* RUBY_THREAD_PTHREAD_H */
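
The rb_global_vm_lock_t just defined is MRI 1.9.3's Global VM Lock: acquired and lock make up the uncontended fast path, waiting and cond handle contended acquirers, and the switch_* / *_yield fields support timer-driven yielding between threads. The following is an illustrative sketch only, not the gvl_acquire/gvl_release code from MRI's thread_pthread.c; it uses a simplified stand-in struct (plain pthread_cond_t, no yield support) to show the fast-path/slow-path shape implied by those fields:

/* Illustrative sketch of the fast-path/slow-path pattern behind
 * rb_global_vm_lock_t.  NOT the actual MRI implementation; the struct
 * below is a simplified stand-in with field names borrowed for clarity. */
#include <pthread.h>

typedef struct sketch_gvl {
    unsigned long acquired;          /* fast path: is the lock currently held? */
    pthread_mutex_t lock;
    volatile unsigned long waiting;  /* slow path: number of parked waiters */
    pthread_cond_t cond;
} sketch_gvl_t;

static void sketch_gvl_acquire(sketch_gvl_t *gvl)
{
    pthread_mutex_lock(&gvl->lock);
    if (gvl->acquired) {             /* contended: park on the condvar */
        gvl->waiting++;
        while (gvl->acquired)
            pthread_cond_wait(&gvl->cond, &gvl->lock);
        gvl->waiting--;
    }
    gvl->acquired = 1;
    pthread_mutex_unlock(&gvl->lock);
}

static void sketch_gvl_release(sketch_gvl_t *gvl)
{
    pthread_mutex_lock(&gvl->lock);
    gvl->acquired = 0;
    if (gvl->waiting)                /* wake one parked waiter, if any */
        pthread_cond_signal(&gvl->cond);
    pthread_mutex_unlock(&gvl->lock);
}

Under this scheme an uncontended release is just a mutex-protected flag clear, while contended threads park on the condition variable instead of spinning.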

data/ext/mri/1.9.3/vm_core.h
@@ -0,0 +1,755 @@
+ /**********************************************************************
+
+   vm_core.h -
+
+   $Author: kosaki $
+   created at: 04/01/01 19:41:38 JST
+
+   Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_VM_CORE_H
+ #define RUBY_VM_CORE_H
+
+ #define RUBY_VM_THREAD_MODEL 2
+
+ #include "ruby/ruby.h"
+ #include "ruby/st.h"
+
+ #include "node.h"
+ #include "debug.h"
+ #include "vm_opts.h"
+ #include "id.h"
+ #include "method.h"
+ #include "atomic.h"
+
+ #if defined(_WIN32)
+ #include "thread_win32.h"
+ #elif defined(HAVE_PTHREAD_H)
+ #include "thread_pthread.h"
+ #else
+ #error "unsupported thread type"
+ #endif
+
+ #ifndef ENABLE_VM_OBJSPACE
+ #ifdef _WIN32
+ /*
+  * TODO: object space independent st_table.
+  * socklist needs st_table in rb_w32_sysinit(), before object space
+  * initialization.
+  * It is too early now to change st_hash_type, since it breaks binary
+  * compatibility.
+  */
+ #define ENABLE_VM_OBJSPACE 0
+ #else
+ #define ENABLE_VM_OBJSPACE 1
+ #endif
+ #endif
+
+ #include <setjmp.h>
+ #include <signal.h>
+
+ #ifndef NSIG
+ # define NSIG (_SIGMAX + 1) /* For QNX */
+ #endif
+
+ #define RUBY_NSIG NSIG
+
+ #ifdef HAVE_STDARG_PROTOTYPES
+ #include <stdarg.h>
+ #define va_init_list(a,b) va_start((a),(b))
+ #else
+ #include <varargs.h>
+ #define va_init_list(a,b) va_start((a))
+ #endif
+
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+ #define USE_SIGALTSTACK
+ #endif
+
+ /*****************/
+ /* configuration */
+ /*****************/
+
+ /* gcc ver. check */
+ #if defined(__GNUC__) && __GNUC__ >= 2
+
+ #if OPT_TOKEN_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #endif
+
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+ /* disable threaded code options */
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #if OPT_TOKEN_THREADED_CODE
+ #undef OPT_TOKEN_THREADED_CODE
+ #endif
+ #endif
+
+ /* call threaded code */
+ #if OPT_CALL_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif /* OPT_DIRECT_THREADED_CODE */
+ #if OPT_STACK_CACHING
+ #undef OPT_STACK_CACHING
+ #endif /* OPT_STACK_CACHING */
+ #endif /* OPT_CALL_THREADED_CODE */
+
+ /* likely */
+ #if __GNUC__ >= 3
+ #define LIKELY(x) (__builtin_expect((x), 1))
+ #define UNLIKELY(x) (__builtin_expect((x), 0))
+ #else /* __GNUC__ >= 3 */
+ #define LIKELY(x) (x)
+ #define UNLIKELY(x) (x)
+ #endif /* __GNUC__ >= 3 */
+
+ #if __GNUC__ >= 3
+ #define UNINITIALIZED_VAR(x) x = x
+ #else
+ #define UNINITIALIZED_VAR(x) x
+ #endif
+
+ typedef unsigned long rb_num_t;
+
+ /* iseq data type */
+
+ struct iseq_compile_data_ensure_node_stack;
+
+ typedef struct rb_compile_option_struct rb_compile_option_t;
+
+ struct iseq_inline_cache_entry {
+     VALUE ic_vmstat;
+     VALUE ic_class;
+     union {
+         VALUE value;
+         rb_method_entry_t *method;
+         long index;
+     } ic_value;
+ };
+
+ #if 1
+ #define GetCoreDataFromValue(obj, type, ptr) do { \
+     (ptr) = (type*)DATA_PTR(obj); \
+ } while (0)
+ #else
+ #define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct((obj), type, (ptr))
+ #endif
+
+ #define GetISeqPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_iseq_t, (ptr))
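
GetISeqPtr, like the GetVMPtr/GetProcPtr/GetEnvPtr/GetBindingPtr macros defined later in this header, simply pulls the wrapped C struct out of a VALUE via DATA_PTR. A hypothetical caller compiled against this header, already holding an iseq VALUE (here called iseqval), would use it roughly like this; illustrative only, not code from MRI or from looksee:

/* Illustrative only: unwrap an iseq VALUE into its C struct.  Assumes this
 * header is in scope and that `iseqval` really wraps an rb_iseq_t. */
static rb_iseq_t *sketch_iseq_ptr(VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);   /* expands to (iseq) = (rb_iseq_t *)DATA_PTR(iseqval) */
    return iseq;
}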
+
+ struct rb_iseq_struct;
+
+ struct rb_iseq_struct {
+     /***************/
+     /* static data */
+     /***************/
+
+     enum iseq_type {
+         ISEQ_TYPE_TOP,
+         ISEQ_TYPE_METHOD,
+         ISEQ_TYPE_BLOCK,
+         ISEQ_TYPE_CLASS,
+         ISEQ_TYPE_RESCUE,
+         ISEQ_TYPE_ENSURE,
+         ISEQ_TYPE_EVAL,
+         ISEQ_TYPE_MAIN,
+         ISEQ_TYPE_DEFINED_GUARD
+     } type;              /* instruction sequence type */
+
+     VALUE name;          /* String: iseq name */
+     VALUE filename;      /* file information where this sequence from */
+     VALUE filepath;      /* real file path or nil */
+     VALUE *iseq;         /* iseq (insn number and operands) */
+     VALUE *iseq_encoded; /* encoded iseq */
+     unsigned long iseq_size;
+     VALUE mark_ary;      /* Array: includes operands which should be GC marked */
+     VALUE coverage;      /* coverage array */
+     unsigned short line_no;
+
+     /* insn info, must be freed */
+     struct iseq_insn_info_entry *insn_info_table;
+     size_t insn_info_size;
+
+     ID *local_table;     /* must free */
+     int local_table_size;
+
+     /* method, class frame: sizeof(vars) + 1, block frame: sizeof(vars) */
+     int local_size;
+
+     struct iseq_inline_cache_entry *ic_entries;
+     int ic_size;
+
+     /**
+      * argument information
+      *
+      *  def m(a1, a2, ..., aM,                    # mandatory
+      *        b1=(...), b2=(...), ..., bN=(...),  # optional
+      *        *c,                                 # rest
+      *        d1, d2, ..., dO,                    # post
+      *        &e)                                 # block
+      * =>
+      *
+      *  argc           = M
+      *  arg_rest       = M+N+1 // or -1 if no rest arg
+      *  arg_opts       = N
+      *  arg_opts_tbl   = [ (N entries) ]
+      *  arg_post_len   = O // 0 if no post arguments
+      *  arg_post_start = M+N+2
+      *  arg_block      = M+N + 1 + O + 1 // -1 if no block arg
+      *  arg_simple     = 0 if not simple arguments.
+      *                 = 1 if no opt, rest, post, block.
+      *                 = 2 if ambiguos block parameter ({|a|}).
+      *  arg_size       = argument size.
+      */
+
+     int argc;
+     int arg_simple;
+     int arg_rest;
+     int arg_block;
+     int arg_opts;
+     int arg_post_len;
+     int arg_post_start;
+     int arg_size;
+     VALUE *arg_opt_table;
+
+     size_t stack_max; /* for stack overflow check */
+
+     /* catch table */
+     struct iseq_catch_table_entry *catch_table;
+     int catch_table_size;
+
+     /* for child iseq */
+     struct rb_iseq_struct *parent_iseq;
+     struct rb_iseq_struct *local_iseq;
+
+     /****************/
+     /* dynamic data */
+     /****************/
+
+     VALUE self;
+     VALUE orig; /* non-NULL if its data have origin */
+
+     /* block inlining */
+     /*
+      * NODE *node;
+      * void *special_block_builder;
+      * void *cached_special_block_builder;
+      * VALUE cached_special_block;
+      */
+
+     /* klass/module nest information stack (cref) */
+     NODE *cref_stack;
+     VALUE klass;
+
+     /* misc */
+     ID defined_method_id; /* for define_method */
+
+     /* used at compile time */
+     struct iseq_compile_data *compile_data;
+ };
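
To make the argument-information comment inside rb_iseq_struct concrete, here is a hypothetical worked example; the values are derived purely by plugging into the formulas in that comment, not taken from a real iseq dump:

/* Hypothetical example, following the comment's formulas:
 *   def m(a, b, c = 1, *r, d, &blk)   ->  M = 2, N = 1, O = 1
 * gives
 *   argc           = 2               (a, b)
 *   arg_opts       = 1               (c)
 *   arg_rest       = M+N+1 = 4       (*r)
 *   arg_post_len   = 1               (d)
 *   arg_post_start = M+N+2 = 5
 *   arg_block      = M+N+1+O+1 = 6   (&blk)
 *   arg_simple     = 0               (optional, rest, post and block args present)
 */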
+
+ enum ruby_special_exceptions {
+     ruby_error_reenter,
+     ruby_error_nomemory,
+     ruby_error_sysstack,
+     ruby_error_closed_stream,
+     ruby_special_error_count
+ };
+
+ #define GetVMPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_vm_t, (ptr))
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+ struct rb_objspace;
+ void rb_objspace_free(struct rb_objspace *);
+ #endif
+
+ typedef struct rb_vm_struct {
+     VALUE self;
+
+     rb_global_vm_lock_t gvl;
+
+     struct rb_thread_struct *main_thread;
+     struct rb_thread_struct *running_thread;
+
+     st_table *living_threads;
+     VALUE thgroup_default;
+
+     int running;
+     int inhibit_thread_creation;
+     int thread_abort_on_exception;
+     unsigned long trace_flag;
+     volatile int sleeper;
+
+     /* object management */
+     VALUE mark_object_ary;
+
+     VALUE special_exceptions[ruby_special_error_count];
+
+     /* load */
+     VALUE top_self;
+     VALUE load_path;
+     VALUE loaded_features;
+     struct st_table *loading_table;
+
+     /* signal */
+     struct {
+         VALUE cmd;
+         int safe;
+     } trap_list[RUBY_NSIG];
+
+     /* hook */
+     rb_event_hook_t *event_hooks;
+
+     int src_encoding_index;
+
+     VALUE verbose, debug, progname;
+     VALUE coverages;
+
+     struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+     struct rb_objspace *objspace;
+ #endif
+
+     /*
+      * @shyouhei notes that this is not for storing normal Ruby
+      * objects so do *NOT* mark this when you GC.
+      */
+     struct RArray at_exit;
+ } rb_vm_t;
+
+ typedef struct {
+     VALUE *pc;                  /* cfp[0] */
+     VALUE *sp;                  /* cfp[1] */
+     VALUE *bp;                  /* cfp[2] */
+     rb_iseq_t *iseq;            /* cfp[3] */
+     VALUE flag;                 /* cfp[4] */
+     VALUE self;                 /* cfp[5] / block[0] */
+     VALUE *lfp;                 /* cfp[6] / block[1] */
+     VALUE *dfp;                 /* cfp[7] / block[2] */
+     rb_iseq_t *block_iseq;      /* cfp[8] / block[3] */
+     VALUE proc;                 /* cfp[9] / block[4] */
+     const rb_method_entry_t *me;/* cfp[10] */
+ } rb_control_frame_t;
+
+ typedef struct rb_block_struct {
+     VALUE self;                 /* share with method frame if it's only block */
+     VALUE *lfp;                 /* share with method frame if it's only block */
+     VALUE *dfp;                 /* share with method frame if it's only block */
+     rb_iseq_t *iseq;
+     VALUE proc;
+ } rb_block_t;
+
+ extern const rb_data_type_t ruby_thread_data_type;
+
+ #define GetThreadPtr(obj, ptr) \
+     TypedData_Get_Struct((obj), rb_thread_t, &ruby_thread_data_type, (ptr))
+
+ enum rb_thread_status {
+     THREAD_TO_KILL,
+     THREAD_RUNNABLE,
+     THREAD_STOPPED,
+     THREAD_STOPPED_FOREVER,
+     THREAD_KILLED
+ };
+
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
+
+ struct rb_vm_tag {
+     rb_jmpbuf_t buf;
+     VALUE tag;
+     VALUE retval;
+     struct rb_vm_tag *prev;
+ };
+
+ struct rb_vm_protect_tag {
+     struct rb_vm_protect_tag *prev;
+ };
+
+ struct rb_unblock_callback {
+     rb_unblock_function_t *func;
+     void *arg;
+ };
+
+ struct rb_mutex_struct;
+
+ #ifdef SIGSTKSZ
+ #define ALT_STACK_SIZE (SIGSTKSZ*2)
+ #else
+ #define ALT_STACK_SIZE (4*1024)
+ #endif
+
+ typedef struct rb_thread_struct {
+     VALUE self;
+     rb_vm_t *vm;
+
+     /* execution information */
+     VALUE *stack;               /* must free, must mark */
+     unsigned long stack_size;
+     rb_control_frame_t *cfp;
+     int safe_level;
+     int raised_flag;
+     VALUE last_status;          /* $? */
+
+     /* passing state */
+     int state;
+
+     int waiting_fd;
+
+     /* for rb_iterate */
+     const rb_block_t *passed_block;
+
+     /* for bmethod */
+     const rb_method_entry_t *passed_me;
+
+     /* for load(true) */
+     VALUE top_self;
+     VALUE top_wrapper;
+
+     /* eval env */
+     rb_block_t *base_block;
+
+     VALUE *local_lfp;
+     VALUE local_svar;
+
+     /* thread control */
+     rb_thread_id_t thread_id;
+     enum rb_thread_status status;
+     int priority;
+
+     native_thread_data_t native_thread_data;
+     void *blocking_region_buffer;
+
+     VALUE thgroup;
+     VALUE value;
+
+     VALUE errinfo;
+     VALUE thrown_errinfo;
+
+     rb_atomic_t interrupt_flag;
+     rb_thread_lock_t interrupt_lock;
+     struct rb_unblock_callback unblock;
+     VALUE locking_mutex;
+     struct rb_mutex_struct *keeping_mutexes;
+
+     struct rb_vm_tag *tag;
+     struct rb_vm_protect_tag *protect_tag;
+
+     int parse_in_eval;
+     int mild_compile_error;
+
+     /* storage */
+     st_table *local_storage;
+
+     struct rb_thread_struct *join_list_next;
+     struct rb_thread_struct *join_list_head;
+
+     VALUE first_proc;
+     VALUE first_args;
+     VALUE (*first_func)(ANYARGS);
+
+     /* for GC */
+     VALUE *machine_stack_start;
+     VALUE *machine_stack_end;
+     size_t machine_stack_maxsize;
+ #ifdef __ia64
+     VALUE *machine_register_stack_start;
+     VALUE *machine_register_stack_end;
+     size_t machine_register_stack_maxsize;
+ #endif
+     jmp_buf machine_regs;
+     int mark_stack_len;
+
+     /* statistics data for profiler */
+     VALUE stat_insn_usage;
+
+     /* tracer */
+     rb_event_hook_t *event_hooks;
+     rb_event_flag_t event_flags;
+     int tracing;
+
+     /* fiber */
+     VALUE fiber;
+     VALUE root_fiber;
+     rb_jmpbuf_t root_jmpbuf;
+
+     /* misc */
+     int method_missing_reason;
+     int abort_on_exception;
+ #ifdef USE_SIGALTSTACK
+     void *altstack;
+ #endif
+     unsigned long running_time_us;
+ } rb_thread_t;
+
+ /* iseq.c */
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+ VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, enum iseq_type);
+ VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE filename, VALUE filepath, VALUE parent);
+ VALUE rb_iseq_new_main(NODE *node, VALUE filename, VALUE filepath);
+ VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, VALUE);
+ VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, enum iseq_type, const rb_compile_option_t*);
+ VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
+ VALUE rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE filepath, VALUE line, VALUE opt);
+ VALUE rb_iseq_disasm(VALUE self);
+ int rb_iseq_disasm_insn(VALUE str, VALUE *iseqval, size_t pos, rb_iseq_t *iseq, VALUE child);
+ const char *ruby_node_name(int node);
+ int rb_iseq_first_lineno(rb_iseq_t *iseq);
+
+ RUBY_EXTERN VALUE rb_cISeq;
+ RUBY_EXTERN VALUE rb_cRubyVM;
+ RUBY_EXTERN VALUE rb_cEnv;
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+
+ /* each thread has this size stack : 128KB */
+ #define RUBY_VM_THREAD_STACK_SIZE (128 * 1024)
+
+ #define GetProcPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_proc_t, (ptr))
+
+ typedef struct {
+     rb_block_t block;
+
+     VALUE envval;               /* for GC mark */
+     VALUE blockprocval;
+     int safe_level;
+     int is_from_method;
+     int is_lambda;
+ } rb_proc_t;
+
+ #define GetEnvPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_env_t, (ptr))
+
+ typedef struct {
+     VALUE *env;
+     int env_size;
+     int local_size;
+     VALUE prev_envval;          /* for GC mark */
+     rb_block_t block;
+ } rb_env_t;
+
+ #define GetBindingPtr(obj, ptr) \
+     GetCoreDataFromValue((obj), rb_binding_t, (ptr))
+
+ typedef struct {
+     VALUE env;
+     VALUE filename;
+     unsigned short line_no;
+ } rb_binding_t;
+
+ /* used by compile time and send insn */
+ #define VM_CALL_ARGS_SPLAT_BIT     (0x01 << 1)
+ #define VM_CALL_ARGS_BLOCKARG_BIT  (0x01 << 2)
+ #define VM_CALL_FCALL_BIT          (0x01 << 3)
+ #define VM_CALL_VCALL_BIT          (0x01 << 4)
+ #define VM_CALL_TAILCALL_BIT       (0x01 << 5)
+ #define VM_CALL_TAILRECURSION_BIT  (0x01 << 6)
+ #define VM_CALL_SUPER_BIT          (0x01 << 7)
+ #define VM_CALL_OPT_SEND_BIT       (0x01 << 8)
+
+ enum vm_special_object_type {
+     VM_SPECIAL_OBJECT_VMCORE = 1,
+     VM_SPECIAL_OBJECT_CBASE,
+     VM_SPECIAL_OBJECT_CONST_BASE
+ };
+
+ #define VM_FRAME_MAGIC_METHOD 0x11
+ #define VM_FRAME_MAGIC_BLOCK  0x21
+ #define VM_FRAME_MAGIC_CLASS  0x31
+ #define VM_FRAME_MAGIC_TOP    0x41
+ #define VM_FRAME_MAGIC_FINISH 0x51
+ #define VM_FRAME_MAGIC_CFUNC  0x61
+ #define VM_FRAME_MAGIC_PROC   0x71
+ #define VM_FRAME_MAGIC_IFUNC  0x81
+ #define VM_FRAME_MAGIC_EVAL   0x91
+ #define VM_FRAME_MAGIC_LAMBDA 0xa1
+ #define VM_FRAME_MAGIC_MASK_BITS 8
+ #define VM_FRAME_MAGIC_MASK   (~(~0<<VM_FRAME_MAGIC_MASK_BITS))
+
+ #define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
+
+ /* other frame flag */
+ #define VM_FRAME_FLAG_PASSED 0x0100
+
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
+     (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+ /* inline cache */
+ typedef struct iseq_inline_cache_entry *IC;
+
+ void rb_vm_change_state(void);
+
+ typedef VALUE CDHASH;
+
+ #ifndef FUNC_FASTCALL
+ #define FUNC_FASTCALL(x) x
+ #endif
+
+ typedef rb_control_frame_t *
+   (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
+
+ #define GC_GUARDED_PTR(p)     ((VALUE)((VALUE)(p) | 0x01))
+ #define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
+ #define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
+
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
+ #define RUBY_VM_END_CONTROL_FRAME(th) \
+     ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+     ((void *)(ecfp) > (void *)(cfp))
+ #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
+     (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
+
+ #define RUBY_VM_IFUNC_P(ptr) (BUILTIN_TYPE(ptr) == T_NODE)
+ #define RUBY_VM_NORMAL_ISEQ_P(ptr) \
+     ((ptr) && !RUBY_VM_IFUNC_P(ptr))
+
+ #define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
+ #define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
+     ((rb_control_frame_t *)((VALUE *)(b) - 5))
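
The frame macros above encode the layout assumptions: the control-frame stack grows downward from the top of th->stack, so a frame's caller lives at cfp + 1, and a block struct is simply a view onto cfp[5..9] (which is why RUBY_VM_GET_CFP_FROM_BLOCK_PTR subtracts 5). As an illustrative sketch only, not code from MRI or from looksee's mri.c, and assuming this header is in scope with a valid rb_thread_t *, a walk over a thread's Ruby-level frames could look like this:

/* Illustrative sketch: iterate from the current frame towards the
 * outermost one, skipping dummy/ifunc frames. */
static void sketch_each_ruby_frame(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_control_frame_t *end = RUBY_VM_END_CONTROL_FRAME(th);

    while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* cfp->iseq->filename and cfp->pc locate the Ruby frame;
             * VM_FRAME_TYPE(cfp) distinguishes method/block/class/etc. */
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);   /* step to the caller */
    }
}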
+
+ /* VM related object allocate functions */
+ VALUE rb_thread_alloc(VALUE klass);
+ VALUE rb_proc_alloc(VALUE klass);
+
+ /* for debug */
+ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
+ void rb_vm_bugreport(void);
+
+ /* functions about thread/vm execution */
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+ VALUE rb_iseq_eval(VALUE iseqval);
+ VALUE rb_iseq_eval_main(VALUE iseqval);
+ void rb_enable_interrupt(void);
+ void rb_disable_interrupt(void);
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+ int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
+
+ VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+                         int argc, const VALUE *argv, const rb_block_t *blockptr);
+ VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+ VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
+ void rb_vm_inc_const_missing_count(void);
+ void rb_vm_gvl_destroy(rb_vm_t *vm);
+ VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
+                  const VALUE *argv, const rb_method_entry_t *me);
+ void rb_unlink_method_entry(rb_method_entry_t *me);
+ void rb_gc_mark_unlinked_live_method_entries(void *pvm);
+
+ void rb_thread_start_timer_thread(void);
+ void rb_thread_stop_timer_thread(int);
+ void rb_thread_reset_timer_thread(void);
+ void rb_thread_wakeup_timer_thread(void);
+
+ int ruby_thread_has_gvl_p(void);
+ VALUE rb_make_backtrace(void);
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+ int rb_backtrace_each(rb_backtrace_iter_func *iter, void *arg);
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
+ VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
+ void rb_vm_stack_to_heap(rb_thread_t *th);
+ void ruby_thread_init_stack(rb_thread_t *th);
+
+ NOINLINE(void rb_gc_save_machine_context(rb_thread_t *));
+ void rb_gc_mark_machine_stack(rb_thread_t *th);
+
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+ /* for thread */
+
+ #if RUBY_VM_THREAD_MODEL == 2
+ RUBY_EXTERN rb_thread_t *ruby_current_thread;
+ extern rb_vm_t *ruby_current_vm;
+
+ #define GET_VM() ruby_current_vm
+ #define GET_THREAD() ruby_current_thread
+ #define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+ #define rb_thread_set_current(th) do { \
+     if ((th)->vm->running_thread != (th)) { \
+         (th)->vm->running_thread->running_time_us = 0; \
+     } \
+     rb_thread_set_current_raw(th); \
+     (th)->vm->running_thread = (th); \
+ } while (0)
+
+ #else
+ #error "unsupported thread model"
+ #endif
+
+ #define RUBY_VM_SET_TIMER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x01)
+ #define RUBY_VM_SET_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x02)
+ #define RUBY_VM_SET_FINALIZER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, 0x04)
+ #define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & 0x02)
+
+ int rb_signal_buff_size(void);
+ void rb_signal_exec(rb_thread_t *th, int sig);
+ void rb_threadptr_check_signal(rb_thread_t *mth);
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+ void rb_threadptr_signal_exit(rb_thread_t *th);
+ void rb_threadptr_execute_interrupts(rb_thread_t *);
+ void rb_threadptr_interrupt(rb_thread_t *th);
+ void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
+
+ void rb_thread_lock_unlock(rb_thread_lock_t *);
+ void rb_thread_lock_destroy(rb_thread_lock_t *);
+
+ #define RUBY_VM_CHECK_INTS_TH(th) do { \
+     if (UNLIKELY((th)->interrupt_flag)) { \
+         rb_threadptr_execute_interrupts(th); \
+     } \
+ } while (0)
+
+ #define RUBY_VM_CHECK_INTS() \
+     RUBY_VM_CHECK_INTS_TH(GET_THREAD())
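
RUBY_VM_CHECK_INTS()/RUBY_VM_CHECK_INTS_TH() are the polling points for the interrupt_flag bits set by the RUBY_VM_SET_*_INTERRUPT macros above: pending signals, Thread#raise and Thread#kill are only serviced when some code checks the flag. A hypothetical long-running C loop written against this header would poll it roughly like this (illustrative only, not a function from MRI or from looksee):

/* Illustrative sketch: poll for pending interrupts between work chunks
 * so signals and Thread#kill are not delayed indefinitely. */
static void sketch_long_running_work(void)
{
    int done = 0;
    while (!done) {
        /* ... perform one bounded chunk of work, set `done` when finished ... */
        RUBY_VM_CHECK_INTS();   /* runs rb_threadptr_execute_interrupts() if flagged */
    }
}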
+
+ /* tracer */
+ void
+ rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass);
+
+ #define EXEC_EVENT_HOOK(th, flag, self, id, klass) do { \
+     rb_event_flag_t wait_event__ = (th)->event_flags; \
+     if (UNLIKELY(wait_event__)) { \
+         if (wait_event__ & ((flag) | RUBY_EVENT_VM)) { \
+             rb_threadptr_exec_event_hooks((th), (flag), (self), (id), (klass)); \
+         } \
+     } \
+ } while (0)
+
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility push(default)
+ #endif
+
+ int rb_thread_check_trap_pending(void);
+
+ extern VALUE rb_get_coverages(void);
+ extern void rb_set_coverages(VALUE);
+ extern void rb_reset_coverages(void);
+
+ #if defined __GNUC__ && __GNUC__ >= 4
+ #pragma GCC visibility pop
+ #endif
+
+ #endif /* RUBY_VM_CORE_H */