looksee 1.0.0-universal-java-1.6

Files changed (45)
  1. data/CHANGELOG +14 -0
  2. data/LICENSE +22 -0
  3. data/README.markdown +161 -0
  4. data/Rakefile +10 -0
  5. data/ext/extconf.rb +9 -0
  6. data/ext/mri/1.9.2/debug.h +36 -0
  7. data/ext/mri/1.9.2/id.h +170 -0
  8. data/ext/mri/1.9.2/method.h +103 -0
  9. data/ext/mri/1.9.2/node.h +483 -0
  10. data/ext/mri/1.9.2/thread_pthread.h +27 -0
  11. data/ext/mri/1.9.2/vm_core.h +707 -0
  12. data/ext/mri/1.9.2/vm_opts.h +51 -0
  13. data/ext/mri/env-1.8.h +27 -0
  14. data/ext/mri/eval_c-1.8.h +27 -0
  15. data/ext/mri/mri.c +269 -0
  16. data/ext/mri/node-1.9.h +35 -0
  17. data/ext/rbx/rbx.c +13 -0
  18. data/lib/looksee.rb +5 -0
  19. data/lib/looksee/adapter.rb +10 -0
  20. data/lib/looksee/adapter/base.rb +100 -0
  21. data/lib/looksee/adapter/rubinius.rb +73 -0
  22. data/lib/looksee/clean.rb +122 -0
  23. data/lib/looksee/columnizer.rb +73 -0
  24. data/lib/looksee/core_ext.rb +59 -0
  25. data/lib/looksee/editor.rb +58 -0
  26. data/lib/looksee/help.rb +54 -0
  27. data/lib/looksee/inspector.rb +55 -0
  28. data/lib/looksee/jruby.jar +0 -0
  29. data/lib/looksee/lookup_path.rb +95 -0
  30. data/lib/looksee/rbx.bundle +0 -0
  31. data/lib/looksee/shortcuts.rb +3 -0
  32. data/lib/looksee/version.rb +11 -0
  33. data/lib/looksee/wirble_compatibility.rb +86 -0
  34. data/spec/adapter_spec.rb +546 -0
  35. data/spec/columnizer_spec.rb +52 -0
  36. data/spec/core_ext_spec.rb +41 -0
  37. data/spec/editor_spec.rb +128 -0
  38. data/spec/inspector_spec.rb +178 -0
  39. data/spec/lookup_path_spec.rb +84 -0
  40. data/spec/spec_helper.rb +25 -0
  41. data/spec/support/core_ext.rb +25 -0
  42. data/spec/support/temporary_classes.rb +102 -0
  43. data/spec/support/test_adapter.rb +72 -0
  44. data/spec/wirble_compatibility_spec.rb +116 -0
  45. metadata +158 -0
data/ext/mri/1.9.2/thread_pthread.h
@@ -0,0 +1,27 @@
+ /**********************************************************************
+
+ thread_pthread.h -
+
+ $Author: naruse $
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_THREAD_PTHREAD_H
+ #define RUBY_THREAD_PTHREAD_H
+
+ #include <pthread.h>
+ #ifdef HAVE_PTHREAD_NP_H
+ #include <pthread_np.h>
+ #endif
+ typedef pthread_t rb_thread_id_t;
+ typedef pthread_mutex_t rb_thread_lock_t;
+ typedef pthread_cond_t rb_thread_cond_t;
+
+ typedef struct native_thread_data_struct {
+ void *signal_thread_list;
+ pthread_cond_t sleep_cond;
+ } native_thread_data_t;
+
+ #endif /* RUBY_THREAD_PTHREAD_H */
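On pthread platforms these typedefs make the VM's locks and condition variables (for example the global_vm_lock field declared in vm_core.h below) ordinary pthread objects. A minimal standalone sketch of what acquiring and releasing such a lock amounts to, assuming nothing beyond the typedefs above and the standard pthread API:

#include <pthread.h>

/* Stand-ins for the typedefs from thread_pthread.h, so this sketch
   compiles on its own. */
typedef pthread_mutex_t rb_thread_lock_t;
typedef pthread_cond_t  rb_thread_cond_t;

int main(void)
{
    rb_thread_lock_t lock;            /* e.g. a lock like vm->global_vm_lock */

    pthread_mutex_init(&lock, NULL);  /* create the lock                     */
    pthread_mutex_lock(&lock);        /* acquire it                          */
    pthread_mutex_unlock(&lock);      /* release it                          */
    pthread_mutex_destroy(&lock);     /* tear it down                        */
    return 0;
}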
data/ext/mri/1.9.2/vm_core.h
@@ -0,0 +1,707 @@
+ /**********************************************************************
+
+ vm_core.h -
+
+ $Author: yugui $
+ created at: 04/01/01 19:41:38 JST
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_VM_CORE_H
+ #define RUBY_VM_CORE_H
+
+ #define RUBY_VM_THREAD_MODEL 2
+
+ #include "ruby/ruby.h"
+ #include "ruby/st.h"
+
+ #include "node.h"
+ #include "debug.h"
+ #include "vm_opts.h"
+ #include "id.h"
+ #include "method.h"
+
+ #if defined(_WIN32)
+ #include "thread_win32.h"
+ #elif defined(HAVE_PTHREAD_H)
+ #include "thread_pthread.h"
+ #else
+ #error "unsupported thread type"
+ #endif
+
+ #ifndef ENABLE_VM_OBJSPACE
+ #ifdef _WIN32
+ /*
+ * TODO: object space indenpendent st_table.
+ * socklist needs st_table in rb_w32_sysinit(), before object space
+ * initialization.
+ * It is too early now to change st_hash_type, since it breaks binary
+ * compatibility.
+ */
+ #define ENABLE_VM_OBJSPACE 0
+ #else
+ #define ENABLE_VM_OBJSPACE 1
+ #endif
+ #endif
+
+ #include <setjmp.h>
+ #include <signal.h>
+
+ #ifndef NSIG
+ # define NSIG (_SIGMAX + 1) /* For QNX */
+ #endif
+
+ #define RUBY_NSIG NSIG
+
+ #ifdef HAVE_STDARG_PROTOTYPES
+ #include <stdarg.h>
+ #define va_init_list(a,b) va_start(a,b)
+ #else
+ #include <varargs.h>
+ #define va_init_list(a,b) va_start(a)
+ #endif
+
+ #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
+ #define USE_SIGALTSTACK
+ #endif
+
+ /*****************/
+ /* configuration */
+ /*****************/
+
+ /* gcc ver. check */
+ #if defined(__GNUC__) && __GNUC__ >= 2
+
+ #if OPT_TOKEN_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #endif
+
+ #else /* defined(__GNUC__) && __GNUC__ >= 2 */
+
+ /* disable threaded code options */
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif
+ #if OPT_TOKEN_THREADED_CODE
+ #undef OPT_TOKEN_THREADED_CODE
+ #endif
+ #endif
+
+ /* call threaded code */
+ #if OPT_CALL_THREADED_CODE
+ #if OPT_DIRECT_THREADED_CODE
+ #undef OPT_DIRECT_THREADED_CODE
+ #endif /* OPT_DIRECT_THREADED_CODE */
+ #if OPT_STACK_CACHING
+ #undef OPT_STACK_CACHING
+ #endif /* OPT_STACK_CACHING */
+ #endif /* OPT_CALL_THREADED_CODE */
+
+ /* likely */
+ #if __GNUC__ >= 3
+ #define LIKELY(x) (__builtin_expect((x), 1))
+ #define UNLIKELY(x) (__builtin_expect((x), 0))
+ #else /* __GNUC__ >= 3 */
+ #define LIKELY(x) (x)
+ #define UNLIKELY(x) (x)
+ #endif /* __GNUC__ >= 3 */
+
+ #if __GNUC__ >= 3
+ #define UNINITIALIZED_VAR(x) x = x
+ #else
+ #define UNINITIALIZED_VAR(x) x
+ #endif
+
+ typedef unsigned long rb_num_t;
+
+ /* iseq data type */
+
+ struct iseq_compile_data_ensure_node_stack;
+
+ typedef struct rb_compile_option_struct {
+ int inline_const_cache;
+ int peephole_optimization;
+ int tailcall_optimization;
+ int specialized_instruction;
+ int operands_unification;
+ int instructions_unification;
+ int stack_caching;
+ int trace_instruction;
+ int debug_level;
+ } rb_compile_option_t;
+
+ struct iseq_inline_cache_entry {
+ VALUE ic_vmstat;
+ VALUE ic_class;
+ union {
+ VALUE value;
+ rb_method_entry_t *method;
+ long index;
+ } ic_value;
+ };
+
+ #if 1
+ #define GetCoreDataFromValue(obj, type, ptr) do { \
+ ptr = (type*)DATA_PTR(obj); \
+ } while (0)
+ #else
+ #define GetCoreDataFromValue(obj, type, ptr) Data_Get_Struct(obj, type, ptr)
+ #endif
+
+ #define GetISeqPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_iseq_t, ptr)
+
+ struct rb_iseq_struct;
+
+ struct rb_iseq_struct {
+ /***************/
+ /* static data */
+ /***************/
+
+ VALUE type; /* instruction sequence type */
+ VALUE name; /* String: iseq name */
+ VALUE filename; /* file information where this sequence from */
+ VALUE filepath; /* real file path or nil */
+ VALUE *iseq; /* iseq (insn number and openrads) */
+ VALUE *iseq_encoded; /* encoded iseq */
+ unsigned long iseq_size;
+ VALUE mark_ary; /* Array: includes operands which should be GC marked */
+ VALUE coverage; /* coverage array */
+ unsigned short line_no;
+
+ /* insn info, must be freed */
+ struct iseq_insn_info_entry *insn_info_table;
+ size_t insn_info_size;
+
+ ID *local_table; /* must free */
+ int local_table_size;
+
+ /* method, class frame: sizeof(vars) + 1, block frame: sizeof(vars) */
+ int local_size;
+
+ struct iseq_inline_cache_entry *ic_entries;
+ int ic_size;
+
+ /**
+ * argument information
+ *
+ * def m(a1, a2, ..., aM, # mandatory
+ * b1=(...), b2=(...), ..., bN=(...), # optinal
+ * *c, # rest
+ * d1, d2, ..., dO, # post
+ * &e) # block
+ * =>
+ *
+ * argc = M
+ * arg_rest = M+N+1 // or -1 if no rest arg
+ * arg_opts = N
+ * arg_opts_tbl = [ (N entries) ]
+ * arg_post_len = O // 0 if no post arguments
+ * arg_post_start = M+N+2
+ * arg_block = M+N + 1 + O + 1 // -1 if no block arg
+ * arg_simple = 0 if not simple arguments.
+ * = 1 if no opt, rest, post, block.
+ * = 2 if ambiguos block parameter ({|a|}).
+ * arg_size = argument size.
+ */
+
+ int argc;
+ int arg_simple;
+ int arg_rest;
+ int arg_block;
+ int arg_opts;
+ int arg_post_len;
+ int arg_post_start;
+ int arg_size;
+ VALUE *arg_opt_table;
+
+ size_t stack_max; /* for stack overflow check */
+
+ /* catch table */
+ struct iseq_catch_table_entry *catch_table;
+ int catch_table_size;
+
+ /* for child iseq */
+ struct rb_iseq_struct *parent_iseq;
+ struct rb_iseq_struct *local_iseq;
+
+ /****************/
+ /* dynamic data */
+ /****************/
+
+ VALUE self;
+ VALUE orig; /* non-NULL if its data have origin */
+
+ /* block inlining */
+ /*
+ * NODE *node;
+ * void *special_block_builder;
+ * void *cached_special_block_builder;
+ * VALUE cached_special_block;
+ */
+
+ /* klass/module nest information stack (cref) */
+ NODE *cref_stack;
+ VALUE klass;
+
+ /* misc */
+ ID defined_method_id; /* for define_method */
+
+ /* used at compile time */
+ struct iseq_compile_data *compile_data;
+ };
+
+ enum ruby_special_exceptions {
+ ruby_error_reenter,
+ ruby_error_nomemory,
+ ruby_error_sysstack,
+ ruby_special_error_count
+ };
+
+ #define GetVMPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_vm_t, ptr)
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+ struct rb_objspace;
+ void rb_objspace_free(struct rb_objspace *);
+ #endif
+
+ typedef struct rb_vm_struct {
+ VALUE self;
+
+ rb_thread_lock_t global_vm_lock;
+
+ struct rb_thread_struct *main_thread;
+ struct rb_thread_struct *running_thread;
+
+ st_table *living_threads;
+ VALUE thgroup_default;
+
+ int running;
+ int thread_abort_on_exception;
+ unsigned long trace_flag;
+ volatile int sleeper;
+
+ /* object management */
+ VALUE mark_object_ary;
+
+ VALUE special_exceptions[ruby_special_error_count];
+
+ /* load */
+ VALUE top_self;
+ VALUE load_path;
+ VALUE loaded_features;
+ struct st_table *loading_table;
+
+ /* signal */
+ struct {
+ VALUE cmd;
+ int safe;
+ } trap_list[RUBY_NSIG];
+
+ /* hook */
+ rb_event_hook_t *event_hooks;
+
+ int src_encoding_index;
+
+ VALUE verbose, debug, progname;
+ VALUE coverages;
+
+ struct unlinked_method_entry_list_entry *unlinked_method_entry_list;
+
+ #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
+ struct rb_objspace *objspace;
+ #endif
+ } rb_vm_t;
+
+ typedef struct {
+ VALUE *pc; /* cfp[0] */
+ VALUE *sp; /* cfp[1] */
+ VALUE *bp; /* cfp[2] */
+ rb_iseq_t *iseq; /* cfp[3] */
+ VALUE flag; /* cfp[4] */
+ VALUE self; /* cfp[5] / block[0] */
+ VALUE *lfp; /* cfp[6] / block[1] */
+ VALUE *dfp; /* cfp[7] / block[2] */
+ rb_iseq_t *block_iseq; /* cfp[8] / block[3] */
+ VALUE proc; /* cfp[9] / block[4] */
+ const rb_method_entry_t *me;/* cfp[10] */
+ } rb_control_frame_t;
+
+ typedef struct rb_block_struct {
+ VALUE self; /* share with method frame if it's only block */
+ VALUE *lfp; /* share with method frame if it's only block */
+ VALUE *dfp; /* share with method frame if it's only block */
+ rb_iseq_t *iseq;
+ VALUE proc;
+ } rb_block_t;
+
+ #define GetThreadPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_thread_t, ptr)
+
+ enum rb_thread_status {
+ THREAD_TO_KILL,
+ THREAD_RUNNABLE,
+ THREAD_STOPPED,
+ THREAD_STOPPED_FOREVER,
+ THREAD_KILLED
+ };
+
+ typedef RUBY_JMP_BUF rb_jmpbuf_t;
+
+ struct rb_vm_tag {
+ rb_jmpbuf_t buf;
+ VALUE tag;
+ VALUE retval;
+ struct rb_vm_tag *prev;
+ };
+
+ struct rb_vm_protect_tag {
+ struct rb_vm_protect_tag *prev;
+ };
+
+ #define RUBY_VM_VALUE_CACHE_SIZE 0x1000
+ #define USE_VALUE_CACHE 0
+
+ struct rb_unblock_callback {
+ rb_unblock_function_t *func;
+ void *arg;
+ };
+
+ struct rb_mutex_struct;
+
+ typedef struct rb_thread_struct
+ {
+ VALUE self;
+ rb_vm_t *vm;
+
+ /* execution information */
+ VALUE *stack; /* must free, must mark */
+ unsigned long stack_size;
+ rb_control_frame_t *cfp;
+ int safe_level;
+ int raised_flag;
+ VALUE last_status; /* $? */
+
+ /* passing state */
+ int state;
+
+ /* for rb_iterate */
+ const rb_block_t *passed_block;
+
+ /* for bmethod */
+ const rb_method_entry_t *passed_me;
+
+ /* for load(true) */
+ VALUE top_self;
+ VALUE top_wrapper;
+
+ /* eval env */
+ rb_block_t *base_block;
+
+ VALUE *local_lfp;
+ VALUE local_svar;
+
+ /* thread control */
+ rb_thread_id_t thread_id;
+ enum rb_thread_status status;
+ int priority;
+ int slice;
+
+ native_thread_data_t native_thread_data;
+ void *blocking_region_buffer;
+
+ VALUE thgroup;
+ VALUE value;
+
+ VALUE errinfo;
+ VALUE thrown_errinfo;
+ int exec_signal;
+
+ int interrupt_flag;
+ rb_thread_lock_t interrupt_lock;
+ struct rb_unblock_callback unblock;
+ VALUE locking_mutex;
+ struct rb_mutex_struct *keeping_mutexes;
+ int transition_for_lock;
+
+ struct rb_vm_tag *tag;
+ struct rb_vm_protect_tag *protect_tag;
+
+ int parse_in_eval;
+ int mild_compile_error;
+
+ /* storage */
+ st_table *local_storage;
+ #if USE_VALUE_CACHE
+ VALUE value_cache[RUBY_VM_VALUE_CACHE_SIZE + 1];
+ VALUE *value_cache_ptr;
+ #endif
+
+ struct rb_thread_struct *join_list_next;
+ struct rb_thread_struct *join_list_head;
+
+ VALUE first_proc;
+ VALUE first_args;
+ VALUE (*first_func)(ANYARGS);
+
+ /* for GC */
+ VALUE *machine_stack_start;
+ VALUE *machine_stack_end;
+ size_t machine_stack_maxsize;
+ #ifdef __ia64
+ VALUE *machine_register_stack_start;
+ VALUE *machine_register_stack_end;
+ size_t machine_register_stack_maxsize;
+ #endif
+ jmp_buf machine_regs;
+ int mark_stack_len;
+
+ /* statistics data for profiler */
+ VALUE stat_insn_usage;
+
+ /* tracer */
+ rb_event_hook_t *event_hooks;
+ rb_event_flag_t event_flags;
+ int tracing;
+
+ /* fiber */
+ VALUE fiber;
+ VALUE root_fiber;
+ rb_jmpbuf_t root_jmpbuf;
+
+ /* misc */
+ int method_missing_reason;
+ int abort_on_exception;
+ #ifdef USE_SIGALTSTACK
+ void *altstack;
+ #endif
+ } rb_thread_t;
+
+ /* iseq.c */
+ VALUE rb_iseq_new(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE);
+ VALUE rb_iseq_new_top(NODE *node, VALUE name, VALUE filename, VALUE filepath, VALUE parent);
+ VALUE rb_iseq_new_main(NODE *node, VALUE filename, VALUE filepath);
+ VALUE rb_iseq_new_with_bopt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE);
+ VALUE rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, VALUE, VALUE, const rb_compile_option_t*);
+ VALUE rb_iseq_compile(VALUE src, VALUE file, VALUE line);
+ VALUE rb_iseq_disasm(VALUE self);
+ int rb_iseq_disasm_insn(VALUE str, VALUE *iseqval, size_t pos, rb_iseq_t *iseq, VALUE child);
+ const char *ruby_node_name(int node);
+ int rb_iseq_first_lineno(rb_iseq_t *iseq);
+
+ RUBY_EXTERN VALUE rb_cISeq;
+ RUBY_EXTERN VALUE rb_cRubyVM;
+ RUBY_EXTERN VALUE rb_cEnv;
+ RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
+
+ /* each thread has this size stack : 128KB */
+ #define RUBY_VM_THREAD_STACK_SIZE (128 * 1024)
+
+ #define GetProcPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_proc_t, ptr)
+
+ typedef struct {
+ rb_block_t block;
+
+ VALUE envval; /* for GC mark */
+ VALUE blockprocval;
+ int safe_level;
+ int is_from_method;
+ int is_lambda;
+ } rb_proc_t;
+
+ #define GetEnvPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_env_t, ptr)
+
+ typedef struct {
+ VALUE *env;
+ int env_size;
+ int local_size;
+ VALUE prev_envval; /* for GC mark */
+ rb_block_t block;
+ } rb_env_t;
+
+ #define GetBindingPtr(obj, ptr) \
+ GetCoreDataFromValue(obj, rb_binding_t, ptr)
+
+ typedef struct {
+ VALUE env;
+ VALUE filename;
+ unsigned short line_no;
+ } rb_binding_t;
+
+ /* used by compile time and send insn */
+ #define VM_CALL_ARGS_SPLAT_BIT (0x01 << 1)
+ #define VM_CALL_ARGS_BLOCKARG_BIT (0x01 << 2)
+ #define VM_CALL_FCALL_BIT (0x01 << 3)
+ #define VM_CALL_VCALL_BIT (0x01 << 4)
+ #define VM_CALL_TAILCALL_BIT (0x01 << 5)
+ #define VM_CALL_TAILRECURSION_BIT (0x01 << 6)
+ #define VM_CALL_SUPER_BIT (0x01 << 7)
+ #define VM_CALL_OPT_SEND_BIT (0x01 << 8)
+
+ #define VM_SPECIAL_OBJECT_VMCORE 0x01
+ #define VM_SPECIAL_OBJECT_CBASE 0x02
+ #define VM_SPECIAL_OBJECT_CONST_BASE 0x03
+
+ #define VM_FRAME_MAGIC_METHOD 0x11
+ #define VM_FRAME_MAGIC_BLOCK 0x21
+ #define VM_FRAME_MAGIC_CLASS 0x31
+ #define VM_FRAME_MAGIC_TOP 0x41
+ #define VM_FRAME_MAGIC_FINISH 0x51
+ #define VM_FRAME_MAGIC_CFUNC 0x61
+ #define VM_FRAME_MAGIC_PROC 0x71
+ #define VM_FRAME_MAGIC_IFUNC 0x81
+ #define VM_FRAME_MAGIC_EVAL 0x91
+ #define VM_FRAME_MAGIC_LAMBDA 0xa1
+ #define VM_FRAME_MAGIC_MASK_BITS 8
+ #define VM_FRAME_MAGIC_MASK (~(~0<<VM_FRAME_MAGIC_MASK_BITS))
+
+ #define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
+
+ /* other frame flag */
+ #define VM_FRAME_FLAG_PASSED 0x0100
+
+ #define RUBYVM_CFUNC_FRAME_P(cfp) \
+ (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
+
+ /* inline cache */
+ typedef struct iseq_inline_cache_entry *IC;
+
+ extern VALUE ruby_vm_global_state_version;
+
+ #define GET_VM_STATE_VERSION() (ruby_vm_global_state_version)
+ #define INC_VM_STATE_VERSION() \
+ (ruby_vm_global_state_version = (ruby_vm_global_state_version+1) & 0x8fffffff)
+ void rb_vm_change_state(void);
+
+ typedef VALUE CDHASH;
+
+ #ifndef FUNC_FASTCALL
+ #define FUNC_FASTCALL(x) x
+ #endif
+
+ typedef rb_control_frame_t *
+ (FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
+
+ #define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
+ #define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)p) & ~0x03))
+ #define GC_GUARDED_PTR_P(p) (((VALUE)p) & 0x01)
+
+ #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) (cfp+1)
+ #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) (cfp-1)
+ #define RUBY_VM_END_CONTROL_FRAME(th) \
+ ((rb_control_frame_t *)((th)->stack + (th)->stack_size))
+ #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
+ ((void *)(ecfp) > (void *)(cfp))
+ #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
+ (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
+
+ #define RUBY_VM_IFUNC_P(ptr) (BUILTIN_TYPE(ptr) == T_NODE)
+ #define RUBY_VM_NORMAL_ISEQ_P(ptr) \
+ (ptr && !RUBY_VM_IFUNC_P(ptr))
+
+ #define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
+ #define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
+ ((rb_control_frame_t *)((VALUE *)(b) - 5))
+
+ /* VM related object allocate functions */
+ VALUE rb_thread_alloc(VALUE klass);
+ VALUE rb_proc_alloc(VALUE klass);
+
+ /* for debug */
+ extern void rb_vmdebug_stack_dump_raw(rb_thread_t *, rb_control_frame_t *);
+ #define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->cfp)
+ #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
+ void rb_vm_bugreport(void);
+
+ /* functions about thread/vm execution */
+ VALUE rb_iseq_eval(VALUE iseqval);
+ VALUE rb_iseq_eval_main(VALUE iseqval);
+ void rb_enable_interrupt(void);
+ void rb_disable_interrupt(void);
+ int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
+
+ VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
+ int argc, const VALUE *argv, const rb_block_t *blockptr);
+ VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
+ VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
+
+ void rb_thread_start_timer_thread(void);
+ void rb_thread_stop_timer_thread(void);
+ void rb_thread_reset_timer_thread(void);
+ void *rb_thread_call_with_gvl(void *(*func)(void *), void *data1);
+ int ruby_thread_has_gvl_p(void);
+ VALUE rb_make_backtrace(void);
+ typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
+ int rb_backtrace_each(rb_backtrace_iter_func *iter, void *arg);
+ rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
+ int rb_vm_get_sourceline(const rb_control_frame_t *);
+ VALUE rb_name_err_mesg_new(VALUE obj, VALUE mesg, VALUE recv, VALUE method);
+
+ NOINLINE(void rb_gc_save_machine_context(rb_thread_t *));
+
+ #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
+
+ VALUE rb_str_resurrect(VALUE str);
+ VALUE rb_ary_resurrect(VALUE ary);
+
+ /* for thread */
+
+ #if RUBY_VM_THREAD_MODEL == 2
+ RUBY_EXTERN rb_thread_t *ruby_current_thread;
+ extern rb_vm_t *ruby_current_vm;
+
+ #define GET_VM() ruby_current_vm
+ #define GET_THREAD() ruby_current_thread
+ #define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
+ #define rb_thread_set_current(th) do { \
+ rb_thread_set_current_raw(th); \
+ th->vm->running_thread = th; \
+ } while (0)
+
+ #else
+ #error "unsupported thread model"
+ #endif
+
+ #define RUBY_VM_SET_INTERRUPT(th) ((th)->interrupt_flag |= 0x02)
+ #define RUBY_VM_SET_TIMER_INTERRUPT(th) ((th)->interrupt_flag |= 0x01)
+ #define RUBY_VM_SET_FINALIZER_INTERRUPT(th) ((th)->interrupt_flag |= 0x04)
+ #define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & 0x02)
+
+ void rb_threadptr_check_signal(rb_thread_t *mth);
+ void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
+ void rb_threadptr_signal_exit(rb_thread_t *th);
+ void rb_threadptr_execute_interrupts(rb_thread_t *);
+
+ void rb_thread_lock_unlock(rb_thread_lock_t *);
+ void rb_thread_lock_destroy(rb_thread_lock_t *);
+
+ #define RUBY_VM_CHECK_INTS_TH(th) do { \
+ if (UNLIKELY(th->interrupt_flag)) { \
+ rb_threadptr_execute_interrupts(th); \
+ } \
+ } while (0)
+
+ #define RUBY_VM_CHECK_INTS() \
+ RUBY_VM_CHECK_INTS_TH(GET_THREAD())
+
+ /* tracer */
+ void
+ rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass);
+
+ #define EXEC_EVENT_HOOK(th, flag, self, id, klass) do { \
+ rb_event_flag_t wait_event__ = th->event_flags; \
+ if (UNLIKELY(wait_event__)) { \
+ if (wait_event__ & (flag | RUBY_EVENT_VM)) { \
+ rb_threadptr_exec_event_hooks(th, flag, self, id, klass); \
+ } \
+ } \
+ } while (0)
+
+ #endif /* RUBY_VM_CORE_H */
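The argument-information comment inside rb_iseq_struct above spells out how a Ruby parameter list maps onto the argc/arg_* fields. The following standalone sketch simply plugs one hypothetical signature (M = 2 mandatory, N = 1 optional, one rest arg, O = 1 post arg, one block arg) into those formulas to show the resulting values; the method definition and the numbers are illustrative only, not taken from the gem's files:

#include <assert.h>

/* Sketch: applies the argument-information formulas from the comment in
   rb_iseq_struct to a hypothetical Ruby method
   def m(a1, a2, b1 = 0, *c, d1, &e). */
int main(void)
{
    const int M = 2, N = 1, O = 1;

    int argc           = M;                  /* mandatory args              */
    int arg_opts       = N;                  /* optional args               */
    int arg_rest       = M + N + 1;          /* index of *c, or -1 if none  */
    int arg_post_len   = O;                  /* post args                   */
    int arg_post_start = M + N + 2;          /* index of d1                 */
    int arg_block      = M + N + 1 + O + 1;  /* index of &e, or -1 if none  */
    int arg_simple     = 0;                  /* 0: opt/rest/post/block used */

    assert(argc == 2);
    assert(arg_opts == 1);
    assert(arg_rest == 4);
    assert(arg_post_len == 1);
    assert(arg_post_start == 5);
    assert(arg_block == 6);
    assert(arg_simple == 0);
    return 0;
}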