pline 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. data/README +134 -0
  2. data/ext/pline/depend +55 -0
  3. data/ext/pline/extconf.rb +14 -0
  4. data/ext/pline/iseq.c +124 -0
  5. data/ext/pline/minfo.c +167 -0
  6. data/ext/pline/pline.c.rb +68 -0
  7. data/ext/pline/profiler.c +125 -0
  8. data/ext/pline/ruby_source/1.9.2/debug.h +36 -0
  9. data/ext/pline/ruby_source/1.9.2/eval_intern.h +232 -0
  10. data/ext/pline/ruby_source/1.9.2/gc.h +77 -0
  11. data/ext/pline/ruby_source/1.9.2/id.h +170 -0
  12. data/ext/pline/ruby_source/1.9.2/insns.inc +179 -0
  13. data/ext/pline/ruby_source/1.9.2/insns_info.inc +695 -0
  14. data/ext/pline/ruby_source/1.9.2/iseq.h +104 -0
  15. data/ext/pline/ruby_source/1.9.2/manual_update.h +19 -0
  16. data/ext/pline/ruby_source/1.9.2/method.h +103 -0
  17. data/ext/pline/ruby_source/1.9.2/node.h +483 -0
  18. data/ext/pline/ruby_source/1.9.2/thread_pthread.h +27 -0
  19. data/ext/pline/ruby_source/1.9.2/thread_win32.h +33 -0
  20. data/ext/pline/ruby_source/1.9.2/vm_core.h +706 -0
  21. data/ext/pline/ruby_source/1.9.2/vm_exec.h +184 -0
  22. data/ext/pline/ruby_source/1.9.2/vm_insnhelper.c +1734 -0
  23. data/ext/pline/ruby_source/1.9.2/vm_insnhelper.h +208 -0
  24. data/ext/pline/ruby_source/1.9.2/vm_opts.h +51 -0
  25. data/ext/pline/ruby_source/1.9.3/atomic.h +56 -0
  26. data/ext/pline/ruby_source/1.9.3/constant.h +34 -0
  27. data/ext/pline/ruby_source/1.9.3/debug.h +41 -0
  28. data/ext/pline/ruby_source/1.9.3/eval_intern.h +234 -0
  29. data/ext/pline/ruby_source/1.9.3/gc.h +98 -0
  30. data/ext/pline/ruby_source/1.9.3/id.h +175 -0
  31. data/ext/pline/ruby_source/1.9.3/insns.inc +179 -0
  32. data/ext/pline/ruby_source/1.9.3/insns_info.inc +695 -0
  33. data/ext/pline/ruby_source/1.9.3/internal.h +227 -0
  34. data/ext/pline/ruby_source/1.9.3/iseq.h +125 -0
  35. data/ext/pline/ruby_source/1.9.3/manual_update.h +19 -0
  36. data/ext/pline/ruby_source/1.9.3/method.h +105 -0
  37. data/ext/pline/ruby_source/1.9.3/node.h +503 -0
  38. data/ext/pline/ruby_source/1.9.3/thread_pthread.h +51 -0
  39. data/ext/pline/ruby_source/1.9.3/thread_win32.h +40 -0
  40. data/ext/pline/ruby_source/1.9.3/vm_core.h +756 -0
  41. data/ext/pline/ruby_source/1.9.3/vm_exec.h +184 -0
  42. data/ext/pline/ruby_source/1.9.3/vm_insnhelper.c +1749 -0
  43. data/ext/pline/ruby_source/1.9.3/vm_insnhelper.h +220 -0
  44. data/ext/pline/ruby_source/1.9.3/vm_opts.h +51 -0
  45. data/ext/pline/sinfo.c +311 -0
  46. data/lib/pline.rb +11 -0
  47. data/lib/pline/minfo.rb +22 -0
  48. data/lib/pline/summarize.rb +127 -0
  49. data/lib/pline/util.rb +54 -0
  50. data/pline.gemspec +28 -0
  51. metadata +102 -0
@@ -0,0 +1,184 @@
1
+ /**********************************************************************
2
+
3
+ vm.h -
4
+
5
+ $Author$
6
+ created at: 04/01/01 16:56:59 JST
7
+
8
+ Copyright (C) 2004-2007 Koichi Sasada
9
+
10
+ **********************************************************************/
11
+
12
+ #ifndef RUBY_VM_EXEC_H
13
+ #define RUBY_VM_EXEC_H
14
+
15
+ typedef long OFFSET;
16
+ typedef unsigned long lindex_t;
17
+ typedef unsigned long dindex_t;
18
+ typedef rb_num_t GENTRY;
19
+ typedef rb_iseq_t *ISEQ;
20
+
21
+ #ifdef COLLECT_USAGE_ANALYSIS
22
+ #define USAGE_ANALYSIS_INSN(insn) vm_analysis_insn(insn)
23
+ #define USAGE_ANALYSIS_OPERAND(insn, n, op) vm_analysis_operand((insn), (n), (VALUE)(op))
24
+ #define USAGE_ANALYSIS_REGISTER(reg, s) vm_analysis_register((reg), (s))
25
+ #else
26
+ #define USAGE_ANALYSIS_INSN(insn) /* none */
27
+ #define USAGE_ANALYSIS_OPERAND(insn, n, op) /* none */
28
+ #define USAGE_ANALYSIS_REGISTER(reg, s) /* none */
29
+ #endif
30
+
31
+ #ifdef __GCC__
32
+ /* TODO: machine dependent prefetch instruction */
33
+ #define PREFETCH(pc)
34
+ #else
35
+ #define PREFETCH(pc)
36
+ #endif
37
+
38
+ #if VMDEBUG > 0
39
+ #define debugs printf
40
+ #define DEBUG_ENTER_INSN(insn) \
41
+ debug_print_pre(th, GET_CFP());
42
+
43
+ #if OPT_STACK_CACHING
44
+ #define SC_REGS() , reg_a, reg_b
45
+ #else
46
+ #define SC_REGS()
47
+ #endif
48
+
49
+ #define DEBUG_END_INSN() \
50
+ debug_print_post(th, GET_CFP() SC_REGS());
51
+
52
+ #else
53
+
54
+ #define debugs
55
+ #define DEBUG_ENTER_INSN(insn)
56
+ #define DEBUG_END_INSN()
57
+ #endif
58
+
59
+ #define throwdebug if(0)printf
60
+ /* #define throwdebug printf */
61
+
62
+ /************************************************/
63
+ #if DISPATCH_XXX
64
+ error !
65
+ /************************************************/
66
+ #elif OPT_CALL_THREADED_CODE
67
+
68
+ #define LABEL(x) insn_func_##x
69
+ #define ELABEL(x)
70
+ #define LABEL_PTR(x) &LABEL(x)
71
+
72
+ #define INSN_ENTRY(insn) \
73
+ static rb_control_frame_t * \
74
+ FUNC_FASTCALL(LABEL(insn))(rb_thread_t *th, rb_control_frame_t *reg_cfp) {
75
+
76
+ #define END_INSN(insn) return reg_cfp;}
77
+
78
+ #define NEXT_INSN() return reg_cfp;
79
+
80
+ /************************************************/
81
+ #elif OPT_TOKEN_THREADED_CODE || OPT_DIRECT_THREADED_CODE
82
+ /* threaded code with gcc */
83
+
84
+ #define LABEL(x) INSN_LABEL_##x
85
+ #define ELABEL(x) INSN_ELABEL_##x
86
+ #define LABEL_PTR(x) &&LABEL(x)
87
+
88
+ #define INSN_ENTRY_SIG(insn)
89
+
90
+
91
+ #define INSN_DISPATCH_SIG(insn)
92
+
93
+ #define INSN_ENTRY(insn) \
94
+ LABEL(insn): \
95
+ INSN_ENTRY_SIG(insn); \
96
+
97
+ /* dispatcher */
98
+ #if __GNUC__ && (__i386__ || __x86_64__) && __GNUC__ == 3
99
+ #define DISPATCH_ARCH_DEPEND_WAY(addr) \
100
+ asm volatile("jmp *%0;\t# -- inseted by vm.h\t[length = 2]" : : "r" (addr))
101
+
102
+ #else
103
+ #define DISPATCH_ARCH_DEPEND_WAY(addr) \
104
+ /* do nothing */
105
+
106
+ #endif
107
+
108
+
109
+ /**********************************/
110
+ #if OPT_DIRECT_THREADED_CODE
111
+
112
+ /* for GCC 3.4.x */
113
+ #define TC_DISPATCH(insn) \
114
+ INSN_DISPATCH_SIG(insn); \
115
+ goto *(void const *)GET_CURRENT_INSN(); \
116
+ ;
117
+
118
+ #else
119
+ /* token threaded code */
120
+
121
+ #define TC_DISPATCH(insn) \
122
+ DISPATCH_ARCH_DEPEND_WAY(insns_address_table[GET_CURRENT_INSN()]); \
123
+ INSN_DISPATCH_SIG(insn); \
124
+ goto *insns_address_table[GET_CURRENT_INSN()]; \
125
+ rb_bug("tc error");
126
+
127
+
128
+ #endif /* DISPATCH_DIRECT_THREADED_CODE */
129
+
130
+ #define END_INSN(insn) \
131
+ DEBUG_END_INSN(); \
132
+ TC_DISPATCH(insn); \
133
+
134
+ #define INSN_DISPATCH() \
135
+ TC_DISPATCH(__START__) \
136
+ {
137
+
138
+ #define END_INSNS_DISPATCH() \
139
+ rb_bug("unknown insn: %"PRIdVALUE, GET_CURRENT_INSN()); \
140
+ } /* end of while loop */ \
141
+
142
+ #define NEXT_INSN() TC_DISPATCH(__NEXT_INSN__)
143
+
144
+ /************************************************/
145
+ #else /* no threaded code */
146
+ /* most common method */
147
+
148
+ #define INSN_ENTRY(insn) \
149
+ case BIN(insn):
150
+
151
+ #define END_INSN(insn) \
152
+ DEBUG_END_INSN(); \
153
+ break;
154
+
155
+
156
+ #define INSN_DISPATCH() \
157
+ while(1){ \
158
+ switch(GET_CURRENT_INSN()){
159
+
160
+ #define END_INSNS_DISPATCH() \
161
+ default: \
162
+ SDR(); \
163
+ rb_bug("unknown insn: %ld", GET_CURRENT_INSN()); \
164
+ } /* end of switch */ \
165
+ } /* end of while loop */ \
166
+
167
+ #define NEXT_INSN() goto first
168
+
169
+ #endif
170
+
171
+ #define VM_SP_CNT(th, sp) ((sp) - (th)->stack)
172
+
173
+ #if OPT_CALL_THREADED_CODE
174
+ #define THROW_EXCEPTION(exc) do { \
175
+ th->errinfo = (VALUE)(exc); \
176
+ return 0; \
177
+ } while (0)
178
+ #else
179
+ #define THROW_EXCEPTION(exc) return (VALUE)(exc)
180
+ #endif
181
+
182
+ #define SCREG(r) (reg_##r)
183
+
184
+ #endif /* RUBY_VM_EXEC_H */
@@ -0,0 +1,1749 @@
1
+ /**********************************************************************
2
+
3
+ vm_insnhelper.c - instruction helper functions.
4
+
5
+ $Author$
6
+
7
+ Copyright (C) 2007 Koichi Sasada
8
+
9
+ **********************************************************************/
10
+
11
+ /* finish iseq array */
12
+ #include "insns.inc"
13
+ #include <math.h>
14
+ #include "constant.h"
15
+ #include "internal.h"
16
+
17
+ /* control stack frame */
18
+
19
+ #ifndef INLINE
20
+ #define INLINE inline
21
+ #endif
22
+
23
+ static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
24
+
25
+ static inline rb_control_frame_t *
26
+ vm_push_frame(rb_thread_t * th, const rb_iseq_t * iseq,
27
+ VALUE type, VALUE self, VALUE specval,
28
+ const VALUE *pc, VALUE *sp, VALUE *lfp,
29
+ int local_size)
30
+ {
31
+ rb_control_frame_t * const cfp = th->cfp - 1;
32
+ int i;
33
+
34
+ if ((void *)(sp + local_size) >= (void *)cfp) {
35
+ rb_exc_raise(sysstack_error);
36
+ }
37
+ th->cfp = cfp;
38
+ /* setup vm value stack */
39
+
40
+ /* nil initialize */
41
+ for (i=0; i < local_size; i++) {
42
+ *sp = Qnil;
43
+ sp++;
44
+ }
45
+
46
+ /* set special val */
47
+ *sp = GC_GUARDED_PTR(specval);
48
+
49
+ if (lfp == 0) {
50
+ lfp = sp;
51
+ }
52
+
53
+ /* setup vm control frame stack */
54
+
55
+ cfp->pc = (VALUE *)pc;
56
+ cfp->sp = sp + 1;
57
+ cfp->bp = sp + 1;
58
+ cfp->iseq = (rb_iseq_t *) iseq;
59
+ cfp->flag = type;
60
+ cfp->self = self;
61
+ cfp->lfp = lfp;
62
+ cfp->dfp = sp;
63
+ cfp->block_iseq = 0;
64
+ cfp->proc = 0;
65
+ cfp->me = 0;
66
+
67
+ #define COLLECT_PROFILE 0
68
+ #if COLLECT_PROFILE
69
+ cfp->prof_time_self = clock();
70
+ cfp->prof_time_chld = 0;
71
+ #endif
72
+
73
+ if (VMDEBUG == 2) {
74
+ SDR();
75
+ }
76
+
77
+ return cfp;
78
+ }
79
+
80
+ static inline void
81
+ vm_pop_frame(rb_thread_t *th)
82
+ {
83
+ #if COLLECT_PROFILE
84
+ rb_control_frame_t *cfp = th->cfp;
85
+
86
+ if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
87
+ VALUE current_time = clock();
88
+ rb_control_frame_t *cfp = th->cfp;
89
+ cfp->prof_time_self = current_time - cfp->prof_time_self;
90
+ (cfp+1)->prof_time_chld += cfp->prof_time_self;
91
+
92
+ cfp->iseq->profile.count++;
93
+ cfp->iseq->profile.time_cumu = cfp->prof_time_self;
94
+ cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
95
+ }
96
+ else if (0 /* c method? */) {
97
+
98
+ }
99
+ #endif
100
+ th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
101
+
102
+ if (VMDEBUG == 2) {
103
+ SDR();
104
+ }
105
+ }
106
+
107
+ /* method dispatch */
108
+
109
+ NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc));
110
+ static void
111
+ argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc)
112
+ {
113
+ VALUE mesg = rb_sprintf("wrong number of arguments (%d for %d)", miss_argc, correct_argc);
114
+ VALUE exc = rb_exc_new3(rb_eArgError, mesg);
115
+ VALUE bt = rb_make_backtrace();
116
+ VALUE err_line = 0;
117
+
118
+ if (iseq) {
119
+ int line_no = 1;
120
+
121
+ if (iseq->insn_info_size) {
122
+ line_no = iseq->insn_info_table[0].line_no;
123
+ }
124
+
125
+ err_line = rb_sprintf("%s:%d:in `%s'",
126
+ RSTRING_PTR(iseq->filename),
127
+ line_no, RSTRING_PTR(iseq->name));
128
+ rb_funcall(bt, rb_intern("unshift"), 1, err_line);
129
+ }
130
+
131
+ rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
132
+ rb_exc_raise(exc);
133
+ }
134
+
135
+ #define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
136
+ if (LIKELY((iseq)->arg_simple & 0x01)) { \
137
+ /* simple check */ \
138
+ if ((orig_argc) != (iseq)->argc) { \
139
+ argument_error((iseq), (orig_argc), (iseq)->argc); \
140
+ } \
141
+ (ret) = 0; \
142
+ } \
143
+ else { \
144
+ (ret) = vm_callee_setup_arg_complex((th), (iseq), (orig_argc), (orig_argv), (block)); \
145
+ }
146
+
147
+ static inline int
148
+ vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t * iseq,
149
+ int orig_argc, VALUE * orig_argv,
150
+ const rb_block_t **block)
151
+ {
152
+ const int m = iseq->argc;
153
+ int argc = orig_argc;
154
+ VALUE *argv = orig_argv;
155
+ rb_num_t opt_pc = 0;
156
+
157
+ th->mark_stack_len = argc + iseq->arg_size;
158
+
159
+ /* mandatory */
160
+ if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
161
+ argument_error(iseq, argc, m + iseq->arg_post_len);
162
+ }
163
+
164
+ argv += m;
165
+ argc -= m;
166
+
167
+ /* post arguments */
168
+ if (iseq->arg_post_len) {
169
+ if (!(orig_argc < iseq->arg_post_start)) {
170
+ VALUE *new_argv = ALLOCA_N(VALUE, argc);
171
+ MEMCPY(new_argv, argv, VALUE, argc);
172
+ argv = new_argv;
173
+ }
174
+
175
+ MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
176
+ VALUE, iseq->arg_post_len);
177
+ }
178
+
179
+ /* opt arguments */
180
+ if (iseq->arg_opts) {
181
+ const int opts = iseq->arg_opts - 1 /* no opt */;
182
+
183
+ if (iseq->arg_rest == -1 && argc > opts) {
184
+ argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len);
185
+ }
186
+
187
+ if (argc > opts) {
188
+ argc -= opts;
189
+ argv += opts;
190
+ opt_pc = iseq->arg_opt_table[opts]; /* no opt */
191
+ }
192
+ else {
193
+ int i;
194
+ for (i = argc; i<opts; i++) {
195
+ orig_argv[i + m] = Qnil;
196
+ }
197
+ opt_pc = iseq->arg_opt_table[argc];
198
+ argc = 0;
199
+ }
200
+ }
201
+
202
+ /* rest arguments */
203
+ if (iseq->arg_rest != -1) {
204
+ orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
205
+ argc = 0;
206
+ }
207
+
208
+ /* block arguments */
209
+ if (block && iseq->arg_block != -1) {
210
+ VALUE blockval = Qnil;
211
+ const rb_block_t *blockptr = *block;
212
+
213
+ if (argc != 0) {
214
+ argument_error(iseq, orig_argc, m + iseq->arg_post_len);
215
+ }
216
+
217
+ if (blockptr) {
218
+ /* make Proc object */
219
+ if (blockptr->proc == 0) {
220
+ rb_proc_t *proc;
221
+ blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
222
+ GetProcPtr(blockval, proc);
223
+ *block = &proc->block;
224
+ }
225
+ else {
226
+ blockval = blockptr->proc;
227
+ }
228
+ }
229
+
230
+ orig_argv[iseq->arg_block] = blockval; /* Proc or nil */
231
+ }
232
+
233
+ th->mark_stack_len = 0;
234
+ return (int)opt_pc;
235
+ }
236
+
237
+ static inline int
238
+ caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
239
+ int argc, rb_iseq_t *blockiseq, rb_block_t **block)
240
+ {
241
+ rb_block_t *blockptr = 0;
242
+
243
+ if (block) {
244
+ if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
245
+ rb_proc_t *po;
246
+ VALUE proc;
247
+
248
+ proc = *(--cfp->sp);
249
+
250
+ if (proc != Qnil) {
251
+ if (!rb_obj_is_proc(proc)) {
252
+ VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
253
+ if (NIL_P(b) || !rb_obj_is_proc(b)) {
254
+ rb_raise(rb_eTypeError,
255
+ "wrong argument type %s (expected Proc)",
256
+ rb_obj_classname(proc));
257
+ }
258
+ proc = b;
259
+ }
260
+ GetProcPtr(proc, po);
261
+ blockptr = &po->block;
262
+ RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
263
+ *block = blockptr;
264
+ }
265
+ }
266
+ else if (blockiseq) {
267
+ blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
268
+ blockptr->iseq = blockiseq;
269
+ blockptr->proc = 0;
270
+ *block = blockptr;
271
+ }
272
+ }
273
+
274
+ /* expand top of stack? */
275
+ if (flag & VM_CALL_ARGS_SPLAT_BIT) {
276
+ VALUE ary = *(cfp->sp - 1);
277
+ VALUE *ptr;
278
+ int i;
279
+ VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");
280
+
281
+ if (NIL_P(tmp)) {
282
+ /* do nothing */
283
+ }
284
+ else {
285
+ long len = RARRAY_LEN(tmp);
286
+ ptr = RARRAY_PTR(tmp);
287
+ cfp->sp -= 1;
288
+
289
+ CHECK_STACK_OVERFLOW(cfp, len);
290
+
291
+ for (i = 0; i < len; i++) {
292
+ *cfp->sp++ = ptr[i];
293
+ }
294
+ argc += i-1;
295
+ }
296
+ }
297
+
298
+ return argc;
299
+ }
300
+
301
+ static inline VALUE
302
+ call_cfunc(VALUE (*func)(), VALUE recv,
303
+ int len, int argc, const VALUE *argv)
304
+ {
305
+ /* printf("len: %d, argc: %d\n", len, argc); */
306
+
307
+ if (len >= 0 && argc != len) {
308
+ rb_raise(rb_eArgError, "wrong number of arguments(%d for %d)",
309
+ argc, len);
310
+ }
311
+
312
+ switch (len) {
313
+ case -2:
314
+ return (*func) (recv, rb_ary_new4(argc, argv));
315
+ break;
316
+ case -1:
317
+ return (*func) (argc, argv, recv);
318
+ break;
319
+ case 0:
320
+ return (*func) (recv);
321
+ break;
322
+ case 1:
323
+ return (*func) (recv, argv[0]);
324
+ break;
325
+ case 2:
326
+ return (*func) (recv, argv[0], argv[1]);
327
+ break;
328
+ case 3:
329
+ return (*func) (recv, argv[0], argv[1], argv[2]);
330
+ break;
331
+ case 4:
332
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
333
+ break;
334
+ case 5:
335
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
336
+ break;
337
+ case 6:
338
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
339
+ argv[5]);
340
+ break;
341
+ case 7:
342
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
343
+ argv[5], argv[6]);
344
+ break;
345
+ case 8:
346
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
347
+ argv[5], argv[6], argv[7]);
348
+ break;
349
+ case 9:
350
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
351
+ argv[5], argv[6], argv[7], argv[8]);
352
+ break;
353
+ case 10:
354
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
355
+ argv[5], argv[6], argv[7], argv[8], argv[9]);
356
+ break;
357
+ case 11:
358
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
359
+ argv[5], argv[6], argv[7], argv[8], argv[9],
360
+ argv[10]);
361
+ break;
362
+ case 12:
363
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
364
+ argv[5], argv[6], argv[7], argv[8], argv[9],
365
+ argv[10], argv[11]);
366
+ break;
367
+ case 13:
368
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
369
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
370
+ argv[11], argv[12]);
371
+ break;
372
+ case 14:
373
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
374
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
375
+ argv[11], argv[12], argv[13]);
376
+ break;
377
+ case 15:
378
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
379
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
380
+ argv[11], argv[12], argv[13], argv[14]);
381
+ break;
382
+ default:
383
+ rb_raise(rb_eArgError, "too many arguments(%d)", len);
384
+ return Qundef; /* not reached */
385
+ }
386
+ }
387
+
388
+ static inline VALUE
389
+ vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp,
390
+ int num, volatile VALUE recv, const rb_block_t *blockptr,
391
+ const rb_method_entry_t *me)
392
+ {
393
+ volatile VALUE val = 0;
394
+ const rb_method_definition_t *def = me->def;
395
+ rb_control_frame_t *cfp;
396
+
397
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass);
398
+
399
+ cfp = vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
400
+ recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
401
+ cfp->me = me;
402
+ reg_cfp->sp -= num + 1;
403
+
404
+ val = call_cfunc(def->body.cfunc.func, recv, (int)def->body.cfunc.argc, num, reg_cfp->sp + 1);
405
+
406
+ if (reg_cfp != th->cfp + 1) {
407
+ rb_bug("cfp consistency error - send");
408
+ }
409
+ #ifdef __llvm__
410
+ #define RB_LLVM_GUARD(v) RB_GC_GUARD(v)
411
+ RB_LLVM_GUARD(reg_cfp);
412
+ #endif
413
+
414
+ vm_pop_frame(th);
415
+
416
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass);
417
+
418
+ return val;
419
+ }
420
+
421
+ static inline VALUE
422
+ vm_call_bmethod(rb_thread_t *th, VALUE recv, int argc, const VALUE *argv,
423
+ const rb_block_t *blockptr, const rb_method_entry_t *me)
424
+ {
425
+ rb_proc_t *proc;
426
+ VALUE val;
427
+
428
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, recv, me->called_id, me->klass);
429
+
430
+ /* control block frame */
431
+ th->passed_me = me;
432
+ GetProcPtr(me->def->body.proc, proc);
433
+ val = rb_vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
434
+
435
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, recv, me->called_id, me->klass);
436
+
437
+ return val;
438
+ }
439
+
440
+ static inline void
441
+ vm_method_missing_args(rb_thread_t *th, VALUE *argv,
442
+ int num, const rb_block_t *blockptr, int opt)
443
+ {
444
+ rb_control_frame_t * const reg_cfp = th->cfp;
445
+ MEMCPY(argv, STACK_ADDR_FROM_TOP(num + 1), VALUE, num + 1);
446
+ th->method_missing_reason = opt;
447
+ th->passed_block = blockptr;
448
+ POPN(num + 1);
449
+ }
450
+
451
+ static inline VALUE
452
+ vm_method_missing(rb_thread_t *th, ID id, VALUE recv,
453
+ int num, const rb_block_t *blockptr, int opt)
454
+ {
455
+ VALUE *argv = ALLOCA_N(VALUE, num + 1);
456
+ vm_method_missing_args(th, argv, num, blockptr, opt);
457
+ argv[0] = ID2SYM(id);
458
+ return rb_funcall2(recv, idMethodMissing, num + 1, argv);
459
+ }
460
+
461
+ static inline void
462
+ vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
463
+ VALUE recv, int argc, const rb_block_t *blockptr, VALUE flag,
464
+ const rb_method_entry_t *me)
465
+ {
466
+ int opt_pc, i;
467
+ VALUE *sp, *rsp = cfp->sp - argc;
468
+ rb_iseq_t *iseq = me->def->body.iseq;
469
+
470
+ VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, rsp, &blockptr);
471
+
472
+ /* stack overflow check */
473
+ CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);
474
+
475
+ sp = rsp + iseq->arg_size;
476
+
477
+ if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
478
+ if (0) printf("local_size: %d, arg_size: %d\n",
479
+ iseq->local_size, iseq->arg_size);
480
+
481
+ /* clear local variables */
482
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
483
+ *sp++ = Qnil;
484
+ }
485
+
486
+ vm_push_frame(th, iseq,
487
+ VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
488
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
489
+
490
+ cfp->sp = rsp - 1 /* recv */;
491
+ }
492
+ else {
493
+ VALUE *p_rsp;
494
+ th->cfp++; /* pop cf */
495
+ p_rsp = th->cfp->sp;
496
+
497
+ /* copy arguments */
498
+ for (i=0; i < (sp - rsp); i++) {
499
+ p_rsp[i] = rsp[i];
500
+ }
501
+
502
+ sp -= rsp - p_rsp;
503
+
504
+ /* clear local variables */
505
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
506
+ *sp++ = Qnil;
507
+ }
508
+
509
+ vm_push_frame(th, iseq,
510
+ VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
511
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
512
+ }
513
+ }
514
+
515
+ static inline VALUE
516
+ vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
517
+ int num, const rb_block_t *blockptr, VALUE flag,
518
+ ID id, const rb_method_entry_t *me, VALUE recv)
519
+ {
520
+ VALUE val;
521
+
522
+ start_method_dispatch:
523
+
524
+ if (me != 0) {
525
+ if ((me->flag == 0)) {
526
+ normal_method_dispatch:
527
+ switch (me->def->type) {
528
+ case VM_METHOD_TYPE_ISEQ:{
529
+ vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
530
+ return Qundef;
531
+ }
532
+ case VM_METHOD_TYPE_NOTIMPLEMENTED:
533
+ case VM_METHOD_TYPE_CFUNC:{
534
+ val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
535
+ break;
536
+ }
537
+ case VM_METHOD_TYPE_ATTRSET:{
538
+ if (num != 1) {
539
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
540
+ }
541
+ val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
542
+ cfp->sp -= 2;
543
+ break;
544
+ }
545
+ case VM_METHOD_TYPE_IVAR:{
546
+ if (num != 0) {
547
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
548
+ }
549
+ val = rb_attr_get(recv, me->def->body.attr.id);
550
+ cfp->sp -= 1;
551
+ break;
552
+ }
553
+ case VM_METHOD_TYPE_MISSING:{
554
+ VALUE *argv = ALLOCA_N(VALUE, num+1);
555
+ argv[0] = ID2SYM(me->def->original_id);
556
+ MEMCPY(argv+1, cfp->sp - num, VALUE, num);
557
+ cfp->sp += - num - 1;
558
+ th->passed_block = blockptr;
559
+ val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
560
+ break;
561
+ }
562
+ case VM_METHOD_TYPE_BMETHOD:{
563
+ VALUE *argv = ALLOCA_N(VALUE, num);
564
+ MEMCPY(argv, cfp->sp - num, VALUE, num);
565
+ cfp->sp += - num - 1;
566
+ val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
567
+ break;
568
+ }
569
+ case VM_METHOD_TYPE_ZSUPER:{
570
+ VALUE klass = RCLASS_SUPER(me->klass);
571
+ me = rb_method_entry(klass, id);
572
+
573
+ if (me != 0) {
574
+ goto normal_method_dispatch;
575
+ }
576
+ else {
577
+ goto start_method_dispatch;
578
+ }
579
+ }
580
+ case VM_METHOD_TYPE_OPTIMIZED:{
581
+ switch (me->def->body.optimize_type) {
582
+ case OPTIMIZED_METHOD_TYPE_SEND: {
583
+ rb_control_frame_t *reg_cfp = cfp;
584
+ rb_num_t i = num - 1;
585
+ VALUE sym;
586
+
587
+ if (num == 0) {
588
+ rb_raise(rb_eArgError, "no method name given");
589
+ }
590
+
591
+ sym = TOPN(i);
592
+ id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
593
+ /* shift arguments */
594
+ if (i > 0) {
595
+ MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
596
+ }
597
+ me = rb_method_entry(CLASS_OF(recv), id);
598
+ num -= 1;
599
+ DEC_SP(1);
600
+ flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;
601
+
602
+ goto start_method_dispatch;
603
+ }
604
+ case OPTIMIZED_METHOD_TYPE_CALL: {
605
+ rb_proc_t *proc;
606
+ int argc = num;
607
+ VALUE *argv = ALLOCA_N(VALUE, num);
608
+ GetProcPtr(recv, proc);
609
+ MEMCPY(argv, cfp->sp - num, VALUE, num);
610
+ cfp->sp -= num + 1;
611
+
612
+ val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
613
+ break;
614
+ }
615
+ default:
616
+ rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
617
+ me->def->body.optimize_type);
618
+ }
619
+ break;
620
+ }
621
+ default:{
622
+ rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
623
+ break;
624
+ }
625
+ }
626
+ }
627
+ else {
628
+ int noex_safe;
629
+
630
+ if (!(flag & VM_CALL_FCALL_BIT) &&
631
+ (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
632
+ int stat = NOEX_PRIVATE;
633
+
634
+ if (flag & VM_CALL_VCALL_BIT) {
635
+ stat |= NOEX_VCALL;
636
+ }
637
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
638
+ }
639
+ else if (!(flag & VM_CALL_OPT_SEND_BIT) && (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
640
+ VALUE defined_class = me->klass;
641
+
642
+ if (RB_TYPE_P(defined_class, T_ICLASS)) {
643
+ defined_class = RBASIC(defined_class)->klass;
644
+ }
645
+
646
+ if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
647
+ val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
648
+ }
649
+ else {
650
+ goto normal_method_dispatch;
651
+ }
652
+ }
653
+ else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
654
+ (noex_safe > 2)) {
655
+ rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
656
+ }
657
+ else {
658
+ goto normal_method_dispatch;
659
+ }
660
+ }
661
+ }
662
+ else {
663
+ /* method missing */
664
+ int stat = 0;
665
+ if (flag & VM_CALL_VCALL_BIT) {
666
+ stat |= NOEX_VCALL;
667
+ }
668
+ if (flag & VM_CALL_SUPER_BIT) {
669
+ stat |= NOEX_SUPER;
670
+ }
671
+ if (id == idMethodMissing) {
672
+ VALUE *argv = ALLOCA_N(VALUE, num);
673
+ vm_method_missing_args(th, argv, num - 1, 0, stat);
674
+ rb_raise_method_missing(th, num, argv, recv, stat);
675
+ }
676
+ else {
677
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
678
+ }
679
+ }
680
+
681
+ RUBY_VM_CHECK_INTS();
682
+ return val;
683
+ }
684
+
685
+ /* yield */
686
+
687
+ static inline int
688
+ block_proc_is_lambda(const VALUE procval)
689
+ {
690
+ rb_proc_t *proc;
691
+
692
+ if (procval) {
693
+ GetProcPtr(procval, proc);
694
+ return proc->is_lambda;
695
+ }
696
+ else {
697
+ return 0;
698
+ }
699
+ }
700
+
701
+ static inline VALUE
702
+ vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
703
+ VALUE self, int argc, const VALUE *argv,
704
+ const rb_block_t *blockargptr)
705
+ {
706
+ NODE *ifunc = (NODE *) block->iseq;
707
+ VALUE val, arg, blockarg;
708
+ int lambda = block_proc_is_lambda(block->proc);
709
+
710
+ if (lambda) {
711
+ arg = rb_ary_new4(argc, argv);
712
+ }
713
+ else if (argc == 0) {
714
+ arg = Qnil;
715
+ }
716
+ else {
717
+ arg = argv[0];
718
+ }
719
+
720
+ if (blockargptr) {
721
+ if (blockargptr->proc) {
722
+ blockarg = blockargptr->proc;
723
+ }
724
+ else {
725
+ blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
726
+ }
727
+ }
728
+ else {
729
+ blockarg = Qnil;
730
+ }
731
+
732
+ vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
733
+ self, (VALUE)block->dfp,
734
+ 0, th->cfp->sp, block->lfp, 1);
735
+
736
+ if (blockargptr) {
737
+ th->cfp->lfp[0] = GC_GUARDED_PTR((VALUE)blockargptr);
738
+ }
739
+ val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);
740
+
741
+ th->cfp++;
742
+ return val;
743
+ }
744
+
745
+
746
+ /*--
747
+ * @brief called when optional, rest and post parameters are all supplied.
748
+ * @pre iseq is block style (not lambda style)
749
+ */
750
+ static inline int
751
+ vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
752
+ int argc, VALUE *argv)
753
+ {
754
+ rb_num_t opt_pc = 0;
755
+ int i;
756
+ const int m = iseq->argc;
757
+ const int r = iseq->arg_rest;
758
+ int len = iseq->arg_post_len;
759
+ int start = iseq->arg_post_start;
760
+ int rsize = argc > m ? argc - m : 0; /* # of arguments which did not consumed yet */
761
+ int psize = rsize > len ? len : rsize; /* # of post arguments */
762
+ int osize = 0; /* # of opt arguments */
763
+ VALUE ary;
764
+
765
+ /* reserves arguments for post parameters */
766
+ rsize -= psize;
767
+
768
+ if (iseq->arg_opts) {
769
+ const int opts = iseq->arg_opts - 1;
770
+ if (rsize > opts) {
771
+ osize = opts;
772
+ opt_pc = iseq->arg_opt_table[opts];
773
+ }
774
+ else {
775
+ osize = rsize;
776
+ opt_pc = iseq->arg_opt_table[rsize];
777
+ }
778
+ }
779
+ rsize -= osize;
780
+
781
+ if (0) {
782
+ printf(" argc: %d\n", argc);
783
+ printf(" len: %d\n", len);
784
+ printf("start: %d\n", start);
785
+ printf("rsize: %d\n", rsize);
786
+ }
787
+
788
+ if (r == -1) {
789
+ /* copy post argument */
790
+ MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
791
+ }
792
+ else {
793
+ ary = rb_ary_new4(rsize, &argv[r]);
794
+
795
+ /* copy post argument */
796
+ MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
797
+ argv[r] = ary;
798
+ }
799
+
800
+ for (i=psize; i<len; i++) {
801
+ argv[start + i] = Qnil;
802
+ }
803
+
804
+ return (int)opt_pc;
805
+ }
806
+
807
+ static inline int
808
+ vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
809
+ int orig_argc, VALUE *argv,
810
+ const rb_block_t *blockptr)
811
+ {
812
+ int i;
813
+ int argc = orig_argc;
814
+ const int m = iseq->argc;
815
+ VALUE ary, arg0;
816
+ int opt_pc = 0;
817
+
818
+ th->mark_stack_len = argc;
819
+
820
+ /*
821
+ * yield [1, 2]
822
+ * => {|a|} => a = [1, 2]
823
+ * => {|a, b|} => a, b = [1, 2]
824
+ */
825
+ arg0 = argv[0];
826
+ if (!(iseq->arg_simple & 0x02) && /* exclude {|a|} */
827
+ (m + iseq->arg_post_len) > 0 && /* this process is meaningful */
828
+ argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) { /* rhs is only an array */
829
+ th->mark_stack_len = argc = RARRAY_LENINT(ary);
830
+
831
+ CHECK_STACK_OVERFLOW(th->cfp, argc);
832
+
833
+ MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
834
+ }
835
+ else {
836
+ argv[0] = arg0;
837
+ }
838
+
839
+ for (i=argc; i<m; i++) {
840
+ argv[i] = Qnil;
841
+ }
842
+
843
+ if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
844
+ const int arg_size = iseq->arg_size;
845
+ if (arg_size < argc) {
846
+ /*
847
+ * yield 1, 2
848
+ * => {|a|} # truncate
849
+ */
850
+ th->mark_stack_len = argc = arg_size;
851
+ }
852
+ }
853
+ else {
854
+ int r = iseq->arg_rest;
855
+
856
+ if (iseq->arg_post_len ||
857
+ iseq->arg_opts) { /* TODO: implement simple version for (iseq->arg_post_len==0 && iseq->arg_opts > 0) */
858
+ opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
859
+ }
860
+ else {
861
+ if (argc < r) {
862
+ /* yield 1
863
+ * => {|a, b, *r|}
864
+ */
865
+ for (i=argc; i<r; i++) {
866
+ argv[i] = Qnil;
867
+ }
868
+ argv[r] = rb_ary_new();
869
+ }
870
+ else {
871
+ argv[r] = rb_ary_new4(argc-r, &argv[r]);
872
+ }
873
+ }
874
+
875
+ th->mark_stack_len = iseq->arg_size;
876
+ }
877
+
878
+ /* {|&b|} */
879
+ if (iseq->arg_block != -1) {
880
+ VALUE procval = Qnil;
881
+
882
+ if (blockptr) {
883
+ if (blockptr->proc == 0) {
884
+ procval = rb_vm_make_proc(th, blockptr, rb_cProc);
885
+ }
886
+ else {
887
+ procval = blockptr->proc;
888
+ }
889
+ }
890
+
891
+ argv[iseq->arg_block] = procval;
892
+ }
893
+
894
+ th->mark_stack_len = 0;
895
+ return opt_pc;
896
+ }
897
+
898
+ static inline int
899
+ vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
900
+ int argc, VALUE *argv,
901
+ const rb_block_t *blockptr, int lambda)
902
+ {
903
+ if (0) { /* for debug */
904
+ printf(" argc: %d\n", argc);
905
+ printf("iseq argc: %d\n", iseq->argc);
906
+ printf("iseq opts: %d\n", iseq->arg_opts);
907
+ printf("iseq rest: %d\n", iseq->arg_rest);
908
+ printf("iseq post: %d\n", iseq->arg_post_len);
909
+ printf("iseq blck: %d\n", iseq->arg_block);
910
+ printf("iseq smpl: %d\n", iseq->arg_simple);
911
+ printf(" lambda: %s\n", lambda ? "true" : "false");
912
+ }
913
+
914
+ if (lambda) {
915
+ /* call as method */
916
+ int opt_pc;
917
+ VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, argv, &blockptr);
918
+ return opt_pc;
919
+ }
920
+ else {
921
+ return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
922
+ }
923
+ }
924
+
925
+ static VALUE
926
+ vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
927
+ {
928
+ const rb_block_t *block = GET_BLOCK_PTR();
929
+ rb_iseq_t *iseq;
930
+ int argc = (int)num;
931
+ VALUE type = GET_ISEQ()->local_iseq->type;
932
+
933
+ if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
934
+ rb_vm_localjump_error("no block given (yield)", Qnil, 0);
935
+ }
936
+ iseq = block->iseq;
937
+
938
+ argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);
939
+
940
+ if (BUILTIN_TYPE(iseq) != T_NODE) {
941
+ int opt_pc;
942
+ const int arg_size = iseq->arg_size;
943
+ VALUE * const rsp = GET_SP() - argc;
944
+ SET_SP(rsp);
945
+
946
+ CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
947
+ opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
948
+ block_proc_is_lambda(block->proc));
949
+
950
+ vm_push_frame(th, iseq,
951
+ VM_FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
952
+ iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
953
+ iseq->local_size - arg_size);
954
+
955
+ return Qundef;
956
+ }
957
+ else {
958
+ VALUE val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
959
+ POPN(argc); /* TODO: should put before C/yield? */
960
+ return val;
961
+ }
962
+ }
963
+
964
+ /* svar */
965
+
966
+ static inline NODE *
967
+ lfp_svar_place(rb_thread_t *th, VALUE *lfp)
968
+ {
969
+ VALUE *svar;
970
+
971
+ if (lfp && th->local_lfp != lfp) {
972
+ svar = &lfp[-1];
973
+ }
974
+ else {
975
+ svar = &th->local_svar;
976
+ }
977
+ if (NIL_P(*svar)) {
978
+ *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
979
+ }
980
+ return (NODE *)*svar;
981
+ }
982
+
983
+ static VALUE
984
+ lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
985
+ {
986
+ NODE *svar = lfp_svar_place(th, lfp);
987
+
988
+ switch (key) {
989
+ case 0:
990
+ return svar->u1.value;
991
+ case 1:
992
+ return svar->u2.value;
993
+ default: {
994
+ const VALUE hash = svar->u3.value;
995
+
996
+ if (hash == Qnil) {
997
+ return Qnil;
998
+ }
999
+ else {
1000
+ return rb_hash_lookup(hash, key);
1001
+ }
1002
+ }
1003
+ }
1004
+ }
1005
+
1006
+ static void
1007
+ lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
1008
+ {
1009
+ NODE *svar = lfp_svar_place(th, lfp);
1010
+
1011
+ switch (key) {
1012
+ case 0:
1013
+ svar->u1.value = val;
1014
+ return;
1015
+ case 1:
1016
+ svar->u2.value = val;
1017
+ return;
1018
+ default: {
1019
+ VALUE hash = svar->u3.value;
1020
+
1021
+ if (hash == Qnil) {
1022
+ svar->u3.value = hash = rb_hash_new();
1023
+ }
1024
+ rb_hash_aset(hash, key, val);
1025
+ }
1026
+ }
1027
+ }
1028
+
1029
+ static inline VALUE
1030
+ vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
1031
+ {
1032
+ VALUE val;
1033
+
1034
+ if (type == 0) {
1035
+ VALUE k = key;
1036
+ if (FIXNUM_P(key)) {
1037
+ k = FIX2INT(key);
1038
+ }
1039
+ val = lfp_svar_get(th, lfp, k);
1040
+ }
1041
+ else {
1042
+ VALUE backref = lfp_svar_get(th, lfp, 1);
1043
+
1044
+ if (type & 0x01) {
1045
+ switch (type >> 1) {
1046
+ case '&':
1047
+ val = rb_reg_last_match(backref);
1048
+ break;
1049
+ case '`':
1050
+ val = rb_reg_match_pre(backref);
1051
+ break;
1052
+ case '\'':
1053
+ val = rb_reg_match_post(backref);
1054
+ break;
1055
+ case '+':
1056
+ val = rb_reg_match_last(backref);
1057
+ break;
1058
+ default:
1059
+ rb_bug("unexpected back-ref");
1060
+ }
1061
+ }
1062
+ else {
1063
+ val = rb_reg_nth_match((int)(type >> 1), backref);
1064
+ }
1065
+ }
1066
+ return val;
1067
+ }
1068
+
1069
+ static NODE *
1070
+ vm_get_cref0(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1071
+ {
1072
+ while (1) {
1073
+ if (lfp == dfp) {
1074
+ if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) return NULL;
1075
+ return iseq->cref_stack;
1076
+ }
1077
+ else if (dfp[-1] != Qnil) {
1078
+ return (NODE *)dfp[-1];
1079
+ }
1080
+ dfp = GET_PREV_DFP(dfp);
1081
+ }
1082
+ }
1083
+
1084
+ static NODE *
1085
+ vm_get_cref(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1086
+ {
1087
+ NODE *cref = vm_get_cref0(iseq, lfp, dfp);
1088
+
1089
+ if (cref == 0) {
1090
+ rb_bug("vm_get_cref: unreachable");
1091
+ }
1092
+ return cref;
1093
+ }
1094
+
1095
+ static NODE *
1096
+ vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
1097
+ {
1098
+ rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
1099
+ NODE *cref = NEW_BLOCK(klass);
1100
+ cref->nd_visi = noex;
1101
+
1102
+ if (blockptr) {
1103
+ cref->nd_next = vm_get_cref0(blockptr->iseq, blockptr->lfp, blockptr->dfp);
1104
+ }
1105
+ else if (cfp) {
1106
+ cref->nd_next = vm_get_cref0(cfp->iseq, cfp->lfp, cfp->dfp);
1107
+ }
1108
+
1109
+ return cref;
1110
+ }
1111
+
1112
+ static inline VALUE
1113
+ vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1114
+ {
1115
+ NODE *cref = vm_get_cref(iseq, lfp, dfp);
1116
+ VALUE klass = Qundef;
1117
+
1118
+ while (cref) {
1119
+ if ((klass = cref->nd_clss) != 0) {
1120
+ break;
1121
+ }
1122
+ cref = cref->nd_next;
1123
+ }
1124
+
1125
+ return klass;
1126
+ }
1127
+
1128
+ static inline VALUE
1129
+ vm_get_const_base(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1130
+ {
1131
+ NODE *cref = vm_get_cref(iseq, lfp, dfp);
1132
+ VALUE klass = Qundef;
1133
+
1134
+ while (cref) {
1135
+ if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
1136
+ (klass = cref->nd_clss) != 0) {
1137
+ break;
1138
+ }
1139
+ cref = cref->nd_next;
1140
+ }
1141
+
1142
+ return klass;
1143
+ }
1144
+
1145
+ static inline void
1146
+ vm_check_if_namespace(VALUE klass)
1147
+ {
1148
+ VALUE str;
1149
+ switch (TYPE(klass)) {
1150
+ case T_CLASS:
1151
+ case T_MODULE:
1152
+ break;
1153
+ default:
1154
+ str = rb_inspect(klass);
1155
+ rb_raise(rb_eTypeError, "%s is not a class/module",
1156
+ StringValuePtr(str));
1157
+ }
1158
+ }
1159
+
1160
+ static inline VALUE
1161
+ vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
1162
+ VALUE orig_klass, ID id, int is_defined)
1163
+ {
1164
+ VALUE val;
1165
+
1166
+ if (orig_klass == Qnil) {
1167
+ /* in current lexical scope */
1168
+ const NODE *root_cref = vm_get_cref(iseq, th->cfp->lfp, th->cfp->dfp);
1169
+ const NODE *cref;
1170
+ VALUE klass = orig_klass;
1171
+
1172
+ while (root_cref && root_cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
1173
+ root_cref = root_cref->nd_next;
1174
+ }
1175
+ cref = root_cref;
1176
+ while (cref && cref->nd_next) {
1177
+ if (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
1178
+ klass = Qnil;
1179
+ }
1180
+ else {
1181
+ klass = cref->nd_clss;
1182
+ }
1183
+ cref = cref->nd_next;
1184
+
1185
+ if (!NIL_P(klass)) {
1186
+ VALUE am = 0;
1187
+ st_data_t data;
1188
+ search_continue:
1189
+ if (RCLASS_CONST_TBL(klass) &&
1190
+ st_lookup(RCLASS_CONST_TBL(klass), id, &data)) {
1191
+ val = ((rb_const_entry_t*)data)->value;
1192
+ if (val == Qundef) {
1193
+ if (am == klass) break;
1194
+ am = klass;
1195
+ if (is_defined) return 1;
1196
+ rb_autoload_load(klass, id);
1197
+ goto search_continue;
1198
+ }
1199
+ else {
1200
+ if (is_defined) {
1201
+ return 1;
1202
+ }
1203
+ else {
1204
+ return val;
1205
+ }
1206
+ }
1207
+ }
1208
+ }
1209
+ }
1210
+
1211
+ /* search self */
1212
+ if (root_cref && !NIL_P(root_cref->nd_clss)) {
1213
+ klass = root_cref->nd_clss;
1214
+ }
1215
+ else {
1216
+ klass = CLASS_OF(th->cfp->self);
1217
+ }
1218
+
1219
+ if (is_defined) {
1220
+ return rb_const_defined(klass, id);
1221
+ }
1222
+ else {
1223
+ return rb_const_get(klass, id);
1224
+ }
1225
+ }
1226
+ else {
1227
+ vm_check_if_namespace(orig_klass);
1228
+ if (is_defined) {
1229
+ return rb_public_const_defined_from(orig_klass, id);
1230
+ }
1231
+ else {
1232
+ return rb_public_const_get_from(orig_klass, id);
1233
+ }
1234
+ }
1235
+ }
1236
+
1237
+ static inline VALUE
1238
+ vm_get_cvar_base(NODE *cref)
1239
+ {
1240
+ VALUE klass;
1241
+
1242
+ while (cref && cref->nd_next &&
1243
+ (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
1244
+ (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
1245
+ cref = cref->nd_next;
1246
+
1247
+ if (!cref->nd_next) {
1248
+ rb_warn("class variable access from toplevel");
1249
+ }
1250
+ }
1251
+
1252
+ klass = cref->nd_clss;
1253
+
1254
+ if (NIL_P(klass)) {
1255
+ rb_raise(rb_eTypeError, "no class variables available");
1256
+ }
1257
+ return klass;
1258
+ }
1259
+
1260
+ static VALUE
1261
+ vm_search_const_defined_class(const VALUE cbase, ID id)
1262
+ {
1263
+ if (rb_const_defined_at(cbase, id)) return cbase;
1264
+ if (cbase == rb_cObject) {
1265
+ VALUE tmp = RCLASS_SUPER(cbase);
1266
+ while (tmp) {
1267
+ if (rb_const_defined_at(tmp, id)) return tmp;
1268
+ tmp = RCLASS_SUPER(tmp);
1269
+ }
1270
+ }
1271
+ return 0;
1272
+ }
1273
+
1274
+ #ifndef USE_IC_FOR_IVAR
1275
+ #define USE_IC_FOR_IVAR 1
1276
+ #endif
1277
+
1278
+ static VALUE
1279
+ vm_getivar(VALUE obj, ID id, IC ic)
1280
+ {
1281
+ #if USE_IC_FOR_IVAR
1282
+ if (TYPE(obj) == T_OBJECT) {
1283
+ VALUE val = Qundef;
1284
+ VALUE klass = RBASIC(obj)->klass;
1285
+
1286
+ if (LIKELY(ic->ic_class == klass &&
1287
+ ic->ic_vmstat == GET_VM_STATE_VERSION())) {
1288
+ long index = ic->ic_value.index;
1289
+ long len = ROBJECT_NUMIV(obj);
1290
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1291
+
1292
+ if (index < len) {
1293
+ val = ptr[index];
1294
+ }
1295
+ }
1296
+ else {
1297
+ st_data_t index;
1298
+ long len = ROBJECT_NUMIV(obj);
1299
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1300
+ struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1301
+
1302
+ if (iv_index_tbl) {
1303
+ if (st_lookup(iv_index_tbl, id, &index)) {
1304
+ if ((long)index < len) {
1305
+ val = ptr[index];
1306
+ }
1307
+ ic->ic_class = klass;
1308
+ ic->ic_value.index = index;
1309
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1310
+ }
1311
+ }
1312
+ }
1313
+ if (UNLIKELY(val == Qundef)) {
1314
+ rb_warning("instance variable %s not initialized", rb_id2name(id));
1315
+ val = Qnil;
1316
+ }
1317
+ return val;
1318
+ }
1319
+ else {
1320
+ return rb_ivar_get(obj, id);
1321
+ }
1322
+ #else
1323
+ return rb_ivar_get(obj, id);
1324
+ #endif
1325
+ }
1326
+
1327
+ static void
1328
+ vm_setivar(VALUE obj, ID id, VALUE val, IC ic)
1329
+ {
1330
+ #if USE_IC_FOR_IVAR
1331
+ if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
1332
+ rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
1333
+ }
1334
+
1335
+ rb_check_frozen(obj);
1336
+
1337
+ if (TYPE(obj) == T_OBJECT) {
1338
+ VALUE klass = RBASIC(obj)->klass;
1339
+ st_data_t index;
1340
+
1341
+ if (LIKELY(ic->ic_class == klass &&
1342
+ ic->ic_vmstat == GET_VM_STATE_VERSION())) {
1343
+ long index = ic->ic_value.index;
1344
+ long len = ROBJECT_NUMIV(obj);
1345
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1346
+
1347
+ if (index < len) {
1348
+ ptr[index] = val;
1349
+ return; /* inline cache hit */
1350
+ }
1351
+ }
1352
+ else {
1353
+ struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1354
+
1355
+ if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
1356
+ ic->ic_class = klass;
1357
+ ic->ic_value.index = index;
1358
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1359
+ }
1360
+ /* fall through */
1361
+ }
1362
+ }
1363
+ rb_ivar_set(obj, id, val);
1364
+ #else
1365
+ rb_ivar_set(obj, id, val);
1366
+ #endif
1367
+ }
1368
+
1369
+ static inline const rb_method_entry_t *
1370
+ vm_method_search(VALUE id, VALUE klass, IC ic)
1371
+ {
1372
+ rb_method_entry_t *me;
1373
+ #if OPT_INLINE_METHOD_CACHE
1374
+ if (LIKELY(klass == ic->ic_class &&
1375
+ GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
1376
+ me = ic->ic_value.method;
1377
+ }
1378
+ else {
1379
+ me = rb_method_entry(klass, id);
1380
+ ic->ic_class = klass;
1381
+ ic->ic_value.method = me;
1382
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1383
+ }
1384
+ #else
1385
+ me = rb_method_entry(klass, id);
1386
+ #endif
1387
+ return me;
1388
+ }
1389
+
1390
+ static inline VALUE
1391
+ vm_search_normal_superclass(VALUE klass, VALUE recv)
1392
+ {
1393
+ if (BUILTIN_TYPE(klass) == T_CLASS) {
1394
+ return RCLASS_SUPER(klass);
1395
+ }
1396
+ else if (BUILTIN_TYPE(klass) == T_MODULE) {
1397
+ VALUE k = CLASS_OF(recv);
1398
+ while (k) {
1399
+ if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
1400
+ return RCLASS_SUPER(k);
1401
+ }
1402
+ k = RCLASS_SUPER(k);
1403
+ }
1404
+ return rb_cObject;
1405
+ }
1406
+ else {
1407
+ rb_bug("vm_search_normal_superclass: should not be reach here");
1408
+ }
1409
+ }
1410
+
1411
+ static void
1412
+ vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *iseq,
1413
+ VALUE recv, VALUE sigval,
1414
+ ID *idp, VALUE *klassp)
1415
+ {
1416
+ ID id;
1417
+ VALUE klass;
1418
+
1419
+ while (iseq && !iseq->klass) {
1420
+ iseq = iseq->parent_iseq;
1421
+ }
1422
+
1423
+ if (iseq == 0) {
1424
+ rb_raise(rb_eNoMethodError, "super called outside of method");
1425
+ }
1426
+
1427
+ id = iseq->defined_method_id;
1428
+
1429
+ if (iseq != iseq->local_iseq) {
1430
+ /* defined by Module#define_method() */
1431
+ rb_control_frame_t *lcfp = GET_CFP();
1432
+
1433
+ if (!sigval) {
1434
+ /* zsuper */
1435
+ rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
1436
+ }
1437
+
1438
+ while (lcfp->iseq != iseq) {
1439
+ rb_thread_t *th = GET_THREAD();
1440
+ VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
1441
+ while (1) {
1442
+ lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
1443
+ if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, lcfp)) {
1444
+ rb_raise(rb_eNoMethodError,
1445
+ "super called outside of method");
1446
+ }
1447
+ if (lcfp->dfp == tdfp) {
1448
+ break;
1449
+ }
1450
+ }
1451
+ }
1452
+
1453
+ /* temporary measure for [Bug #2420] [Bug #3136] */
1454
+ if (!lcfp->me) {
1455
+ rb_raise(rb_eNoMethodError, "super called outside of method");
1456
+ }
1457
+
1458
+ id = lcfp->me->def->original_id;
1459
+ klass = vm_search_normal_superclass(lcfp->me->klass, recv);
1460
+ }
1461
+ else {
1462
+ klass = vm_search_normal_superclass(iseq->klass, recv);
1463
+ }
1464
+
1465
+ *idp = id;
1466
+ *klassp = klass;
1467
+ }
1468
+
1469
+ static VALUE
1470
+ vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
1471
+ rb_num_t throw_state, VALUE throwobj)
1472
+ {
1473
+ int state = (int)(throw_state & 0xff);
1474
+ int flag = (int)(throw_state & 0x8000);
1475
+ rb_num_t level = throw_state >> 16;
1476
+
1477
+ if (state != 0) {
1478
+ VALUE *pt = 0;
1479
+ if (flag != 0) {
1480
+ pt = (void *) 1;
1481
+ }
1482
+ else {
1483
+ if (state == TAG_BREAK) {
1484
+ rb_control_frame_t *cfp = GET_CFP();
1485
+ VALUE *dfp = GET_DFP();
1486
+ int is_orphan = 1;
1487
+ rb_iseq_t *base_iseq = GET_ISEQ();
1488
+
1489
+ search_parent:
1490
+ if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
1491
+ if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
1492
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1493
+ dfp = cfp->dfp;
1494
+ goto search_parent;
1495
+ }
1496
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
1497
+ base_iseq = base_iseq->parent_iseq;
1498
+
1499
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
1500
+ if (cfp->dfp == dfp) {
1501
+ goto search_parent;
1502
+ }
1503
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1504
+ }
1505
+ rb_bug("VM (throw): can't find break base.");
1506
+ }
1507
+
1508
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
1509
+ /* lambda{... break ...} */
1510
+ is_orphan = 0;
1511
+ pt = cfp->dfp;
1512
+ state = TAG_RETURN;
1513
+ }
1514
+ else {
1515
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
1516
+
1517
+ while ((VALUE *)cfp < th->stack + th->stack_size) {
1518
+ if (cfp->dfp == dfp) {
1519
+ VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
1520
+ rb_iseq_t *iseq = cfp->iseq;
1521
+ int i;
1522
+
1523
+ for (i=0; i<iseq->catch_table_size; i++) {
1524
+ struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
1525
+
1526
+ if (entry->type == CATCH_TYPE_BREAK &&
1527
+ entry->start < epc && entry->end >= epc) {
1528
+ if (entry->cont == epc) {
1529
+ goto found;
1530
+ }
1531
+ else {
1532
+ break;
1533
+ }
1534
+ }
1535
+ }
1536
+ break;
1537
+
1538
+ found:
1539
+ pt = dfp;
1540
+ is_orphan = 0;
1541
+ break;
1542
+ }
1543
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1544
+ }
1545
+ }
1546
+
1547
+ if (is_orphan) {
1548
+ rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1549
+ }
1550
+ }
1551
+ else if (state == TAG_RETRY) {
1552
+ rb_num_t i;
1553
+ pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
1554
+ for (i = 0; i < level; i++) {
1555
+ pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
1556
+ }
1557
+ }
1558
+ else if (state == TAG_RETURN) {
1559
+ rb_control_frame_t *cfp = GET_CFP();
1560
+ VALUE *dfp = GET_DFP();
1561
+ VALUE *lfp = GET_LFP();
1562
+ int in_class_frame = 0;
1563
+
1564
+ /* check orphan and get dfp */
1565
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
1566
+ if (!lfp) {
1567
+ lfp = cfp->lfp;
1568
+ }
1569
+ if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_CLASS) {
1570
+ in_class_frame = 1;
1571
+ lfp = 0;
1572
+ }
1573
+
1574
+ if (cfp->lfp == lfp) {
1575
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
1576
+ VALUE *tdfp = dfp;
1577
+
1578
+ if (in_class_frame) {
1579
+ /* lambda {class A; ... return ...; end} */
1580
+ dfp = cfp->dfp;
1581
+ goto valid_return;
1582
+ }
1583
+
1584
+ while (lfp != tdfp) {
1585
+ if (cfp->dfp == tdfp) {
1586
+ /* in lambda */
1587
+ dfp = cfp->dfp;
1588
+ goto valid_return;
1589
+ }
1590
+ tdfp = GC_GUARDED_PTR_REF((VALUE *)*tdfp);
1591
+ }
1592
+ }
1593
+ }
1594
+
1595
+ if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_METHOD) {
1596
+ dfp = lfp;
1597
+ goto valid_return;
1598
+ }
1599
+
1600
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1601
+ }
1602
+
1603
+ rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1604
+
1605
+ valid_return:
1606
+ pt = dfp;
1607
+ }
1608
+ else {
1609
+ rb_bug("isns(throw): unsupport throw type");
1610
+ }
1611
+ }
1612
+ th->state = state;
1613
+ return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
1614
+ }
1615
+ else {
1616
+ /* continue throw */
1617
+ VALUE err = throwobj;
1618
+
1619
+ if (FIXNUM_P(err)) {
1620
+ th->state = FIX2INT(err);
1621
+ }
1622
+ else if (SYMBOL_P(err)) {
1623
+ th->state = TAG_THROW;
1624
+ }
1625
+ else if (BUILTIN_TYPE(err) == T_NODE) {
1626
+ th->state = GET_THROWOBJ_STATE(err);
1627
+ }
1628
+ else {
1629
+ th->state = TAG_RAISE;
1630
+ /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
1631
+ }
1632
+ return err;
1633
+ }
1634
+ }
1635
+
1636
+ static inline void
1637
+ vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
1638
+ {
1639
+ int is_splat = flag & 0x01;
1640
+ rb_num_t space_size = num + is_splat;
1641
+ VALUE *base = cfp->sp, *ptr;
1642
+ volatile VALUE tmp_ary;
1643
+ rb_num_t len;
1644
+
1645
+ if (TYPE(ary) != T_ARRAY) {
1646
+ ary = rb_ary_to_ary(ary);
1647
+ }
1648
+
1649
+ cfp->sp += space_size;
1650
+
1651
+ tmp_ary = ary;
1652
+ ptr = RARRAY_PTR(ary);
1653
+ len = (rb_num_t)RARRAY_LEN(ary);
1654
+
1655
+ if (flag & 0x02) {
1656
+ /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1657
+ rb_num_t i = 0, j;
1658
+
1659
+ if (len < num) {
1660
+ for (i=0; i<num-len; i++) {
1661
+ *base++ = Qnil;
1662
+ }
1663
+ }
1664
+ for (j=0; i<num; i++, j++) {
1665
+ VALUE v = ptr[len - j - 1];
1666
+ *base++ = v;
1667
+ }
1668
+ if (is_splat) {
1669
+ *base = rb_ary_new4(len - j, ptr);
1670
+ }
1671
+ }
1672
+ else {
1673
+ /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1674
+ rb_num_t i;
1675
+ VALUE *bptr = &base[space_size - 1];
1676
+
1677
+ for (i=0; i<num; i++) {
1678
+ if (len <= i) {
1679
+ for (; i<num; i++) {
1680
+ *bptr-- = Qnil;
1681
+ }
1682
+ break;
1683
+ }
1684
+ *bptr-- = ptr[i];
1685
+ }
1686
+ if (is_splat) {
1687
+ if (num > len) {
1688
+ *bptr = rb_ary_new();
1689
+ }
1690
+ else {
1691
+ *bptr = rb_ary_new4(len - num, ptr + num);
1692
+ }
1693
+ }
1694
+ }
1695
+ }
1696
+
1697
+ static inline int
1698
+ check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
1699
+ {
1700
+ if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
1701
+ me->def->body.cfunc.func == func) {
1702
+ return 1;
1703
+ }
1704
+ else {
1705
+ return 0;
1706
+ }
1707
+ }
1708
+
1709
+ static
1710
+ #ifndef NO_BIG_INLINE
1711
+ inline
1712
+ #endif
1713
+ VALUE
1714
+ opt_eq_func(VALUE recv, VALUE obj, IC ic)
1715
+ {
1716
+ if (FIXNUM_2_P(recv, obj) &&
1717
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1718
+ return (recv == obj) ? Qtrue : Qfalse;
1719
+ }
1720
+ else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
1721
+ if (HEAP_CLASS_OF(recv) == rb_cFloat &&
1722
+ HEAP_CLASS_OF(obj) == rb_cFloat &&
1723
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1724
+ double a = RFLOAT_VALUE(recv);
1725
+ double b = RFLOAT_VALUE(obj);
1726
+
1727
+ if (isnan(a) || isnan(b)) {
1728
+ return Qfalse;
1729
+ }
1730
+ return (a == b) ? Qtrue : Qfalse;
1731
+ }
1732
+ else if (HEAP_CLASS_OF(recv) == rb_cString &&
1733
+ HEAP_CLASS_OF(obj) == rb_cString &&
1734
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1735
+ return rb_str_equal(recv, obj);
1736
+ }
1737
+ }
1738
+
1739
+ {
1740
+ const rb_method_entry_t *me = vm_method_search(idEq, CLASS_OF(recv), ic);
1741
+
1742
+ if (check_cfunc(me, rb_obj_equal)) {
1743
+ return recv == obj ? Qtrue : Qfalse;
1744
+ }
1745
+ }
1746
+
1747
+ return Qundef;
1748
+ }
1749
+