pline 0.0.3

Files changed (51)
  1. data/README +134 -0
  2. data/ext/pline/depend +55 -0
  3. data/ext/pline/extconf.rb +14 -0
  4. data/ext/pline/iseq.c +124 -0
  5. data/ext/pline/minfo.c +167 -0
  6. data/ext/pline/pline.c.rb +68 -0
  7. data/ext/pline/profiler.c +125 -0
  8. data/ext/pline/ruby_source/1.9.2/debug.h +36 -0
  9. data/ext/pline/ruby_source/1.9.2/eval_intern.h +232 -0
  10. data/ext/pline/ruby_source/1.9.2/gc.h +77 -0
  11. data/ext/pline/ruby_source/1.9.2/id.h +170 -0
  12. data/ext/pline/ruby_source/1.9.2/insns.inc +179 -0
  13. data/ext/pline/ruby_source/1.9.2/insns_info.inc +695 -0
  14. data/ext/pline/ruby_source/1.9.2/iseq.h +104 -0
  15. data/ext/pline/ruby_source/1.9.2/manual_update.h +19 -0
  16. data/ext/pline/ruby_source/1.9.2/method.h +103 -0
  17. data/ext/pline/ruby_source/1.9.2/node.h +483 -0
  18. data/ext/pline/ruby_source/1.9.2/thread_pthread.h +27 -0
  19. data/ext/pline/ruby_source/1.9.2/thread_win32.h +33 -0
  20. data/ext/pline/ruby_source/1.9.2/vm_core.h +706 -0
  21. data/ext/pline/ruby_source/1.9.2/vm_exec.h +184 -0
  22. data/ext/pline/ruby_source/1.9.2/vm_insnhelper.c +1734 -0
  23. data/ext/pline/ruby_source/1.9.2/vm_insnhelper.h +208 -0
  24. data/ext/pline/ruby_source/1.9.2/vm_opts.h +51 -0
  25. data/ext/pline/ruby_source/1.9.3/atomic.h +56 -0
  26. data/ext/pline/ruby_source/1.9.3/constant.h +34 -0
  27. data/ext/pline/ruby_source/1.9.3/debug.h +41 -0
  28. data/ext/pline/ruby_source/1.9.3/eval_intern.h +234 -0
  29. data/ext/pline/ruby_source/1.9.3/gc.h +98 -0
  30. data/ext/pline/ruby_source/1.9.3/id.h +175 -0
  31. data/ext/pline/ruby_source/1.9.3/insns.inc +179 -0
  32. data/ext/pline/ruby_source/1.9.3/insns_info.inc +695 -0
  33. data/ext/pline/ruby_source/1.9.3/internal.h +227 -0
  34. data/ext/pline/ruby_source/1.9.3/iseq.h +125 -0
  35. data/ext/pline/ruby_source/1.9.3/manual_update.h +19 -0
  36. data/ext/pline/ruby_source/1.9.3/method.h +105 -0
  37. data/ext/pline/ruby_source/1.9.3/node.h +503 -0
  38. data/ext/pline/ruby_source/1.9.3/thread_pthread.h +51 -0
  39. data/ext/pline/ruby_source/1.9.3/thread_win32.h +40 -0
  40. data/ext/pline/ruby_source/1.9.3/vm_core.h +756 -0
  41. data/ext/pline/ruby_source/1.9.3/vm_exec.h +184 -0
  42. data/ext/pline/ruby_source/1.9.3/vm_insnhelper.c +1749 -0
  43. data/ext/pline/ruby_source/1.9.3/vm_insnhelper.h +220 -0
  44. data/ext/pline/ruby_source/1.9.3/vm_opts.h +51 -0
  45. data/ext/pline/sinfo.c +311 -0
  46. data/lib/pline.rb +11 -0
  47. data/lib/pline/minfo.rb +22 -0
  48. data/lib/pline/summarize.rb +127 -0
  49. data/lib/pline/util.rb +54 -0
  50. data/pline.gemspec +28 -0
  51. metadata +102 -0
@@ -0,0 +1,184 @@
+ /**********************************************************************
+
+ vm.h -
+
+ $Author: yugui $
+ created at: 04/01/01 16:56:59 JST
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+ **********************************************************************/
+
+ #ifndef RUBY_VM_EXEC_H
+ #define RUBY_VM_EXEC_H
+
+ typedef long OFFSET;
+ typedef unsigned long lindex_t;
+ typedef unsigned long dindex_t;
+ typedef rb_num_t GENTRY;
+ typedef rb_iseq_t *ISEQ;
+
+ #ifdef COLLECT_USAGE_ANALYSIS
+ #define USAGE_ANALYSIS_INSN(insn) vm_analysis_insn(insn)
+ #define USAGE_ANALYSIS_OPERAND(insn, n, op) vm_analysis_operand(insn, n, (VALUE)op)
+ #define USAGE_ANALYSIS_REGISTER(reg, s) vm_analysis_register(reg, s)
+ #else
+ #define USAGE_ANALYSIS_INSN(insn) /* none */
+ #define USAGE_ANALYSIS_OPERAND(insn, n, op) /* none */
+ #define USAGE_ANALYSIS_REGISTER(reg, s) /* none */
+ #endif
+
+ #ifdef __GCC__
+ /* TODO: machine dependent prefetch instruction */
+ #define PREFETCH(pc)
+ #else
+ #define PREFETCH(pc)
+ #endif
+
+ #if VMDEBUG > 0
+ #define debugs printf
+ #define DEBUG_ENTER_INSN(insn) \
+ debug_print_pre(th, GET_CFP());
+
+ #if OPT_STACK_CACHING
+ #define SC_REGS() , reg_a, reg_b
+ #else
+ #define SC_REGS()
+ #endif
+
+ #define DEBUG_END_INSN() \
+ debug_print_post(th, GET_CFP() SC_REGS());
+
+ #else
+
+ #define debugs
+ #define DEBUG_ENTER_INSN(insn)
+ #define DEBUG_END_INSN()
+ #endif
+
+ #define throwdebug if(0)printf
+ /* #define throwdebug printf */
+
+ /************************************************/
+ #if DISPATCH_XXX
+ error !
+ /************************************************/
+ #elif OPT_CALL_THREADED_CODE
+
+ #define LABEL(x) insn_func_##x
+ #define ELABEL(x)
+ #define LABEL_PTR(x) &LABEL(x)
+
+ #define INSN_ENTRY(insn) \
+ static rb_control_frame_t * \
+ FUNC_FASTCALL(LABEL(insn))(rb_thread_t *th, rb_control_frame_t *reg_cfp) {
+
+ #define END_INSN(insn) return reg_cfp;}
+
+ #define NEXT_INSN() return reg_cfp;
+
+ /************************************************/
+ #elif OPT_TOKEN_THREADED_CODE || OPT_DIRECT_THREADED_CODE
+ /* threaded code with gcc */
+
+ #define LABEL(x) INSN_LABEL_##x
+ #define ELABEL(x) INSN_ELABEL_##x
+ #define LABEL_PTR(x) &&LABEL(x)
+
+ #define INSN_ENTRY_SIG(insn)
+
+
+ #define INSN_DISPATCH_SIG(insn)
+
+ #define INSN_ENTRY(insn) \
+ LABEL(insn): \
+ INSN_ENTRY_SIG(insn); \
+
+ /* dispather */
+ #if __GNUC__ && (__i386__ || __x86_64__) && __GNUC__ == 3
+ #define DISPATCH_ARCH_DEPEND_WAY(addr) \
+ asm volatile("jmp *%0;\t# -- inseted by vm.h\t[length = 2]" : : "r" (addr))
+
+ #else
+ #define DISPATCH_ARCH_DEPEND_WAY(addr) \
+ /* do nothing */
+
+ #endif
+
+
+ /**********************************/
+ #if OPT_DIRECT_THREADED_CODE
+
+ /* for GCC 3.4.x */
+ #define TC_DISPATCH(insn) \
+ INSN_DISPATCH_SIG(insn); \
+ goto *GET_CURRENT_INSN(); \
+ ;
+
+ #else
+ /* token threade code */
+
+ #define TC_DISPATCH(insn) \
+ DISPATCH_ARCH_DEPEND_WAY(insns_address_table[GET_CURRENT_INSN()]); \
+ INSN_DISPATCH_SIG(insn); \
+ goto *insns_address_table[GET_CURRENT_INSN()]; \
+ rb_bug("tc error");
+
+
+ #endif /* DISPATCH_DIRECT_THREADED_CODE */
+
+ #define END_INSN(insn) \
+ DEBUG_END_INSN(); \
+ TC_DISPATCH(insn); \
+
+ #define INSN_DISPATCH() \
+ TC_DISPATCH(__START__) \
+ {
+
+ #define END_INSNS_DISPATCH() \
+ rb_bug("unknown insn: %ld", GET_CURRENT_INSN()); \
+ } /* end of while loop */ \
+
+ #define NEXT_INSN() TC_DISPATCH(__NEXT_INSN__)
+
+ /************************************************/
+ #else /* no threaded code */
+ /* most common method */
+
+ #define INSN_ENTRY(insn) \
+ case BIN(insn):
+
+ #define END_INSN(insn) \
+ DEBUG_END_INSN(); \
+ break;
+
+
+ #define INSN_DISPATCH() \
+ while(1){ \
+ switch(GET_CURRENT_INSN()){
+
+ #define END_INSNS_DISPATCH() \
+ default: \
+ SDR(); \
+ rb_bug("unknown insn: %ld", GET_CURRENT_INSN()); \
+ } /* end of switch */ \
+ } /* end of while loop */ \
+
+ #define NEXT_INSN() goto first
+
+ #endif
+
+ #define VM_SP_CNT(th, sp) ((sp) - (th)->stack)
+
+ #if OPT_CALL_THREADED_CODE
+ #define THROW_EXCEPTION(exc) do { \
+ th->errinfo = (VALUE)(exc); \
+ return 0; \
+ } while (0)
+ #else
+ #define THROW_EXCEPTION(exc) return (VALUE)(exc)
+ #endif
+
+ #define SCREG(r) (reg_##r)
+
+ #endif /* RUBY_VM_EXEC_H */
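/* Note: vm_exec.h above selects one of three instruction-dispatch strategies at
   compile time: call-threaded code (every instruction becomes a FUNC_FASTCALL
   function returning the next control frame), token/direct-threaded code
   (computed gotos through insns_address_table), or the portable while/switch
   loop. INSN_ENTRY/END_INSN and INSN_DISPATCH/END_INSNS_DISPATCH expand
   differently for whichever mode is chosen. */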
@@ -0,0 +1,1734 @@
1
+ /**********************************************************************
2
+
3
+ vm_insnhelper.c - instruction helper functions.
4
+
5
+ $Author: yugui $
6
+
7
+ Copyright (C) 2007 Koichi Sasada
8
+
9
+ **********************************************************************/
10
+
11
+ /* finish iseq array */
12
+ #include "insns.inc"
13
+ #include <math.h>
14
+
15
+ /* control stack frame */
16
+
17
+ #ifndef INLINE
18
+ #define INLINE inline
19
+ #endif
20
+
21
+ static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
22
+
23
+ static inline rb_control_frame_t *
24
+ vm_push_frame(rb_thread_t * th, const rb_iseq_t * iseq,
25
+ VALUE type, VALUE self, VALUE specval,
26
+ const VALUE *pc, VALUE *sp, VALUE *lfp,
27
+ int local_size)
28
+ {
29
+ rb_control_frame_t * const cfp = th->cfp - 1;
30
+ int i;
31
+
32
+ if ((void *)(sp + local_size) >= (void *)cfp) {
33
+ rb_exc_raise(sysstack_error);
34
+ }
35
+ th->cfp = cfp;
36
+ /* setup vm value stack */
37
+
38
+ /* nil initialize */
39
+ for (i=0; i < local_size; i++) {
40
+ *sp = Qnil;
41
+ sp++;
42
+ }
43
+
44
+ /* set special val */
45
+ *sp = GC_GUARDED_PTR(specval);
46
+
47
+ if (lfp == 0) {
48
+ lfp = sp;
49
+ }
50
+
51
+ /* setup vm control frame stack */
52
+
53
+ cfp->pc = (VALUE *)pc;
54
+ cfp->sp = sp + 1;
55
+ cfp->bp = sp + 1;
56
+ cfp->iseq = (rb_iseq_t *) iseq;
57
+ cfp->flag = type;
58
+ cfp->self = self;
59
+ cfp->lfp = lfp;
60
+ cfp->dfp = sp;
61
+ cfp->block_iseq = 0;
62
+ cfp->proc = 0;
63
+ cfp->me = 0;
64
+
65
+ #define COLLECT_PROFILE 0
66
+ #if COLLECT_PROFILE
67
+ cfp->prof_time_self = clock();
68
+ cfp->prof_time_chld = 0;
69
+ #endif
70
+
71
+ if (VMDEBUG == 2) {
72
+ SDR();
73
+ }
74
+
75
+ return cfp;
76
+ }
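/* Note: vm_push_frame() above grows the control-frame stack downward
   (th->cfp - 1), nil-fills local_size slots on the value stack, stores specval
   as a GC-guarded pointer in the slot that becomes dfp (and lfp when none is
   passed), and then initializes pc/sp/bp/iseq/flag/self/lfp/dfp. The overflow
   check compares sp + local_size against the new cfp before anything is
   written. */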
77
+
78
+ static inline void
79
+ vm_pop_frame(rb_thread_t *th)
80
+ {
81
+ #if COLLECT_PROFILE
82
+ rb_control_frame_t *cfp = th->cfp;
83
+
84
+ if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
85
+ VALUE current_time = clock();
86
+ rb_control_frame_t *cfp = th->cfp;
87
+ cfp->prof_time_self = current_time - cfp->prof_time_self;
88
+ (cfp+1)->prof_time_chld += cfp->prof_time_self;
89
+
90
+ cfp->iseq->profile.count++;
91
+ cfp->iseq->profile.time_cumu = cfp->prof_time_self;
92
+ cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
93
+ }
94
+ else if (0 /* c method? */) {
95
+
96
+ }
97
+ #endif
98
+ th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
99
+
100
+ if (VMDEBUG == 2) {
101
+ SDR();
102
+ }
103
+ }
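/* Note: the profiling branch in vm_pop_frame() above is compiled out because
   COLLECT_PROFILE is defined as 0 earlier in this file; popping therefore only
   rewinds th->cfp to the previous control frame. */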
104
+
105
+ /* method dispatch */
106
+
107
+ NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc));
108
+ static void
109
+ argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc)
110
+ {
111
+ VALUE mesg = rb_sprintf("wrong number of arguments (%d for %d)", miss_argc, correct_argc);
112
+ VALUE exc = rb_exc_new3(rb_eArgError, mesg);
113
+ VALUE bt = rb_make_backtrace();
114
+ VALUE err_line = 0;
115
+
116
+ if (iseq) {
117
+ int line_no = 1;
118
+
119
+ if (iseq->insn_info_size) {
120
+ line_no = iseq->insn_info_table[0].line_no;
121
+ }
122
+
123
+ err_line = rb_sprintf("%s:%d:in `%s'",
124
+ RSTRING_PTR(iseq->filename),
125
+ line_no, RSTRING_PTR(iseq->name));
126
+ rb_funcall(bt, rb_intern("unshift"), 1, err_line);
127
+ }
128
+
129
+ rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
130
+ rb_exc_raise(exc);
131
+ }
132
+
133
+ #define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
134
+ if (LIKELY(iseq->arg_simple & 0x01)) { \
135
+ /* simple check */ \
136
+ if (orig_argc != iseq->argc) { \
137
+ argument_error(iseq, orig_argc, iseq->argc); \
138
+ } \
139
+ ret = 0; \
140
+ } \
141
+ else { \
142
+ ret = vm_callee_setup_arg_complex(th, iseq, orig_argc, orig_argv, block); \
143
+ }
144
+
145
+ static inline int
146
+ vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t * iseq,
147
+ int orig_argc, VALUE * orig_argv,
148
+ const rb_block_t **block)
149
+ {
150
+ const int m = iseq->argc;
151
+ int argc = orig_argc;
152
+ VALUE *argv = orig_argv;
153
+ rb_num_t opt_pc = 0;
154
+
155
+ th->mark_stack_len = argc + iseq->arg_size;
156
+
157
+ /* mandatory */
158
+ if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
159
+ argument_error(iseq, argc, m + iseq->arg_post_len);
160
+ }
161
+
162
+ argv += m;
163
+ argc -= m;
164
+
165
+ /* post arguments */
166
+ if (iseq->arg_post_len) {
167
+ if (!(orig_argc < iseq->arg_post_start)) {
168
+ VALUE *new_argv = ALLOCA_N(VALUE, argc);
169
+ MEMCPY(new_argv, argv, VALUE, argc);
170
+ argv = new_argv;
171
+ }
172
+
173
+ MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
174
+ VALUE, iseq->arg_post_len);
175
+ }
176
+
177
+ /* opt arguments */
178
+ if (iseq->arg_opts) {
179
+ const int opts = iseq->arg_opts - 1 /* no opt */;
180
+
181
+ if (iseq->arg_rest == -1 && argc > opts) {
182
+ argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len);
183
+ }
184
+
185
+ if (argc > opts) {
186
+ argc -= opts;
187
+ argv += opts;
188
+ opt_pc = iseq->arg_opt_table[opts]; /* no opt */
189
+ }
190
+ else {
191
+ int i;
192
+ for (i = argc; i<opts; i++) {
193
+ orig_argv[i + m] = Qnil;
194
+ }
195
+ opt_pc = iseq->arg_opt_table[argc];
196
+ argc = 0;
197
+ }
198
+ }
199
+
200
+ /* rest arguments */
201
+ if (iseq->arg_rest != -1) {
202
+ orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
203
+ argc = 0;
204
+ }
205
+
206
+ /* block arguments */
207
+ if (block && iseq->arg_block != -1) {
208
+ VALUE blockval = Qnil;
209
+ const rb_block_t *blockptr = *block;
210
+
211
+ if (argc != 0) {
212
+ argument_error(iseq, orig_argc, m + iseq->arg_post_len);
213
+ }
214
+
215
+ if (blockptr) {
216
+ /* make Proc object */
217
+ if (blockptr->proc == 0) {
218
+ rb_proc_t *proc;
219
+ blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
220
+ GetProcPtr(blockval, proc);
221
+ *block = &proc->block;
222
+ }
223
+ else {
224
+ blockval = blockptr->proc;
225
+ }
226
+ }
227
+
228
+ orig_argv[iseq->arg_block] = blockval; /* Proc or nil */
229
+ }
230
+
231
+ th->mark_stack_len = 0;
232
+ return (int)opt_pc;
233
+ }
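/* Note: vm_callee_setup_arg_complex() above lays out a method's arguments in
   order: mandatory (count checked against m + arg_post_len), post arguments
   copied into place from the tail, optional arguments (opt_pc selects the
   bytecode entry point that skips already-supplied defaults), a rest Array
   when arg_rest != -1, and finally a Proc or nil in the arg_block slot. */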
234
+
235
+ static inline int
236
+ caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
237
+ int argc, rb_iseq_t *blockiseq, rb_block_t **block)
238
+ {
239
+ rb_block_t *blockptr = 0;
240
+
241
+ if (block) {
242
+ if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
243
+ rb_proc_t *po;
244
+ VALUE proc;
245
+
246
+ proc = *(--cfp->sp);
247
+
248
+ if (proc != Qnil) {
249
+ if (!rb_obj_is_proc(proc)) {
250
+ VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
251
+ if (NIL_P(b) || !rb_obj_is_proc(b)) {
252
+ rb_raise(rb_eTypeError,
253
+ "wrong argument type %s (expected Proc)",
254
+ rb_obj_classname(proc));
255
+ }
256
+ proc = b;
257
+ }
258
+ GetProcPtr(proc, po);
259
+ blockptr = &po->block;
260
+ RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
261
+ *block = blockptr;
262
+ }
263
+ }
264
+ else if (blockiseq) {
265
+ blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
266
+ blockptr->iseq = blockiseq;
267
+ blockptr->proc = 0;
268
+ *block = blockptr;
269
+ }
270
+ }
271
+
272
+ /* expand top of stack? */
273
+ if (flag & VM_CALL_ARGS_SPLAT_BIT) {
274
+ VALUE ary = *(cfp->sp - 1);
275
+ VALUE *ptr;
276
+ int i;
277
+ VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");
278
+
279
+ if (NIL_P(tmp)) {
280
+ /* do nothing */
281
+ }
282
+ else {
283
+ long len = RARRAY_LEN(tmp);
284
+ ptr = RARRAY_PTR(tmp);
285
+ cfp->sp -= 1;
286
+
287
+ CHECK_STACK_OVERFLOW(cfp, len);
288
+
289
+ for (i = 0; i < len; i++) {
290
+ *cfp->sp++ = ptr[i];
291
+ }
292
+ argc += i-1;
293
+ }
294
+ }
295
+
296
+ return argc;
297
+ }
298
+
299
+ static inline VALUE
300
+ call_cfunc(VALUE (*func)(), VALUE recv,
301
+ int len, int argc, const VALUE *argv)
302
+ {
303
+ /* printf("len: %d, argc: %d\n", len, argc); */
304
+
305
+ if (len >= 0 && argc != len) {
306
+ rb_raise(rb_eArgError, "wrong number of arguments(%d for %d)",
307
+ argc, len);
308
+ }
309
+
310
+ switch (len) {
311
+ case -2:
312
+ return (*func) (recv, rb_ary_new4(argc, argv));
313
+ break;
314
+ case -1:
315
+ return (*func) (argc, argv, recv);
316
+ break;
317
+ case 0:
318
+ return (*func) (recv);
319
+ break;
320
+ case 1:
321
+ return (*func) (recv, argv[0]);
322
+ break;
323
+ case 2:
324
+ return (*func) (recv, argv[0], argv[1]);
325
+ break;
326
+ case 3:
327
+ return (*func) (recv, argv[0], argv[1], argv[2]);
328
+ break;
329
+ case 4:
330
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
331
+ break;
332
+ case 5:
333
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
334
+ break;
335
+ case 6:
336
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
337
+ argv[5]);
338
+ break;
339
+ case 7:
340
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
341
+ argv[5], argv[6]);
342
+ break;
343
+ case 8:
344
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
345
+ argv[5], argv[6], argv[7]);
346
+ break;
347
+ case 9:
348
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
349
+ argv[5], argv[6], argv[7], argv[8]);
350
+ break;
351
+ case 10:
352
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
353
+ argv[5], argv[6], argv[7], argv[8], argv[9]);
354
+ break;
355
+ case 11:
356
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
357
+ argv[5], argv[6], argv[7], argv[8], argv[9],
358
+ argv[10]);
359
+ break;
360
+ case 12:
361
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
362
+ argv[5], argv[6], argv[7], argv[8], argv[9],
363
+ argv[10], argv[11]);
364
+ break;
365
+ case 13:
366
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
367
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
368
+ argv[11], argv[12]);
369
+ break;
370
+ case 14:
371
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
372
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
373
+ argv[11], argv[12], argv[13]);
374
+ break;
375
+ case 15:
376
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
377
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
378
+ argv[11], argv[12], argv[13], argv[14]);
379
+ break;
380
+ default:
381
+ rb_raise(rb_eArgError, "too many arguments(%d)", len);
382
+ return Qundef; /* not reached */
383
+ }
384
+ }
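/* Note: call_cfunc() above dispatches on the C method's declared arity:
   -2 calls (*func)(recv, args_as_Array), -1 calls (*func)(argc, argv, recv),
   and 0..15 spread argv into positional parameters, e.g. arity 2 becomes
   (*func)(recv, argv[0], argv[1]); any other arity is rejected. */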
385
+
386
+ static inline VALUE
387
+ vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp,
388
+ int num, VALUE recv, const rb_block_t *blockptr,
389
+ const rb_method_entry_t *me)
390
+ {
391
+ VALUE val = 0;
392
+ const rb_method_definition_t *def = me->def;
393
+ rb_control_frame_t *cfp;
394
+
395
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass);
396
+
397
+ cfp = vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
398
+ recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
399
+ cfp->me = me;
400
+ reg_cfp->sp -= num + 1;
401
+
402
+ val = call_cfunc(def->body.cfunc.func, recv, (int)def->body.cfunc.argc, num, reg_cfp->sp + 1);
403
+
404
+ if (reg_cfp != th->cfp + 1) {
405
+ rb_bug("cfp consistency error - send");
406
+ }
407
+
408
+ vm_pop_frame(th);
409
+
410
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass);
411
+
412
+ return val;
413
+ }
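/* Note: vm_call_cfunc() above brackets the C call with RUBY_EVENT_C_CALL /
   RUBY_EVENT_C_RETURN hooks, pushes a CFUNC dummy frame so backtraces and the
   cfp consistency check work, and verifies the callee left exactly one frame
   above th->cfp before popping it. */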
414
+
415
+ static inline VALUE
416
+ vm_call_bmethod(rb_thread_t *th, VALUE recv, int argc, const VALUE *argv,
417
+ const rb_block_t *blockptr, const rb_method_entry_t *me)
418
+ {
419
+ rb_proc_t *proc;
420
+ VALUE val;
421
+
422
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, recv, me->called_id, me->klass);
423
+
424
+ /* control block frame */
425
+ th->passed_me = me;
426
+ GetProcPtr(me->def->body.proc, proc);
427
+ val = rb_vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
428
+
429
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, recv, me->called_id, me->klass);
430
+
431
+ return val;
432
+ }
433
+
434
+ static inline void
435
+ vm_method_missing_args(rb_thread_t *th, VALUE *argv,
436
+ int num, const rb_block_t *blockptr, int opt)
437
+ {
438
+ rb_control_frame_t * const reg_cfp = th->cfp;
439
+ MEMCPY(argv, STACK_ADDR_FROM_TOP(num + 1), VALUE, num + 1);
440
+ th->method_missing_reason = opt;
441
+ th->passed_block = blockptr;
442
+ POPN(num + 1);
443
+ }
444
+
445
+ static inline VALUE
446
+ vm_method_missing(rb_thread_t *th, ID id, VALUE recv,
447
+ int num, const rb_block_t *blockptr, int opt)
448
+ {
449
+ VALUE *argv = ALLOCA_N(VALUE, num + 1);
450
+ vm_method_missing_args(th, argv, num, blockptr, opt);
451
+ argv[0] = ID2SYM(id);
452
+ return rb_funcall2(recv, idMethodMissing, num + 1, argv);
453
+ }
454
+
455
+ static inline void
456
+ vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
457
+ VALUE recv, int argc, const rb_block_t *blockptr, VALUE flag,
458
+ const rb_method_entry_t *me)
459
+ {
460
+ int opt_pc, i;
461
+ VALUE *sp, *rsp = cfp->sp - argc;
462
+ rb_iseq_t *iseq = me->def->body.iseq;
463
+
464
+ VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, rsp, &blockptr);
465
+
466
+ /* stack overflow check */
467
+ CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);
468
+
469
+ sp = rsp + iseq->arg_size;
470
+
471
+ if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
472
+ if (0) printf("local_size: %d, arg_size: %d\n",
473
+ iseq->local_size, iseq->arg_size);
474
+
475
+ /* clear local variables */
476
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
477
+ *sp++ = Qnil;
478
+ }
479
+
480
+ vm_push_frame(th, iseq,
481
+ VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
482
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
483
+
484
+ cfp->sp = rsp - 1 /* recv */;
485
+ }
486
+ else {
487
+ VALUE *p_rsp;
488
+ th->cfp++; /* pop cf */
489
+ p_rsp = th->cfp->sp;
490
+
491
+ /* copy arguments */
492
+ for (i=0; i < (sp - rsp); i++) {
493
+ p_rsp[i] = rsp[i];
494
+ }
495
+
496
+ sp -= rsp - p_rsp;
497
+
498
+ /* clear local variables */
499
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
500
+ *sp++ = Qnil;
501
+ }
502
+
503
+ vm_push_frame(th, iseq,
504
+ VM_FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
505
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
506
+ }
507
+ }
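/* Note: vm_setup_method() above has two paths: normally it pushes a METHOD
   frame on top of the caller, but when VM_CALL_TAILCALL_BIT is set it first
   pops the caller (th->cfp++), copies the arguments down into the reclaimed
   stack space, and pushes the callee frame in its place. */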
508
+
509
+ static inline VALUE
510
+ vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
511
+ int num, const rb_block_t *blockptr, VALUE flag,
512
+ ID id, const rb_method_entry_t *me, VALUE recv)
513
+ {
514
+ VALUE val;
515
+
516
+ start_method_dispatch:
517
+
518
+ if (me != 0) {
519
+ if ((me->flag == 0)) {
520
+ normal_method_dispatch:
521
+ switch (me->def->type) {
522
+ case VM_METHOD_TYPE_ISEQ:{
523
+ vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
524
+ return Qundef;
525
+ }
526
+ case VM_METHOD_TYPE_NOTIMPLEMENTED:
527
+ case VM_METHOD_TYPE_CFUNC:{
528
+ val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
529
+ break;
530
+ }
531
+ case VM_METHOD_TYPE_ATTRSET:{
532
+ if (num != 1) {
533
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
534
+ }
535
+ val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
536
+ cfp->sp -= 2;
537
+ break;
538
+ }
539
+ case VM_METHOD_TYPE_IVAR:{
540
+ if (num != 0) {
541
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
542
+ }
543
+ val = rb_attr_get(recv, me->def->body.attr.id);
544
+ cfp->sp -= 1;
545
+ break;
546
+ }
547
+ case VM_METHOD_TYPE_MISSING:{
548
+ VALUE *argv = ALLOCA_N(VALUE, num+1);
549
+ argv[0] = ID2SYM(me->def->original_id);
550
+ MEMCPY(argv+1, cfp->sp - num, VALUE, num);
551
+ cfp->sp += - num - 1;
552
+ val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
553
+ break;
554
+ }
555
+ case VM_METHOD_TYPE_BMETHOD:{
556
+ VALUE *argv = ALLOCA_N(VALUE, num);
557
+ MEMCPY(argv, cfp->sp - num, VALUE, num);
558
+ cfp->sp += - num - 1;
559
+ val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
560
+ break;
561
+ }
562
+ case VM_METHOD_TYPE_ZSUPER:{
563
+ VALUE klass = RCLASS_SUPER(me->klass);
564
+ me = rb_method_entry(klass, id);
565
+
566
+ if (me != 0) {
567
+ goto normal_method_dispatch;
568
+ }
569
+ else {
570
+ goto start_method_dispatch;
571
+ }
572
+ }
573
+ case VM_METHOD_TYPE_OPTIMIZED:{
574
+ switch (me->def->body.optimize_type) {
575
+ case OPTIMIZED_METHOD_TYPE_SEND: {
576
+ rb_control_frame_t *reg_cfp = cfp;
577
+ rb_num_t i = num - 1;
578
+ VALUE sym;
579
+
580
+ if (num == 0) {
581
+ rb_raise(rb_eArgError, "no method name given");
582
+ }
583
+
584
+ sym = TOPN(i);
585
+ id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
586
+ /* shift arguments */
587
+ if (i > 0) {
588
+ MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
589
+ }
590
+ me = rb_method_entry(CLASS_OF(recv), id);
591
+ num -= 1;
592
+ DEC_SP(1);
593
+ flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;
594
+
595
+ goto start_method_dispatch;
596
+ }
597
+ case OPTIMIZED_METHOD_TYPE_CALL: {
598
+ rb_proc_t *proc;
599
+ int argc = num;
600
+ VALUE *argv = ALLOCA_N(VALUE, num);
601
+ GetProcPtr(recv, proc);
602
+ MEMCPY(argv, cfp->sp - num, VALUE, num);
603
+ cfp->sp -= num + 1;
604
+
605
+ val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
606
+ break;
607
+ }
608
+ default:
609
+ rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
610
+ me->def->body.optimize_type);
611
+ }
612
+ break;
613
+ }
614
+ default:{
615
+ rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
616
+ break;
617
+ }
618
+ }
619
+ }
620
+ else {
621
+ int noex_safe;
622
+
623
+ if (!(flag & VM_CALL_FCALL_BIT) &&
624
+ (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
625
+ int stat = NOEX_PRIVATE;
626
+
627
+ if (flag & VM_CALL_VCALL_BIT) {
628
+ stat |= NOEX_VCALL;
629
+ }
630
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
631
+ }
632
+ else if (!(flag & VM_CALL_OPT_SEND_BIT) && (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
633
+ VALUE defined_class = me->klass;
634
+
635
+ if (TYPE(defined_class) == T_ICLASS) {
636
+ defined_class = RBASIC(defined_class)->klass;
637
+ }
638
+
639
+ if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
640
+ val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
641
+ }
642
+ else {
643
+ goto normal_method_dispatch;
644
+ }
645
+ }
646
+ else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
647
+ (noex_safe > 2)) {
648
+ rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
649
+ }
650
+ else {
651
+ goto normal_method_dispatch;
652
+ }
653
+ }
654
+ }
655
+ else {
656
+ /* method missing */
657
+ int stat = 0;
658
+ if (flag & VM_CALL_VCALL_BIT) {
659
+ stat |= NOEX_VCALL;
660
+ }
661
+ if (flag & VM_CALL_SUPER_BIT) {
662
+ stat |= NOEX_SUPER;
663
+ }
664
+ if (id == idMethodMissing) {
665
+ VALUE *argv = ALLOCA_N(VALUE, num);
666
+ vm_method_missing_args(th, argv, num - 1, 0, stat);
667
+ rb_raise_method_missing(th, num, argv, recv, stat);
668
+ }
669
+ else {
670
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
671
+ }
672
+ }
673
+
674
+ RUBY_VM_CHECK_INTS();
675
+ return val;
676
+ }
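/* Note: vm_call_method() above is the central dispatcher: a found entry is
   handled by definition type (ISEQ, CFUNC, ATTRSET, IVAR, MISSING, BMETHOD,
   ZSUPER, OPTIMIZED send/call), visibility violations are re-routed through
   method_missing, and a missing entry builds the method_missing call itself.
   Returning Qundef means "frame pushed, let the VM loop execute the iseq". */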
677
+
678
+ /* yield */
679
+
680
+ static inline int
681
+ block_proc_is_lambda(const VALUE procval)
682
+ {
683
+ rb_proc_t *proc;
684
+
685
+ if (procval) {
686
+ GetProcPtr(procval, proc);
687
+ return proc->is_lambda;
688
+ }
689
+ else {
690
+ return 0;
691
+ }
692
+ }
693
+
694
+ static inline VALUE
695
+ vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
696
+ VALUE self, int argc, const VALUE *argv,
697
+ const rb_block_t *blockargptr)
698
+ {
699
+ NODE *ifunc = (NODE *) block->iseq;
700
+ VALUE val, arg, blockarg;
701
+ int lambda = block_proc_is_lambda(block->proc);
702
+
703
+ if (lambda) {
704
+ arg = rb_ary_new4(argc, argv);
705
+ }
706
+ else if (argc == 0) {
707
+ arg = Qnil;
708
+ }
709
+ else {
710
+ arg = argv[0];
711
+ }
712
+
713
+ if (blockargptr) {
714
+ if (blockargptr->proc) {
715
+ blockarg = blockargptr->proc;
716
+ }
717
+ else {
718
+ blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
719
+ }
720
+ }
721
+ else {
722
+ blockarg = Qnil;
723
+ }
724
+
725
+ vm_push_frame(th, 0, VM_FRAME_MAGIC_IFUNC,
726
+ self, (VALUE)block->dfp,
727
+ 0, th->cfp->sp, block->lfp, 1);
728
+
729
+ if (blockargptr) {
730
+ th->cfp->lfp[0] = GC_GUARDED_PTR((VALUE)blockargptr);
731
+ }
732
+ val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);
733
+
734
+ th->cfp++;
735
+ return val;
736
+ }
737
+
738
+
739
+ /*--
740
+ * @brief on supplied all of optional, rest and post parameters.
741
+ * @pre iseq is block style (not lambda style)
742
+ */
743
+ static inline int
744
+ vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
745
+ int argc, VALUE *argv)
746
+ {
747
+ rb_num_t opt_pc = 0;
748
+ int i;
749
+ const int m = iseq->argc;
750
+ const int r = iseq->arg_rest;
751
+ int len = iseq->arg_post_len;
752
+ int start = iseq->arg_post_start;
753
+ int rsize = argc > m ? argc - m : 0; /* # of arguments which did not consumed yet */
754
+ int psize = rsize > len ? len : rsize; /* # of post arguments */
755
+ int osize = 0; /* # of opt arguments */
756
+ VALUE ary;
757
+
758
+ /* reserves arguments for post parameters */
759
+ rsize -= psize;
760
+
761
+ if (iseq->arg_opts) {
762
+ const int opts = iseq->arg_opts - 1;
763
+ if (rsize > opts) {
764
+ osize = opts;
765
+ opt_pc = iseq->arg_opt_table[opts];
766
+ }
767
+ else {
768
+ osize = rsize;
769
+ opt_pc = iseq->arg_opt_table[rsize];
770
+ }
771
+ }
772
+ rsize -= osize;
773
+
774
+ if (0) {
775
+ printf(" argc: %d\n", argc);
776
+ printf(" len: %d\n", len);
777
+ printf("start: %d\n", start);
778
+ printf("rsize: %d\n", rsize);
779
+ }
780
+
781
+ if (r == -1) {
782
+ /* copy post argument */
783
+ MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
784
+ }
785
+ else {
786
+ ary = rb_ary_new4(rsize, &argv[r]);
787
+
788
+ /* copy post argument */
789
+ MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
790
+ argv[r] = ary;
791
+ }
792
+
793
+ for (i=psize; i<len; i++) {
794
+ argv[start + i] = Qnil;
795
+ }
796
+
797
+ return (int)opt_pc;
798
+ }
799
+
800
+ static inline int
801
+ vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
802
+ int orig_argc, VALUE *argv,
803
+ const rb_block_t *blockptr)
804
+ {
805
+ int i;
806
+ int argc = orig_argc;
807
+ const int m = iseq->argc;
808
+ VALUE ary, arg0;
809
+ int opt_pc = 0;
810
+
811
+ th->mark_stack_len = argc;
812
+
813
+ /*
814
+ * yield [1, 2]
815
+ * => {|a|} => a = [1, 2]
816
+ * => {|a, b|} => a, b = [1, 2]
817
+ */
818
+ arg0 = argv[0];
819
+ if (!(iseq->arg_simple & 0x02) && /* exclude {|a|} */
820
+ (m + iseq->arg_post_len) > 0 && /* this process is meaningful */
821
+ argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) { /* rhs is only an array */
822
+ th->mark_stack_len = argc = RARRAY_LENINT(ary);
823
+
824
+ CHECK_STACK_OVERFLOW(th->cfp, argc);
825
+
826
+ MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
827
+ }
828
+ else {
829
+ argv[0] = arg0;
830
+ }
831
+
832
+ for (i=argc; i<m; i++) {
833
+ argv[i] = Qnil;
834
+ }
835
+
836
+ if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
837
+ const int arg_size = iseq->arg_size;
838
+ if (arg_size < argc) {
839
+ /*
840
+ * yield 1, 2
841
+ * => {|a|} # truncate
842
+ */
843
+ th->mark_stack_len = argc = arg_size;
844
+ }
845
+ }
846
+ else {
847
+ int r = iseq->arg_rest;
848
+
849
+ if (iseq->arg_post_len ||
850
+ iseq->arg_opts) { /* TODO: implement simple version for (iseq->arg_post_len==0 && iseq->arg_opts > 0) */
851
+ opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
852
+ }
853
+ else {
854
+ if (argc < r) {
855
+ /* yield 1
856
+ * => {|a, b, *r|}
857
+ */
858
+ for (i=argc; i<r; i++) {
859
+ argv[i] = Qnil;
860
+ }
861
+ argv[r] = rb_ary_new();
862
+ }
863
+ else {
864
+ argv[r] = rb_ary_new4(argc-r, &argv[r]);
865
+ }
866
+ }
867
+
868
+ th->mark_stack_len = iseq->arg_size;
869
+ }
870
+
871
+ /* {|&b|} */
872
+ if (iseq->arg_block != -1) {
873
+ VALUE procval = Qnil;
874
+
875
+ if (blockptr) {
876
+ if (blockptr->proc == 0) {
877
+ procval = rb_vm_make_proc(th, blockptr, rb_cProc);
878
+ }
879
+ else {
880
+ procval = blockptr->proc;
881
+ }
882
+ }
883
+
884
+ argv[iseq->arg_block] = procval;
885
+ }
886
+
887
+ th->mark_stack_len = 0;
888
+ return opt_pc;
889
+ }
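/* Note: vm_yield_setup_block_args() above implements non-lambda block
   semantics: a single Array argument is auto-splatted when the block expects
   more than one value (unless iseq->arg_simple & 0x02 forbids it), missing
   parameters are nil-filled, surplus values are truncated or gathered into
   the rest Array, and the &block parameter receives a Proc or nil. */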
890
+
891
+ static inline int
892
+ vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
893
+ int argc, VALUE *argv,
894
+ const rb_block_t *blockptr, int lambda)
895
+ {
896
+ if (0) { /* for debug */
897
+ printf(" argc: %d\n", argc);
898
+ printf("iseq argc: %d\n", iseq->argc);
899
+ printf("iseq opts: %d\n", iseq->arg_opts);
900
+ printf("iseq rest: %d\n", iseq->arg_rest);
901
+ printf("iseq post: %d\n", iseq->arg_post_len);
902
+ printf("iseq blck: %d\n", iseq->arg_block);
903
+ printf("iseq smpl: %d\n", iseq->arg_simple);
904
+ printf(" lambda: %s\n", lambda ? "true" : "false");
905
+ }
906
+
907
+ if (lambda) {
908
+ /* call as method */
909
+ int opt_pc;
910
+ VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, argv, &blockptr);
911
+ return opt_pc;
912
+ }
913
+ else {
914
+ return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
915
+ }
916
+ }
917
+
918
+ static VALUE
919
+ vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
920
+ {
921
+ const rb_block_t *block = GET_BLOCK_PTR();
922
+ rb_iseq_t *iseq;
923
+ int argc = (int)num;
924
+ VALUE type = GET_ISEQ()->local_iseq->type;
925
+
926
+ if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
927
+ rb_vm_localjump_error("no block given (yield)", Qnil, 0);
928
+ }
929
+ iseq = block->iseq;
930
+
931
+ argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);
932
+
933
+ if (BUILTIN_TYPE(iseq) != T_NODE) {
934
+ int opt_pc;
935
+ const int arg_size = iseq->arg_size;
936
+ VALUE * const rsp = GET_SP() - argc;
937
+ SET_SP(rsp);
938
+
939
+ CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
940
+ opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
941
+ block_proc_is_lambda(block->proc));
942
+
943
+ vm_push_frame(th, iseq,
944
+ VM_FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
945
+ iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
946
+ iseq->local_size - arg_size);
947
+
948
+ return Qundef;
949
+ }
950
+ else {
951
+ VALUE val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
952
+ POPN(argc); /* TODO: should put before C/yield? */
953
+ return val;
954
+ }
955
+ }
956
+
957
+ /* svar */
958
+
959
+ static inline NODE *
960
+ lfp_svar_place(rb_thread_t *th, VALUE *lfp)
961
+ {
962
+ VALUE *svar;
963
+
964
+ if (lfp && th->local_lfp != lfp) {
965
+ svar = &lfp[-1];
966
+ }
967
+ else {
968
+ svar = &th->local_svar;
969
+ }
970
+ if (NIL_P(*svar)) {
971
+ *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
972
+ }
973
+ return (NODE *)*svar;
974
+ }
975
+
976
+ static VALUE
977
+ lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
978
+ {
979
+ NODE *svar = lfp_svar_place(th, lfp);
980
+
981
+ switch (key) {
982
+ case 0:
983
+ return svar->u1.value;
984
+ case 1:
985
+ return svar->u2.value;
986
+ default: {
987
+ const VALUE hash = svar->u3.value;
988
+
989
+ if (hash == Qnil) {
990
+ return Qnil;
991
+ }
992
+ else {
993
+ return rb_hash_lookup(hash, key);
994
+ }
995
+ }
996
+ }
997
+ }
998
+
999
+ static void
1000
+ lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
1001
+ {
1002
+ NODE *svar = lfp_svar_place(th, lfp);
1003
+
1004
+ switch (key) {
1005
+ case 0:
1006
+ svar->u1.value = val;
1007
+ return;
1008
+ case 1:
1009
+ svar->u2.value = val;
1010
+ return;
1011
+ default: {
1012
+ VALUE hash = svar->u3.value;
1013
+
1014
+ if (hash == Qnil) {
1015
+ svar->u3.value = hash = rb_hash_new();
1016
+ }
1017
+ rb_hash_aset(hash, key, val);
1018
+ }
1019
+ }
1020
+ }
1021
+
1022
+ static inline VALUE
1023
+ vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
1024
+ {
1025
+ VALUE val;
1026
+
1027
+ if (type == 0) {
1028
+ VALUE k = key;
1029
+ if (FIXNUM_P(key)) {
1030
+ k = FIX2INT(key);
1031
+ }
1032
+ val = lfp_svar_get(th, lfp, k);
1033
+ }
1034
+ else {
1035
+ VALUE backref = lfp_svar_get(th, lfp, 1);
1036
+
1037
+ if (type & 0x01) {
1038
+ switch (type >> 1) {
1039
+ case '&':
1040
+ val = rb_reg_last_match(backref);
1041
+ break;
1042
+ case '`':
1043
+ val = rb_reg_match_pre(backref);
1044
+ break;
1045
+ case '\'':
1046
+ val = rb_reg_match_post(backref);
1047
+ break;
1048
+ case '+':
1049
+ val = rb_reg_match_last(backref);
1050
+ break;
1051
+ default:
1052
+ rb_bug("unexpected back-ref");
1053
+ }
1054
+ }
1055
+ else {
1056
+ val = rb_reg_nth_match((int)(type >> 1), backref);
1057
+ }
1058
+ }
1059
+ return val;
1060
+ }
1061
+
1062
+ static NODE *
1063
+ vm_get_cref0(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1064
+ {
1065
+ while (1) {
1066
+ if (lfp == dfp) {
1067
+ return iseq->cref_stack;
1068
+ }
1069
+ else if (dfp[-1] != Qnil) {
1070
+ return (NODE *)dfp[-1];
1071
+ }
1072
+ dfp = GET_PREV_DFP(dfp);
1073
+ }
1074
+ }
1075
+
1076
+ static NODE *
1077
+ vm_get_cref(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1078
+ {
1079
+ NODE *cref = vm_get_cref0(iseq, lfp, dfp);
1080
+
1081
+ if (cref == 0) {
1082
+ rb_bug("vm_get_cref: unreachable");
1083
+ }
1084
+ return cref;
1085
+ }
1086
+
1087
+ static NODE *
1088
+ vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
1089
+ {
1090
+ rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
1091
+ NODE *cref = NEW_BLOCK(klass);
1092
+ cref->nd_file = 0;
1093
+ cref->nd_visi = noex;
1094
+
1095
+ if (blockptr) {
1096
+ cref->nd_next = vm_get_cref0(blockptr->iseq, blockptr->lfp, blockptr->dfp);
1097
+ }
1098
+ else if (cfp) {
1099
+ cref->nd_next = vm_get_cref0(cfp->iseq, cfp->lfp, cfp->dfp);
1100
+ }
1101
+
1102
+ return cref;
1103
+ }
1104
+
1105
+ static inline VALUE
1106
+ vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1107
+ {
1108
+ NODE *cref = vm_get_cref(iseq, lfp, dfp);
1109
+ VALUE klass = Qundef;
1110
+
1111
+ while (cref) {
1112
+ if ((klass = cref->nd_clss) != 0) {
1113
+ break;
1114
+ }
1115
+ cref = cref->nd_next;
1116
+ }
1117
+
1118
+ return klass;
1119
+ }
1120
+
1121
+ static inline VALUE
1122
+ vm_get_const_base(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
1123
+ {
1124
+ NODE *cref = vm_get_cref(iseq, lfp, dfp);
1125
+ VALUE klass = Qundef;
1126
+
1127
+ while (cref) {
1128
+ if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
1129
+ (klass = cref->nd_clss) != 0) {
1130
+ break;
1131
+ }
1132
+ cref = cref->nd_next;
1133
+ }
1134
+
1135
+ return klass;
1136
+ }
1137
+
1138
+ static inline void
1139
+ vm_check_if_namespace(VALUE klass)
1140
+ {
1141
+ VALUE str;
1142
+ switch (TYPE(klass)) {
1143
+ case T_CLASS:
1144
+ case T_MODULE:
1145
+ break;
1146
+ default:
1147
+ str = rb_inspect(klass);
1148
+ rb_raise(rb_eTypeError, "%s is not a class/module",
1149
+ StringValuePtr(str));
1150
+ }
1151
+ }
1152
+
1153
+ static inline VALUE
1154
+ vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
1155
+ VALUE orig_klass, ID id, int is_defined)
1156
+ {
1157
+ VALUE val;
1158
+
1159
+ if (orig_klass == Qnil) {
1160
+ /* in current lexical scope */
1161
+ const NODE *cref = vm_get_cref(iseq, th->cfp->lfp, th->cfp->dfp);
1162
+ const NODE *root_cref = NULL;
1163
+ VALUE klass = orig_klass;
1164
+
1165
+ while (cref && cref->nd_next) {
1166
+ if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL)) {
1167
+ klass = cref->nd_clss;
1168
+ if (root_cref == NULL)
1169
+ root_cref = cref;
1170
+ }
1171
+ cref = cref->nd_next;
1172
+
1173
+ if (!NIL_P(klass)) {
1174
+ VALUE am = 0;
1175
+ search_continue:
1176
+ if (RCLASS_IV_TBL(klass) &&
1177
+ st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
1178
+ if (val == Qundef) {
1179
+ if (am == klass) break;
1180
+ am = klass;
1181
+ rb_autoload_load(klass, id);
1182
+ goto search_continue;
1183
+ }
1184
+ else {
1185
+ if (is_defined) {
1186
+ return 1;
1187
+ }
1188
+ else {
1189
+ return val;
1190
+ }
1191
+ }
1192
+ }
1193
+ }
1194
+ }
1195
+
1196
+ /* search self */
1197
+ if (root_cref && !NIL_P(root_cref->nd_clss)) {
1198
+ klass = root_cref->nd_clss;
1199
+ }
1200
+ else {
1201
+ klass = CLASS_OF(th->cfp->self);
1202
+ }
1203
+
1204
+ if (is_defined) {
1205
+ return rb_const_defined(klass, id);
1206
+ }
1207
+ else {
1208
+ return rb_const_get(klass, id);
1209
+ }
1210
+ }
1211
+ else {
1212
+ vm_check_if_namespace(orig_klass);
1213
+ if (is_defined) {
1214
+ return rb_const_defined_from(orig_klass, id);
1215
+ }
1216
+ else {
1217
+ return rb_const_get_from(orig_klass, id);
1218
+ }
1219
+ }
1220
+ }
1221
+
1222
+ static inline VALUE
1223
+ vm_get_cvar_base(NODE *cref)
1224
+ {
1225
+ VALUE klass;
1226
+
1227
+ while (cref && cref->nd_next &&
1228
+ (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
1229
+ (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
1230
+ cref = cref->nd_next;
1231
+
1232
+ if (!cref->nd_next) {
1233
+ rb_warn("class variable access from toplevel");
1234
+ }
1235
+ }
1236
+
1237
+ klass = cref->nd_clss;
1238
+
1239
+ if (NIL_P(klass)) {
1240
+ rb_raise(rb_eTypeError, "no class variables available");
1241
+ }
1242
+ return klass;
1243
+ }
1244
+
1245
+
1246
+ #ifndef USE_IC_FOR_IVAR
1247
+ #define USE_IC_FOR_IVAR 1
1248
+ #endif
1249
+
1250
+ static VALUE
1251
+ vm_getivar(VALUE obj, ID id, IC ic)
1252
+ {
1253
+ #if USE_IC_FOR_IVAR
1254
+ if (TYPE(obj) == T_OBJECT) {
1255
+ VALUE val = Qundef;
1256
+ VALUE klass = RBASIC(obj)->klass;
1257
+
1258
+ if (LIKELY(ic->ic_class == klass &&
1259
+ ic->ic_vmstat == GET_VM_STATE_VERSION())) {
1260
+ long index = ic->ic_value.index;
1261
+ long len = ROBJECT_NUMIV(obj);
1262
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1263
+
1264
+ if (index < len) {
1265
+ val = ptr[index];
1266
+ }
1267
+ }
1268
+ else {
1269
+ st_data_t index;
1270
+ long len = ROBJECT_NUMIV(obj);
1271
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1272
+ struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1273
+
1274
+ if (iv_index_tbl) {
1275
+ if (st_lookup(iv_index_tbl, id, &index)) {
1276
+ if ((long)index < len) {
1277
+ val = ptr[index];
1278
+ }
1279
+ ic->ic_class = klass;
1280
+ ic->ic_value.index = index;
1281
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1282
+ }
1283
+ }
1284
+ }
1285
+ if (UNLIKELY(val == Qundef)) {
1286
+ rb_warning("instance variable %s not initialized", rb_id2name(id));
1287
+ val = Qnil;
1288
+ }
1289
+ return val;
1290
+ }
1291
+ else {
1292
+ return rb_ivar_get(obj, id);
1293
+ }
1294
+ #else
1295
+ return rb_ivar_get(obj, id);
1296
+ #endif
1297
+ }
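/* Note: vm_getivar() above and vm_setivar() below use a per-call-site inline
   cache (IC) keyed on the receiver's class and GET_VM_STATE_VERSION(); on a
   hit the instance-variable access reduces to a bounds-checked index into
   ROBJECT_IVPTR(obj). */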
1298
+
1299
+ static void
1300
+ vm_setivar(VALUE obj, ID id, VALUE val, IC ic)
1301
+ {
1302
+ #if USE_IC_FOR_IVAR
1303
+ if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
1304
+ rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
1305
+ }
1306
+ if (OBJ_FROZEN(obj)) {
1307
+ rb_error_frozen("object");
1308
+ }
1309
+
1310
+ if (TYPE(obj) == T_OBJECT) {
1311
+ VALUE klass = RBASIC(obj)->klass;
1312
+ st_data_t index;
1313
+
1314
+ if (LIKELY(ic->ic_class == klass &&
1315
+ ic->ic_vmstat == GET_VM_STATE_VERSION())) {
1316
+ long index = ic->ic_value.index;
1317
+ long len = ROBJECT_NUMIV(obj);
1318
+ VALUE *ptr = ROBJECT_IVPTR(obj);
1319
+
1320
+ if (index < len) {
1321
+ ptr[index] = val;
1322
+ return; /* inline cache hit */
1323
+ }
1324
+ }
1325
+ else {
1326
+ struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
1327
+
1328
+ if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
1329
+ ic->ic_class = klass;
1330
+ ic->ic_value.index = index;
1331
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1332
+ }
1333
+ /* fall through */
1334
+ }
1335
+ }
1336
+ rb_ivar_set(obj, id, val);
1337
+ #else
1338
+ rb_ivar_set(obj, id, val);
1339
+ #endif
1340
+ }
1341
+
1342
+ static inline const rb_method_entry_t *
1343
+ vm_method_search(VALUE id, VALUE klass, IC ic)
1344
+ {
1345
+ rb_method_entry_t *me;
1346
+ #if OPT_INLINE_METHOD_CACHE
1347
+ if (LIKELY(klass == ic->ic_class) &&
1348
+ LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
1349
+ me = ic->ic_value.method;
1350
+ }
1351
+ else {
1352
+ me = rb_method_entry(klass, id);
1353
+ ic->ic_class = klass;
1354
+ ic->ic_value.method = me;
1355
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
1356
+ }
1357
+ #else
1358
+ me = rb_method_entry(klass, id);
1359
+ #endif
1360
+ return me;
1361
+ }
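/* Note: vm_method_search() above caches the resolved rb_method_entry_t per
   call site when OPT_INLINE_METHOD_CACHE is enabled; the cache is treated as
   stale whenever GET_VM_STATE_VERSION() has changed, which presumably happens
   when methods are (re)defined. */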
1362
+
1363
+ static inline VALUE
1364
+ vm_search_normal_superclass(VALUE klass, VALUE recv)
1365
+ {
1366
+ if (BUILTIN_TYPE(klass) == T_CLASS) {
1367
+ return RCLASS_SUPER(klass);
1368
+ }
1369
+ else if (BUILTIN_TYPE(klass) == T_MODULE) {
1370
+ VALUE k = CLASS_OF(recv);
1371
+ while (k) {
1372
+ if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
1373
+ return RCLASS_SUPER(k);
1374
+ }
1375
+ k = RCLASS_SUPER(k);
1376
+ }
1377
+ return rb_cObject;
1378
+ }
1379
+ else {
1380
+ rb_bug("vm_search_normal_superclass: should not be reach here");
1381
+ }
1382
+ }
1383
+
1384
+ static void
1385
+ vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip,
1386
+ VALUE recv, VALUE sigval,
1387
+ ID *idp, VALUE *klassp)
1388
+ {
1389
+ ID id;
1390
+ VALUE klass;
1391
+
1392
+ while (ip && !ip->klass) {
1393
+ ip = ip->parent_iseq;
1394
+ }
1395
+
1396
+ if (ip == 0) {
1397
+ rb_raise(rb_eNoMethodError, "super called outside of method");
1398
+ }
1399
+
1400
+ id = ip->defined_method_id;
1401
+
1402
+ if (ip != ip->local_iseq) {
1403
+ /* defined by Module#define_method() */
1404
+ rb_control_frame_t *lcfp = GET_CFP();
1405
+
1406
+ if (!sigval) {
1407
+ /* zsuper */
1408
+ rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
1409
+ }
1410
+
1411
+ while (lcfp->iseq != ip) {
1412
+ rb_thread_t *th = GET_THREAD();
1413
+ VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
1414
+ while (1) {
1415
+ lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
1416
+ if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, lcfp)) {
1417
+ rb_raise(rb_eNoMethodError,
1418
+ "super called outside of method");
1419
+ }
1420
+ if (lcfp->dfp == tdfp) {
1421
+ break;
1422
+ }
1423
+ }
1424
+ }
1425
+
1426
+ /* temporary measure for [Bug #2420] [Bug #3136] */
1427
+ if (!lcfp->me) {
1428
+ rb_raise(rb_eNoMethodError, "super called outside of method");
1429
+ }
1430
+
1431
+ id = lcfp->me->def->original_id;
1432
+ klass = vm_search_normal_superclass(lcfp->me->klass, recv);
1433
+ }
1434
+ else {
1435
+ klass = vm_search_normal_superclass(ip->klass, recv);
1436
+ }
1437
+
1438
+ *idp = id;
1439
+ *klassp = klass;
1440
+ }
1441
+
1442
+ static VALUE
1443
+ vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
1444
+ rb_num_t throw_state, VALUE throwobj)
1445
+ {
1446
+ int state = (int)(throw_state & 0xff);
1447
+ int flag = (int)(throw_state & 0x8000);
1448
+ rb_num_t level = throw_state >> 16;
1449
+
1450
+ if (state != 0) {
1451
+ VALUE *pt = 0;
1452
+ if (flag != 0) {
1453
+ pt = (void *) 1;
1454
+ }
1455
+ else {
1456
+ if (state == TAG_BREAK) {
1457
+ rb_control_frame_t *cfp = GET_CFP();
1458
+ VALUE *dfp = GET_DFP();
1459
+ int is_orphan = 1;
1460
+ rb_iseq_t *base_iseq = GET_ISEQ();
1461
+
1462
+ search_parent:
1463
+ if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
1464
+ if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
1465
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1466
+ dfp = cfp->dfp;
1467
+ goto search_parent;
1468
+ }
1469
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
1470
+ base_iseq = base_iseq->parent_iseq;
1471
+
1472
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
1473
+ if (cfp->dfp == dfp) {
1474
+ goto search_parent;
1475
+ }
1476
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1477
+ }
1478
+ rb_bug("VM (throw): can't find break base.");
1479
+ }
1480
+
1481
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
1482
+ /* lambda{... break ...} */
1483
+ is_orphan = 0;
1484
+ pt = cfp->dfp;
1485
+ state = TAG_RETURN;
1486
+ }
1487
+ else {
1488
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
1489
+
1490
+ while ((VALUE *)cfp < th->stack + th->stack_size) {
1491
+ if (cfp->dfp == dfp) {
1492
+ VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
1493
+ rb_iseq_t *iseq = cfp->iseq;
1494
+ int i;
1495
+
1496
+ for (i=0; i<iseq->catch_table_size; i++) {
1497
+ struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
1498
+
1499
+ if (entry->type == CATCH_TYPE_BREAK &&
1500
+ entry->start < epc && entry->end >= epc) {
1501
+ if (entry->cont == epc) {
1502
+ goto found;
1503
+ }
1504
+ else {
1505
+ break;
1506
+ }
1507
+ }
1508
+ }
1509
+ break;
1510
+
1511
+ found:
1512
+ pt = dfp;
1513
+ is_orphan = 0;
1514
+ break;
1515
+ }
1516
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1517
+ }
1518
+ }
1519
+
1520
+ if (is_orphan) {
1521
+ rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
1522
+ }
1523
+ }
1524
+ else if (state == TAG_RETRY) {
1525
+ rb_num_t i;
1526
+ pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
1527
+ for (i = 0; i < level; i++) {
1528
+ pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
1529
+ }
1530
+ }
1531
+ else if (state == TAG_RETURN) {
1532
+ rb_control_frame_t *cfp = GET_CFP();
1533
+ VALUE *dfp = GET_DFP();
1534
+ VALUE *lfp = GET_LFP();
1535
+
1536
+ /* check orphan and get dfp */
1537
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
1538
+ if (!lfp) {
1539
+ lfp = cfp->lfp;
1540
+ }
1541
+ if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_CLASS) {
1542
+ lfp = 0;
1543
+ }
1544
+
1545
+ if (cfp->lfp == lfp) {
1546
+ if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
1547
+ VALUE *tdfp = dfp;
1548
+
1549
+ while (lfp != tdfp) {
1550
+ if (cfp->dfp == tdfp) {
1551
+ /* in lambda */
1552
+ dfp = cfp->dfp;
1553
+ goto valid_return;
1554
+ }
1555
+ tdfp = GC_GUARDED_PTR_REF((VALUE *)*tdfp);
1556
+ }
1557
+ }
1558
+ }
1559
+
1560
+ if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_METHOD) {
1561
+ dfp = lfp;
1562
+ goto valid_return;
1563
+ }
1564
+
1565
+ cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1566
+ }
1567
+
1568
+ rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
1569
+
1570
+ valid_return:
1571
+ pt = dfp;
1572
+ }
1573
+ else {
1574
+ rb_bug("isns(throw): unsupport throw type");
1575
+ }
1576
+ }
1577
+ th->state = state;
1578
+ return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
1579
+ }
1580
+ else {
1581
+ /* continue throw */
1582
+ VALUE err = throwobj;
1583
+
1584
+ if (FIXNUM_P(err)) {
1585
+ th->state = FIX2INT(err);
1586
+ }
1587
+ else if (SYMBOL_P(err)) {
1588
+ th->state = TAG_THROW;
1589
+ }
1590
+ else if (BUILTIN_TYPE(err) == T_NODE) {
1591
+ th->state = GET_THROWOBJ_STATE(err);
1592
+ }
1593
+ else {
1594
+ th->state = TAG_RAISE;
1595
+ /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
1596
+ }
1597
+ return err;
1598
+ }
1599
+ }
1600
+
1601
+ static inline void
1602
+ vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
1603
+ {
1604
+ int is_splat = flag & 0x01;
1605
+ rb_num_t space_size = num + is_splat;
1606
+ VALUE *base = cfp->sp, *ptr;
1607
+ volatile VALUE tmp_ary;
1608
+ rb_num_t len;
1609
+
1610
+ if (TYPE(ary) != T_ARRAY) {
1611
+ ary = rb_ary_to_ary(ary);
1612
+ }
1613
+
1614
+ cfp->sp += space_size;
1615
+
1616
+ tmp_ary = ary;
1617
+ ptr = RARRAY_PTR(ary);
1618
+ len = (rb_num_t)RARRAY_LEN(ary);
1619
+
1620
+ if (flag & 0x02) {
1621
+ /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1622
+ rb_num_t i = 0, j;
1623
+
1624
+ if (len < num) {
1625
+ for (i=0; i<num-len; i++) {
1626
+ *base++ = Qnil;
1627
+ }
1628
+ }
1629
+ for (j=0; i<num; i++, j++) {
1630
+ VALUE v = ptr[len - j - 1];
1631
+ *base++ = v;
1632
+ }
1633
+ if (is_splat) {
1634
+ *base = rb_ary_new4(len - j, ptr);
1635
+ }
1636
+ }
1637
+ else {
1638
+ /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1639
+ rb_num_t i;
1640
+ VALUE *bptr = &base[space_size - 1];
1641
+
1642
+ for (i=0; i<num; i++) {
1643
+ if (len <= i) {
1644
+ for (; i<num; i++) {
1645
+ *bptr-- = Qnil;
1646
+ }
1647
+ break;
1648
+ }
1649
+ *bptr-- = ptr[i];
1650
+ }
1651
+ if (is_splat) {
1652
+ if (num > len) {
1653
+ *bptr = rb_ary_new();
1654
+ }
1655
+ else {
1656
+ *bptr = rb_ary_new4(len - num, ptr + num);
1657
+ }
1658
+ }
1659
+ }
1660
+ }
1661
+
1662
+ static inline int
1663
+ check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
1664
+ {
1665
+ if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
1666
+ me->def->body.cfunc.func == func) {
1667
+ return 1;
1668
+ }
1669
+ else {
1670
+ return 0;
1671
+ }
1672
+ }
1673
+
1674
+ static
1675
+ #ifndef NO_BIG_INLINE
1676
+ inline
1677
+ #endif
1678
+ VALUE
1679
+ opt_eq_func(VALUE recv, VALUE obj, IC ic)
1680
+ {
1681
+ if (FIXNUM_2_P(recv, obj) &&
1682
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1683
+ return (recv == obj) ? Qtrue : Qfalse;
1684
+ }
1685
+ else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
1686
+ if (HEAP_CLASS_OF(recv) == rb_cFloat &&
1687
+ HEAP_CLASS_OF(obj) == rb_cFloat &&
1688
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1689
+ double a = RFLOAT_VALUE(recv);
1690
+ double b = RFLOAT_VALUE(obj);
1691
+
1692
+ if (isnan(a) || isnan(b)) {
1693
+ return Qfalse;
1694
+ }
1695
+ return (a == b) ? Qtrue : Qfalse;
1696
+ }
1697
+ else if (HEAP_CLASS_OF(recv) == rb_cString &&
1698
+ HEAP_CLASS_OF(obj) == rb_cString &&
1699
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
1700
+ return rb_str_equal(recv, obj);
1701
+ }
1702
+ }
1703
+
1704
+ {
1705
+ const rb_method_entry_t *me = vm_method_search(idEq, CLASS_OF(recv), ic);
1706
+ extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);
1707
+
1708
+ if (check_cfunc(me, rb_obj_equal)) {
1709
+ return recv == obj ? Qtrue : Qfalse;
1710
+ }
1711
+ }
1712
+
1713
+ return Qundef;
1714
+ }
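/* Note: opt_eq_func() above is the fast path for ==: Fixnum, Float and String
   operands are compared directly as long as BASIC_OP_UNREDEFINED_P(BOP_EQ)
   reports that == has not been redefined, an entry still backed by
   rb_obj_equal degrades to pointer identity, and Qundef tells the caller to
   fall back to a full method dispatch. */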
1715
+
1716
+ struct opt_case_dispatch_i_arg {
1717
+ VALUE obj;
1718
+ int label;
1719
+ };
1720
+
1721
+ static int
1722
+ opt_case_dispatch_i(st_data_t key, st_data_t data, st_data_t p)
1723
+ {
1724
+ struct opt_case_dispatch_i_arg *arg = (void *)p;
1725
+
1726
+ if (RTEST(rb_funcall((VALUE)key, idEqq, 1, arg->obj))) {
1727
+ arg->label = FIX2INT((VALUE)data);
1728
+ return ST_STOP;
1729
+ }
1730
+ else {
1731
+ return ST_CONTINUE;
1732
+ }
1733
+ }
1734
+