cast_off 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. data/README +578 -0
  2. data/README.en +256 -0
  3. data/bin/CastOff +145 -0
  4. data/cast_off.gemspec +25 -0
  5. data/ext/cast_off/cast_off.c.rb +1386 -0
  6. data/ext/cast_off/cast_off.h +24 -0
  7. data/ext/cast_off/depend +70 -0
  8. data/ext/cast_off/extconf.rb +19 -0
  9. data/ext/cast_off/generated_c_include/inline_api.h +507 -0
  10. data/ext/cast_off/generated_c_include/iter_api.h +595 -0
  11. data/ext/cast_off/generated_c_include/unbox_api.h.rb +76 -0
  12. data/ext/cast_off/generated_c_include/vm_api.h +751 -0
  13. data/ext/cast_off/ruby_source/atomic.h +56 -0
  14. data/ext/cast_off/ruby_source/constant.h +34 -0
  15. data/ext/cast_off/ruby_source/debug.h +41 -0
  16. data/ext/cast_off/ruby_source/eval_intern.h +234 -0
  17. data/ext/cast_off/ruby_source/gc.h +98 -0
  18. data/ext/cast_off/ruby_source/id.h +175 -0
  19. data/ext/cast_off/ruby_source/insns.inc +179 -0
  20. data/ext/cast_off/ruby_source/insns_info.inc +695 -0
  21. data/ext/cast_off/ruby_source/internal.h +227 -0
  22. data/ext/cast_off/ruby_source/iseq.h +125 -0
  23. data/ext/cast_off/ruby_source/manual_update.h +135 -0
  24. data/ext/cast_off/ruby_source/method.h +105 -0
  25. data/ext/cast_off/ruby_source/node.h +503 -0
  26. data/ext/cast_off/ruby_source/thread_pthread.h +51 -0
  27. data/ext/cast_off/ruby_source/thread_win32.h +40 -0
  28. data/ext/cast_off/ruby_source/vm_core.h +756 -0
  29. data/ext/cast_off/ruby_source/vm_exec.h +184 -0
  30. data/ext/cast_off/ruby_source/vm_insnhelper.c +1748 -0
  31. data/ext/cast_off/ruby_source/vm_insnhelper.h +220 -0
  32. data/ext/cast_off/ruby_source/vm_opts.h +51 -0
  33. data/lib/cast_off.rb +15 -0
  34. data/lib/cast_off/compile.rb +629 -0
  35. data/lib/cast_off/compile/basicblock.rb +144 -0
  36. data/lib/cast_off/compile/cfg.rb +391 -0
  37. data/lib/cast_off/compile/code_manager.rb +284 -0
  38. data/lib/cast_off/compile/configuration.rb +2368 -0
  39. data/lib/cast_off/compile/dependency.rb +240 -0
  40. data/lib/cast_off/compile/information.rb +775 -0
  41. data/lib/cast_off/compile/instruction.rb +446 -0
  42. data/lib/cast_off/compile/ir/call_ir.rb +2348 -0
  43. data/lib/cast_off/compile/ir/guard_ir.rb +423 -0
  44. data/lib/cast_off/compile/ir/jump_ir.rb +223 -0
  45. data/lib/cast_off/compile/ir/operand.rb +934 -0
  46. data/lib/cast_off/compile/ir/param_ir.rb +98 -0
  47. data/lib/cast_off/compile/ir/return_ir.rb +92 -0
  48. data/lib/cast_off/compile/ir/simple_ir.rb +808 -0
  49. data/lib/cast_off/compile/ir/sub_ir.rb +212 -0
  50. data/lib/cast_off/compile/iseq.rb +454 -0
  51. data/lib/cast_off/compile/method_information.rb +1384 -0
  52. data/lib/cast_off/compile/namespace/namespace.rb +556 -0
  53. data/lib/cast_off/compile/namespace/uuid.rb +323 -0
  54. data/lib/cast_off/compile/stack.rb +65 -0
  55. data/lib/cast_off/compile/translator.rb +1562 -0
  56. data/lib/cast_off/suggestion.rb +98 -0
  57. data/lib/cast_off/util.rb +58 -0
  58. metadata +107 -0
data/ext/cast_off/generated_c_include/unbox_api.h.rb
@@ -0,0 +1,76 @@
+ #if 1
+ #define __END__ /* void */
+ #else
+ require("erb");
+ require('rbconfig');
+ DATA.rewind();
+
+ ERB.new(DATA.read(), 0, '%-').run();
+ #endif
+ __END__
+ #define CBOOL2RBOOL(b) ((b) ? Qtrue : Qfalse)
+ #ifdef INJECT_GUARD
+ #define CFIX2RFIX(b) (FIXABLE((b)) ? LONG2FIX((b)) : rb_raise(rb_eCastOffExecutionError, TYPE_ERROR_MESSAGE()))
+ #else
+ #define CFIX2RFIX(b) LONG2FIX((b))
+ #endif
+ %# arg size, [type0, type1, ...]
+ %[['float_float',
+ % {'+' => 'plus', '-' => 'minus', '*' => 'mult', '/' => 'div'},
+ % 1,
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => 'DBL2NUM', 'double' => ''}],
+ % ['float_float',
+ % {'>' => 'gt', '>=' => 'ge', '<' => 'lt', '<=' => 'le'},
+ % 1,
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => 'CBOOL2RBOOL'}],
+ % ['fixnum_fixnum',
+ % {'+' => 'plus', '-' => 'minus', '*' => 'mult', '/' => 'div'},
+ % 1,
+ % {'VALUE' => 'FIX2LONG', 'long' => ''},
+ % {'VALUE' => 'FIX2LONG', 'long' => ''},
+ % {'VALUE' => 'CFIX2RFIX', 'long' => ''}],
+ % ['fixnum_float',
+ % {'+' => 'plus', '-' => 'minus', '*' => 'mult', '/' => 'div'},
+ % 1,
+ % {'VALUE' => '(double)FIX2LONG', 'long' => ''},
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => 'DBL2NUM', 'double' => ''}],
+ % ['float_fixnum',
+ % {'+' => 'plus', '-' => 'minus', '*' => 'mult', '/' => 'div'},
+ % 1,
+ % {'VALUE' => 'RFLOAT_VALUE', 'double' => ''},
+ % {'VALUE' => '(double)FIX2LONG', 'long' => ''},
+ % {'VALUE' => 'DBL2NUM', 'double' => ''}]].each do |(function_name, h, argc, reciever_converter, arguments_converter, return_value_converter)|
+ % arguments_decls = arguments_converter.keys
+ % reciever_decls = reciever_converter.keys
+ % return_value_decls = return_value_converter.keys
+ % reciever_permutation = reciever_decls.map{|r| [r]}
+ % arguments_permutation = Array.new(argc){arguments_decls}.flatten.permutation(argc).to_a.uniq
+ % return_value_permutation = return_value_decls.map{|r| [r]}
+ % h.each do |(operator, name)|
+ % ary = reciever_permutation.zip(Array.new(reciever_permutation.size){arguments_permutation}).map{|(_r, _a)| _a.map{|__a| _r + __a}}.inject(:+)
+ % Array.new(return_value_permutation.size){ary}.zip(return_value_permutation).map{|(_a, _r)| _a.map{|__a| __a + _r}}.inject(:+).each do |ds|
+ % raise unless ds.size >= 2
+ % suffix = ds.join('_')
+ % reciever = ds.shift
+ % return_value = ds.pop
+ % arguments = ds
+ % parameter = []
+ % parameter << "#{reciever} v0"
+ % arguments.each_with_index{|a, idx| parameter << "#{a} v#{idx + 1}"}
+ % statement = []
+ % statement << "#{reciever_converter[reciever]}(v0)"
+ % arguments.each_with_index{|a, idx| statement << "#{arguments_converter[a]}(v#{idx + 1})"}
+ static inline <%= return_value %>
+ cast_off_inline_<%= function_name %>_<%= name %>_<%= suffix %>(<%= parameter.join(', ') %>)
+ {
+ return <%= return_value_converter[return_value] %>(<%= statement.join(" #{operator} ") %>);
+ }
+ % end
+ % end
+ %end
+
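/* Annotation: running the template above through ERB stamps out one inline helper per
 * operator and per receiver/argument/return-type permutation. A traced expansion of the
 * all-VALUE float_float '+' case looks like this (illustrative only; these lines live in
 * the generated header, not in this diff):
 *
 *     static inline VALUE
 *     cast_off_inline_float_float_plus_VALUE_VALUE_VALUE(VALUE v0, VALUE v1)
 *     {
 *         return DBL2NUM(RFLOAT_VALUE(v0) + RFLOAT_VALUE(v1));
 *     }
 */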
data/ext/cast_off/generated_c_include/vm_api.h
@@ -0,0 +1,751 @@
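/* note: handle_blockarg coerces a non-nil block argument to a Proc (via #to_proc when
   needed) and publishes its rb_block_t through th->passed_block for the call that follows. */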
+ static VALUE handle_blockarg(VALUE blockarg)
+ {
+     VALUE thval = rb_thread_current();
+     rb_thread_t *th = DATA_PTR(thval);
+     rb_control_frame_t *cfp = th->cfp;
+     rb_proc_t *po;
+     rb_block_t *block;
+     VALUE proc;
+
+     proc = blockarg;
+     if (proc != Qnil) {
+         if (!rb_obj_is_proc(proc)) {
+             VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
+             if (NIL_P(b) || !rb_obj_is_proc(b)) {
+                 rb_raise(rb_eTypeError, "wrong argument type %s (expected Proc)", rb_obj_classname(proc));
+             }
+             proc = b;
+         }
+         po = DATA_PTR(proc);
+         block = &po->block;
+         th->passed_block = block;
+     }
+     return proc;
+ }
+ NOINLINE(static VALUE handle_blockarg(VALUE blockarg));
+
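/* note: IN_HEAP_P is true when ptr lies outside the thread's VM stack, i.e. the
   referenced env has already been moved to the heap. */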
+ #define IN_HEAP_P(th, ptr) \
+     (!((th)->stack < (ptr) && (ptr) < ((th)->stack + (th)->stack_size)))
+
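/* note: cfp_env returns the heap Env object reachable from cfp's dfp, or Qnil while
   the env still lives on the VM stack. */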
+ static VALUE cfp_env(rb_thread_t *th, rb_control_frame_t *cfp)
+ {
+     VALUE *dfp = cfp->dfp;
+     VALUE *envptr = GC_GUARDED_PTR_REF(dfp[0]);
+     VALUE envval;
+
+     if (!envptr) {
+         rb_bug("cfp_env: should not be reached(0)");
+     }
+
+     if (IN_HEAP_P(th, envptr)) {
+         envval = envptr[1];
+         if (rb_class_of(envval) != rb_cEnv) {
+             rb_bug("cfp_env: should not be reached(1)");
+         }
+     } else {
+         envval = Qnil;
+     }
+
+     return envval;
+ }
+
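/* note: return_block implements a 'return' escaping a compiled block: it validates the
   throw state, walks the frame stack to confirm the target method frame is still alive
   (raising on orphaned or unsupported cases), then throws TAG_RETURN. */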
+ /* supporting ensure would probably require a substantial rewrite */
+ static VALUE return_block(rb_num_t raw_state, VALUE throwobj, int lambda_p)
+ {
+     int state = (int)(raw_state & 0xff);
+     int flag = (int)(raw_state & 0x8000);
+     /*rb_num_t level = raw_state >> 16;*/
+
+     VALUE thval = rb_thread_current();
+     rb_thread_t *th = DATA_PTR(thval);
+     rb_control_frame_t *cfp = th->cfp;
+     VALUE *dfp = cfp->dfp;
+     VALUE *lfp = cfp->lfp;
+
+     rb_thread_check_ints();
+
+     if (state != TAG_RETURN || flag != 0) {
+         rb_bug("return_block: should not be reached(0)");
+     }
+
+     if (VM_FRAME_TYPE(cfp) != VM_FRAME_MAGIC_IFUNC) {
+         rb_bug("return_block: should not be reached(1)");
+     }
+
+     if (dfp == lfp) {
+         rb_bug("return_block: should not be reached(2)");
+     }
+
+     if (lambda_p) {
+         return throwobj;
+     }
+
+     /* check orphan and get dfp */
+     cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     while ((VALUE *) cfp < th->stack + th->stack_size) {
+         if (cfp->lfp == lfp) {
+             VALUE env;
+
+             switch(VM_FRAME_TYPE(cfp)) {
+               case VM_FRAME_MAGIC_IFUNC:
+                 /* check lambda */
+                 env = cfp_env(th, cfp);
+                 if (env != Qnil) {
+                     rb_raise(rb_eCastOffExecutionError, "Currently, CastOff cannot handle this return statement");
+                 }
+                 break;
+               case VM_FRAME_MAGIC_CFUNC:
+                 /* nothing to do */
+                 break;
+               default:
+                 rb_bug("return_block: should not be reached(3)");
+             }
+         }
+
+         if (cfp->dfp == lfp) {
+             int ok;
+
+             switch(VM_FRAME_TYPE(cfp)) {
+               case VM_FRAME_MAGIC_METHOD:
+                 /* deoptimized frame */
+                 /* nothing to do */
+               case VM_FRAME_MAGIC_CFUNC:
+                 ok = 1;
+                 break;
+               default:
+                 ok = 0;
+             }
+
+             if (ok) {
+                 dfp = lfp;
+                 goto valid_return;
+             }
+         }
+         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     }
+
+     rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
+
+   valid_return:
+
+     th->errinfo = (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) dfp, state);
+     th->state = 0;
+
+     TH_JUMP_TAG(th, TAG_RETURN);
+ }
+ NOINLINE(static VALUE return_block(rb_num_t raw_state, VALUE throwobj, int lambda_p));
+
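/* note: catch_return consumes a pending TAG_RETURN at the frame whose dfp matches the
   throw object's catch point; Qundef means "not ours, keep unwinding". */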
+ static inline VALUE catch_return(rb_thread_t *th)
+ {
+     VALUE err = th->errinfo;
+     VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
+
+     if (th->cfp->dfp == escape_dfp) {
+         th->errinfo = Qnil;
+         return GET_THROWOBJ_VAL(err);
+     }
+     return Qundef;
+ }
+
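/* note: return_from_execute rethrows a 'return' against the caller of CastOff.execute,
   two frames below the current one, via vm_throw. */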
+ static void return_from_execute(rb_num_t raw_state, VALUE val)
+ {
+     VALUE thval = rb_thread_current();
+     rb_num_t state;
+     rb_thread_t *th;
+     rb_control_frame_t *cfp;
+
+     th = DATA_PTR(thval);
+     cfp = th->cfp; /* current cfp */
+     cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); /* CastOff.execute cfp */
+     cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     rb_thread_check_ints();
+     th->errinfo = vm_throw(th, cfp, raw_state, val);
+     state = th->state;
+     th->state = 0;
+     TH_JUMP_TAG(th, state);
+ }
+ NOINLINE(static void return_from_execute(rb_num_t raw_state, VALUE val));
+
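/* note: iseq_from_cfp extracts the iseq of a control frame, treating finish and ifunc
   frames as fatal since they carry no usable iseq. */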
+ static rb_iseq_t *iseq_from_cfp(rb_control_frame_t *cfp)
+ {
+     rb_iseq_t *iseq;
+     if (SPECIAL_CONST_P(cfp->iseq)) {
+         /* when finish frame, cfp->self = 4 (Qnil), cfp->flag = 0x51 (VM_FRAME_MAGIC_FINISH) */
+         rb_bug("iseq_from_cfp: should not be reached(0): self = %lx, flag = %lx", cfp->self, cfp->flag);
+     } else if (BUILTIN_TYPE(cfp->iseq) != T_NODE) {
+         iseq = cfp->iseq;
+     } else {
+         /* NODE *ifunc = (NODE *)cfp->iseq; */
+         /* VALUE iseqval = ifunc->nd_tval; */
+         /* iseq = DATA_PTR(iseqval); */
+         rb_bug("iseq_from_cfp: should not be reached(1)");
+     }
+     if (rb_class_of(iseq->self) != rb_cISeq) {
+         rb_bug("iseq_from_cfp: should not be reached(2)");
+     }
+     return iseq;
+ }
+
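/* note: break_block implements a 'break' escaping a compiled block: it locates the
   CATCH_TYPE_BREAK entry of the destination frame via its catch table, raises for
   orphaned breaks, patches the destination pc for cfunc/ifunc frames, then throws TAG_BREAK. */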
+ static void break_block(rb_num_t raw_state, VALUE epc, VALUE throwobj)
+ {
+     int state = (int)(raw_state & 0xff);
+     int flag = (int)(raw_state & 0x8000);
+     /*rb_num_t level = raw_state >> 16;*/
+     int is_orphan = 1;
+
+     VALUE thval = rb_thread_current();
+     rb_thread_t *th = DATA_PTR(thval);
+     rb_control_frame_t *cfp = th->cfp;
+     VALUE *dfp = cfp->dfp;
+
+     rb_thread_check_ints();
+
+     if (state != TAG_BREAK || flag != 0) {
+         /*
+          * When flag != 0, the value of dfp changes, so we would have to look at vm_exec
+          * and implement the part that handles state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0.
+          */
+         rb_bug("break_block: should not be reached(0)");
+     }
+     if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
+         rb_raise(rb_eCastOffExecutionError, "Currently, CastOff doesn't support break statement in lambda");
+     }
+
+     dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
+     while ((VALUE *)cfp < th->stack + th->stack_size) {
+         if (cfp->dfp == dfp) {
+             rb_iseq_t *iseq = iseq_from_cfp(cfp);
+             int i;
+
+             for (i=0; i<iseq->catch_table_size; i++) {
+                 struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
+                 if (entry->type == CATCH_TYPE_BREAK && entry->start < epc && entry->end >= epc) {
+                     if (entry->cont == epc) {
+                         goto found;
+                     } else {
+                         break;
+                     }
+                 }
+             }
+             break;
+
+           found:
+             is_orphan = 0;
+             break;
+         }
+         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     }
+
+     if (is_orphan) {
+         rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
+     }
+
+     switch(VM_FRAME_TYPE(cfp)) {
+       case VM_FRAME_MAGIC_CFUNC:
+       case VM_FRAME_MAGIC_IFUNC:
+         cfp->pc = cfp->iseq->iseq_encoded + epc;
+         break;
+       case VM_FRAME_MAGIC_BLOCK:
+       case VM_FRAME_MAGIC_LAMBDA:
+       case VM_FRAME_MAGIC_METHOD:
+         /* deoptimized frame */
+         /* nothing to do */
+         break;
+       default:
+         rb_bug("break_block: should not be reached(1)");
+     }
+     th->errinfo = (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) dfp, state);
+     th->state = 0;
+
+     TH_JUMP_TAG(th, TAG_BREAK);
+ }
+ NOINLINE(static void break_block(rb_num_t raw_state, VALUE epc, VALUE throwobj));
+
+ static inline VALUE catch_break(rb_thread_t *th)
+ {
+     VALUE err = th->errinfo;
+     VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
+
+     if (th->cfp->dfp == escape_dfp) {
+         th->errinfo = Qnil;
+         return GET_THROWOBJ_VAL(err);
+     }
+     return Qundef;
+ }
+
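/* note: rb_vm_set_finish_env pushes a VM_FRAME_MAGIC_FINISH frame (reusing the pc of the
   nearest existing finish frame) so that a later vm_exec returns when control unwinds to it. */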
+ static VALUE
+ rb_vm_set_finish_env(rb_thread_t * th, VALUE *sp)
+ {
+     rb_control_frame_t *cfp = th->cfp;
+     VALUE *finish_insn_seq;
+
+     while(VM_FRAME_TYPE(cfp) != VM_FRAME_MAGIC_FINISH) {
+         /* FIXME */
+         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     }
+     finish_insn_seq = cfp->pc;
+
+     vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH, Qnil, th->cfp->lfp[0], 0, sp, 0, 1);
+     th->cfp->pc = (VALUE *)&finish_insn_seq[0];
+     return Qtrue;
+ }
+
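/* note: construct_frame_inline rebuilds an interpreter frame from compiled state: it copies
   the locals back onto the VM stack, pushes a method/block frame resuming at
   iseq_encoded + pc, and restores the operand stack. */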
+ static void construct_frame_inline(VALUE self, rb_thread_t *th, rb_iseq_t *iseq, long pc, int local_c, VALUE *local_v, int stack_c, VALUE *stack_v, int method_p, VALUE *lfp, VALUE *dfp)
+ {
+     rb_control_frame_t *cfp;
+     VALUE *sp;
+     int i;
+
+     cfp = th->cfp;
+     sp = cfp->sp;
+     CHECK_STACK_OVERFLOW(cfp, local_c);
+     /* restore local variables */
+     for(i = 0; i < local_c; i++, sp++) {
+         *sp = local_v[i];
+     }
+     if (local_c + 1 != iseq->local_size) {
+         rb_bug("should not be reached (3)");
+     }
+     vm_push_frame(th, iseq, method_p ? VM_FRAME_MAGIC_METHOD : VM_FRAME_MAGIC_BLOCK, self, (VALUE)dfp, iseq->iseq_encoded + pc, sp, lfp, 1);
+     sp = th->cfp->sp;
+     for(i = 0; i < stack_c; i++, sp++) {
+         *sp = stack_v[i];
+     }
+     th->cfp->sp = sp;
+ }
+
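/* note: cast_off_deoptimize_inline is the deoptimization entry for inlined code; it
   reconstructs the interpreter frame(s) and either resumes execution through vm_exec
   (top_p) or hands back a block pointer for the caller to invoke. */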
+ static VALUE cast_off_deoptimize_inline(VALUE self, rb_iseq_t *iseq, rb_iseq_t *biseq, long pc, int local_c, VALUE *local_v, int stack_c, VALUE *stack_v, int top_p, int bottom_p, int method_p, VALUE *lfp, VALUE *dfp)
+ {
+     VALUE val = Qundef;
+     VALUE thval = rb_thread_current();
+     rb_thread_t *th = DATA_PTR(thval);
+     rb_control_frame_t *rcfp;
+     VALUE *rsp;
+
+     if (bottom_p) {
+         if (top_p) {
+             rcfp = th->cfp;
+             rsp = rcfp->sp;
+         }
+         rb_vm_set_finish_env(th, th->cfp->sp);
+     }
+     construct_frame_inline(self, th, iseq, pc, local_c, local_v, stack_c, stack_v, method_p, lfp, dfp);
+     if (top_p) {
+         val = rb_funcall(rb_mCastOffCompiler, rb_intern("vm_exec"), 0);
+         if (bottom_p) {
+             if (th->cfp != rcfp || th->cfp->sp != rsp) {
+                 rb_bug("cast_off_deoptimize_inline: should not be reached(0)");
+             }
+         }
+         return val;
+     } else {
+         rb_block_t *blockptr = (rb_block_t *)(&(th->cfp->self));
+         blockptr->iseq = biseq;
+         return (VALUE)blockptr;
+     }
+ }
+ NOINLINE(static VALUE cast_off_deoptimize_inline(VALUE self, rb_iseq_t *iseq, rb_iseq_t *biseq, long pc, int local_c, VALUE *local_v, int stack_c, VALUE *stack_v, int top_p, int bottom_p, int method_p, VALUE *lfp, VALUE *dfp));
+
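/* note: construct_frame_noinline swaps the current frame for an equivalent interpreter
   frame: it backs the frame up into buf, slips a finish frame underneath, then restores
   the frame with its type, pc, iseq, and operand stack retargeted. */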
+ static void construct_frame_noinline(rb_thread_t *th, rb_iseq_t *iseq, long pc, int stack_c, VALUE *stack_v, int method_p, int lambda_p, rb_control_frame_t *buf)
+ {
+     rb_control_frame_t *cfp = th->cfp;
+     VALUE *sp;
+     VALUE frame = method_p ? VM_FRAME_MAGIC_METHOD : (lambda_p ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK);
+     int i;
+
+     /* insert finish frame */
+     MEMCPY(buf, cfp, rb_control_frame_t, 1); /* backup current frame */
+     vm_pop_frame(th); /* pop current frame */
+     rb_vm_set_finish_env(th, buf->sp); /* push finish frame to use vm_exec */
+     sp = th->cfp->sp;
+     cfp = th->cfp = RUBY_VM_NEXT_CONTROL_FRAME(th->cfp);
+     MEMCPY(cfp, buf, rb_control_frame_t, 1); /* restore current frame */
+     cfp->flag = cfp->flag - VM_FRAME_TYPE(cfp) + frame; /* modify frame type */
+     cfp->pc = iseq->iseq_encoded + pc;
+     cfp->iseq = iseq;
+     cfp->bp = sp;
+     for(i = 0; i < stack_c; i++, sp++) {
+         *sp = stack_v[i];
+     }
+     cfp->sp = sp;
+ }
+
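/* note: instrument_parent_frame rewinds the pc of the enclosing (dfp owner) frame to
   parent_pc when a block is deoptimized; a negative parent_pc asserts there is no
   enclosing frame to fix up. */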
+ static void instrument_parent_frame(rb_thread_t *th, int parent_pc)
+ {
+     rb_control_frame_t *cfp = th->cfp;
+     VALUE *dfp = GC_GUARDED_PTR_REF(cfp->dfp[0]);
+
+     if (parent_pc < 0) {
+         if (th->cfp->lfp != th->cfp->dfp) {
+             rb_bug("instrument_parent_frame: should not be reached(0)");
+         }
+         return;
+     }
+     if (th->cfp->lfp == th->cfp->dfp) {
+         rb_bug("instrument_parent_frame: should not be reached(1)");
+     }
+
+     while ((VALUE *) cfp < th->stack + th->stack_size) {
+         if (cfp->dfp == dfp) {
+             rb_iseq_t *piseq = cfp->iseq;
+             if (rb_class_of(piseq->self) != rb_cISeq) {
+                 rb_bug("instrument_parent_frame: should not be reached(2)");
+             }
+             cfp->pc = piseq->iseq_encoded + parent_pc;
+             return;
+         }
+         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
+     }
+ }
+
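/* note: cast_off_deoptimize_noinline is the out-of-line deoptimization path: rebuild the
   frame, fix up the parent, run vm_exec, then restore the saved frame and check that the
   stack is balanced. */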
+ static VALUE cast_off_deoptimize_noinline(VALUE self, rb_iseq_t *iseq, unsigned long pc, int stack_c, VALUE *stack_v, int method_p, int lambda_p, int parent_pc)
+ {
+     VALUE val;
+     VALUE thval = rb_thread_current();
+     rb_thread_t *th = DATA_PTR(thval);
+     rb_control_frame_t *rcfp = th->cfp;
+     VALUE *rsp = rcfp->sp;
+     rb_control_frame_t buf;
+
+     construct_frame_noinline(th, iseq, pc, stack_c, stack_v, method_p, lambda_p, &buf);
+     instrument_parent_frame(th, parent_pc);
+     val = rb_funcall(rb_mCastOffCompiler, rb_intern("vm_exec"), 0);
+     th->cfp = RUBY_VM_NEXT_CONTROL_FRAME(th->cfp);
+     MEMCPY(th->cfp, &buf, rb_control_frame_t, 1); /* restore frame */
+     if (th->cfp != rcfp || th->cfp->sp != rsp) {
+         rb_bug("cast_off_deoptimize_ifunc: should not be reached (2)");
+     }
+
+     return val;
+ }
+ NOINLINE(static VALUE cast_off_deoptimize_noinline(VALUE self, rb_iseq_t *iseq, unsigned long pc, int stack_c, VALUE *stack_v, int method_p, int lambda_p, int parent_pc));
+
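/* note: cast_off_get_iv_table_ptr forces self's instance variables out of embedded
   (ROBJECT_EMBED) storage into a heap table so compiled code can cache a stable pointer. */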
+ static VALUE *cast_off_get_iv_table_ptr(VALUE self)
+ {
+     VALUE *iv_table_ptr = ROBJECT(self)->as.heap.ivptr;
+
+     if (!iv_table_ptr) {
+         RBASIC(self)->flags |= ROBJECT_EMBED;
+     }
+     if (RBASIC(self)->flags & ROBJECT_EMBED) {
+         int i;
+         int len = ROBJECT_EMBED_LEN_MAX;
+         VALUE parent = rb_obj_class(self);
+         VALUE *ptr = ROBJECT_IVPTR(self);
+         struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(parent);
+
+         if (!iv_index_tbl) rb_bug("should not be reached");
+         if (iv_index_tbl->num_entries <= len) rb_bug("should not be reached");
+         iv_table_ptr = ALLOC_N(VALUE, iv_index_tbl->num_entries);
+         MEMCPY(iv_table_ptr, ptr, VALUE, len);
+         for (i = len; i < iv_index_tbl->num_entries; i++) {
+             iv_table_ptr[i] = Qnil;
+         }
+         RBASIC(self)->flags &= ~ROBJECT_EMBED;
+         ROBJECT(self)->as.heap.ivptr = iv_table_ptr;
+     }
+
+     return iv_table_ptr;
+ }
+ NOINLINE(static VALUE *cast_off_get_iv_table_ptr(VALUE self));
+
+ static long cast_off_get_iv_index(struct st_table *iv_index_tbl, ID iv_id)
+ {
+     st_data_t index;
+
+     if (!st_lookup(iv_index_tbl, (st_data_t)iv_id, &index)) {
+         index = iv_index_tbl->num_entries;
+         st_add_direct(iv_index_tbl, (st_data_t)iv_id, index);
+     }
+
+     return (long)index;
+ }
+ NOINLINE(static long cast_off_get_iv_index(struct st_table *iv_index_tbl, ID iv_id));
+
+ static struct st_table *cast_off_get_iv_index_tbl(VALUE self)
+ {
+     struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(self);
+
+     if (!iv_index_tbl) {
+         VALUE parent = rb_obj_class(self);
+         iv_index_tbl = RCLASS_IV_INDEX_TBL(parent);
+         if (!iv_index_tbl) iv_index_tbl = RCLASS_IV_INDEX_TBL(parent) = st_init_numtable();
+     }
+
+     return iv_index_tbl;
+ }
+ NOINLINE(static struct st_table *cast_off_get_iv_index_tbl(VALUE self));
+
+ static int cast_off_singleton_p(VALUE klass)
+ {
+     VALUE k0 = rb_class_of(klass);
+
+     if (FL_TEST(k0, FL_SINGLETON)) {
+         VALUE k1 = rb_obj_class(klass);
+         if (k1 == rb_cClass) {
+             return 1;
+         } else if (k1 == rb_cModule) {
+             return 1;
+         } else {
+             rb_raise(rb_eCastOffExecutionError, "CastOff can't handle singleton object without Class and Module");
+         }
+     } else {
+         return 0;
+     }
+ }
+ NOINLINE(static int cast_off_singleton_p(VALUE klass));
+
+ static void should_be_singleton(VALUE klass)
+ {
+     if (cast_off_singleton_p(klass)) {
+         return;
+     } else {
+         rb_raise(rb_eCastOffExecutionError, "unexpected method(1)");
+     }
+ }
+ NOINLINE(static void should_be_singleton(VALUE klass));
+
+ static void should_be_cfunc(rb_method_entry_t *me)
+ {
+     if (!(me && me->def->type == VM_METHOD_TYPE_CFUNC)) {
+         rb_raise(rb_eCastOffExecutionError, "unexpected method(0)");
+     }
+
+     return;
+ }
+ NOINLINE(static void should_be_cfunc(rb_method_entry_t *me));
+
+ static void* c_function_pointer(rb_method_entry_t *me)
+ {
+     if (me && me->def->type == VM_METHOD_TYPE_CFUNC) {
+         return me->def->body.cfunc.func;
+     } else {
+         return NULL;
+     }
+ }
+ NOINLINE(static void* c_function_pointer(rb_method_entry_t *me));
+
+ static int c_function_argc(rb_method_entry_t *me)
+ {
+     if (!(me && me->def->type == VM_METHOD_TYPE_CFUNC)) {
+         rb_bug("c_function_argc: should not be reached");
+     }
+
+     return me->def->body.cfunc.argc;
+ }
+ NOINLINE(static int c_function_argc(rb_method_entry_t *me));
+
+ typedef struct method_wrapper_struct {
+     VALUE class_or_module;
+     VALUE class_or_module_wrapper;
+     ID mid;
+ } method_wrapper_t; /* FIXME */
+
+ static inline int method_wrapper_fptr_eq(VALUE self, VALUE (*fptr)(ANYARGS))
+ {
+     if (rb_class_of(self) == rb_cCastOffMethodWrapper) {
+         method_wrapper_t *wrapper = DATA_PTR(self);
+         rb_method_entry_t *me = search_method(wrapper->class_or_module, wrapper->mid);
+
+         if (me && me->def->type == VM_METHOD_TYPE_CFUNC) {
+             return me->def->body.cfunc.func == fptr;
+         } else {
+             return 0;
+         }
+     } else {
+         rb_bug("method_wrapper_eq: should not be reached(0)");
+     }
+ }
+
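/* note: should_be_call_directly_p scans Configuration::DIRECT_CALL_TARGETS for a method
   wrapper whose resolved C function equals fptr. */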
+ static int should_be_call_directly_p(VALUE (*fptr)(ANYARGS))
+ {
+     static ID id_direct_call_targets;
+     VALUE direct_call_targets;
+     VALUE *mptrs;
+     int i, len;
+
+     if (!id_direct_call_targets) id_direct_call_targets = rb_intern("DIRECT_CALL_TARGETS");
+     direct_call_targets = rb_const_get(rb_cCastOffConfiguration, id_direct_call_targets);
+     mptrs = RARRAY_PTR(direct_call_targets);
+     len = RARRAY_LEN(direct_call_targets);
+
+     for (i = 0; i < len; i++) {
+         VALUE m = mptrs[i];
+         if (method_wrapper_fptr_eq(m, fptr)) {
+             return 1;
+         }
+     }
+
+     return 0;
+ }
+ NOINLINE(static int should_be_call_directly_p(VALUE (*fptr)(ANYARGS)));
+
+ #define cast_off_get_constant(klass, id) cast_off_get_ev_const(self, cast_off_orig_iseq->cref_stack, (klass), (id))
+ #define cast_off_get_cvar_base() vm_get_cvar_base(cast_off_orig_iseq->cref_stack)
+
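/* note: cast_off_get_ev_const mirrors the interpreter's constant lookup: search the
   lexical scope recorded in the cref chain (triggering autoload as needed), then fall
   back to rb_const_get on the innermost cref class or self's class. */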
+ static VALUE
+ cast_off_get_ev_const(VALUE self, NODE *root_cref, VALUE orig_klass, ID id)
+ {
+     VALUE val;
+
+     if (orig_klass == Qnil) {
+         /* in current lexical scope */
+         const NODE *cref;
+         VALUE klass = orig_klass;
+
+         while (root_cref && root_cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
+             root_cref = root_cref->nd_next;
+         }
+         cref = root_cref;
+         while (cref && cref->nd_next) {
+             if (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
+                 klass = Qnil;
+             } else {
+                 klass = cref->nd_clss;
+             }
+             cref = cref->nd_next;
+
+             if (!NIL_P(klass)) {
+                 VALUE am = 0;
+                 st_data_t data;
+               search_continue:
+                 if (RCLASS_CONST_TBL(klass) &&
+                     st_lookup(RCLASS_CONST_TBL(klass), id, &data)) {
+                     val = ((rb_const_entry_t*)data)->value;
+                     if (val == Qundef) {
+                         if (am == klass) break;
+                         am = klass;
+                         rb_autoload_load(klass, id);
+                         goto search_continue;
+                     } else {
+                         return val;
+                     }
+                 }
+             }
+         }
+
+         /* search self */
+         if (root_cref && !NIL_P(root_cref->nd_clss)) {
+             klass = root_cref->nd_clss;
+         } else {
+             klass = CLASS_OF(self);
+         }
+         return rb_const_get(klass, id);
+     } else {
+         vm_check_if_namespace(orig_klass);
+         return rb_const_get(orig_klass, id); /* FIXME */
+         /* return rb_public_const_get_from(orig_klass, id); */
+     }
+ }
+ NOINLINE(static VALUE cast_off_get_ev_const(VALUE self, NODE *root_cref, VALUE orig_klass, ID id));
+
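/* note: the block below is compiled out (#if 0); it sketches an inline cache for
   instance-variable reads and writes that was apparently never finished (see the FIXMEs
   about GC marking). */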
+ #if 0
+ /* cache->obj should be GC-marked */
+ typedef struct cast_off_ivar_cache_struct {
+     VALUE obj; /* FIXME should be gc mark ? */
+     VALUE klass; /* FIXME should be gc mark ? */
+     long index;
+     VALUE *ptr;
+ } cast_off_ivar_cache_t;
+
+ static inline VALUE
+ cast_off_getivar(VALUE self, ID id, cast_off_ivar_cache_t *cache)
+ {
+     if (TYPE(self) == T_OBJECT) {
+         VALUE val, klass;
+
+         if (cache->obj == self) {
+             return *cache->ptr;
+         }
+
+         val = Qnil;
+         klass = RBASIC(self)->klass;
+
+         if (cache->klass == klass) {
+             long index = cache->index;
+             long len = ROBJECT_NUMIV(self);
+             VALUE *ptr = ROBJECT_IVPTR(self);
+
+             if (index < len) {
+                 val = ptr[index];
+                 cache->obj = self;
+                 cache->ptr = ptr + index;
+             } else {
+                 cache->obj = Qundef;
+             }
+         } else {
+             st_data_t index;
+             long len = ROBJECT_NUMIV(self);
+             VALUE *ptr = ROBJECT_IVPTR(self);
+             struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(self);
+
+             if (iv_index_tbl && st_lookup(iv_index_tbl, id, &index)) {
+                 if ((long)index < len) {
+                     val = ptr[index];
+                     cache->obj = self;
+                     cache->ptr = ptr + index;
+                 } else {
+                     cache->obj = Qundef;
+                 }
+                 cache->klass = klass;
+                 cache->index = index;
+             } else {
+                 cache->obj = Qundef;
+                 cache->klass = Qundef;
+             }
+         }
+         return val;
+     } else {
+         return rb_ivar_get(self, id);
+     }
+ }
+
+ static inline void
+ cast_off_setivar(VALUE self, ID id, VALUE val, cast_off_ivar_cache_t *cache)
+ {
+     if (!OBJ_UNTRUSTED(self) && rb_safe_level() >= 4) {
+         rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
+     }
+
+     rb_check_frozen(self);
+
+     if (TYPE(self) == T_OBJECT) {
+         VALUE klass = RBASIC(self)->klass;
+         st_data_t index;
+
+         if (cache->obj == self) {
+             *cache->ptr = val;
+             return;
+         }
+
+         if (cache->klass == klass) {
+             long index = cache->index;
+             long len = ROBJECT_NUMIV(self);
+             VALUE *ptr = ROBJECT_IVPTR(self);
+
+             if (index < len) {
+                 ptr[index] = val;
+                 cache->obj = self;
+                 cache->ptr = ptr + index;
+                 return; /* inline cache hit */
+             } else {
+                 cache->obj = Qundef;
+             }
+         } else {
+             struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(self);
+
+             if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
+                 cache->obj = self;
+                 cache->ptr = ROBJECT_IVPTR(self) + index;
+                 cache->klass = klass;
+                 cache->index = index;
+             } else {
+                 cache->obj = Qundef;
+                 cache->klass = Qundef;
+             }
+             /* fall through */
+         }
+     }
+     rb_ivar_set(self, id, val);
+ }
+ #endif