ytljit 0.0.1
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- data/README +29 -0
- data/Rakefile +22 -0
- data/ext/code_alloc.c +266 -0
- data/ext/extconf.rb +3 -0
- data/ext/ytljit.c +527 -0
- data/ext/ytljit.h +285 -0
- data/lib/ytljit/asm.rb +205 -0
- data/lib/ytljit/asmext.rb +199 -0
- data/lib/ytljit/asmext_x64.rb +212 -0
- data/lib/ytljit/asmext_x86.rb +128 -0
- data/lib/ytljit/asmutil.rb +182 -0
- data/lib/ytljit/codespace.rb +92 -0
- data/lib/ytljit/error.rb +7 -0
- data/lib/ytljit/instruction.rb +138 -0
- data/lib/ytljit/instruction_ia.rb +1298 -0
- data/lib/ytljit/instruction_x64.rb +41 -0
- data/lib/ytljit/instruction_x86.rb +11 -0
- data/lib/ytljit/marshal.rb +133 -0
- data/lib/ytljit/matcher.rb +235 -0
- data/lib/ytljit/rubyvm.rb +63 -0
- data/lib/ytljit/struct.rb +125 -0
- data/lib/ytljit/type.rb +112 -0
- data/lib/ytljit/util.rb +63 -0
- data/lib/ytljit/vm.rb +1649 -0
- data/lib/ytljit/vm_codegen.rb +491 -0
- data/lib/ytljit/vm_inline_method.rb +85 -0
- data/lib/ytljit/vm_inspect.rb +74 -0
- data/lib/ytljit/vm_sendnode.rb +561 -0
- data/lib/ytljit/vm_trans.rb +508 -0
- data/lib/ytljit/vm_type.rb +299 -0
- data/lib/ytljit/vm_type_gen.rb +158 -0
- data/lib/ytljit/vm_typeinf.rb +98 -0
- data/lib/ytljit.rb +46 -0
- data/test/asmsample.rb +117 -0
- data/test/cstest.rb +61 -0
- data/test/marshaltest.rb +27 -0
- data/test/test_assemble.rb +148 -0
- data/test/test_assemble2.rb +286 -0
- data/test/test_codespace.rb +102 -0
- data/test/test_typeinf.rb +21 -0
- data/test/tivmtest.rb +54 -0
- data/test/vmtest.rb +59 -0
- data/test/vmtest2.rb +41 -0
- data/test/vmtest3.rb +22 -0
- data/test/vmtest_compile_only.rb +41 -0
- data/test/vmtest_execute_only.rb +22 -0
- metadata +121 -0
data/lib/ytljit/asmext_x64.rb
@@ -0,0 +1,212 @@
+module YTLJit
+  module FuncArgX64CommonMixin
+    include AbsArch
+    include X64
+    ARGPOS2REG = [RDI, RSI, RDX, RCX, R8, R9]
+    ARGPOS2FREG = [XMM0, XMM1, XMM2, XMM3]
+  end
+
+  module FunctionArgumentX64MixinInt
+    include FuncArgX64CommonMixin
+
+    def argpos2reg
+      case @abi_kind
+      when :c
+        ARGPOS2REG
+
+      when :ytl
+        []
+
+      else
+        raise "#{@abi_kind}"
+      end
+    end
+  end
+
+  module FunctionArgumentX64MixinFloat
+    include FuncArgX64CommonMixin
+
+    def argpos2reg
+      case @abi_kind
+      when :c
+        ARGPOS2FREG
+
+      when :ytl
+        []
+
+      else
+        raise "#{@abi_kind}"
+      end
+    end
+  end
+
+  module FunctionArgumentX64MixinCommon
+    include FuncArgX64CommonMixin
+
+    def dst_opecode
+      if @no < argpos2reg.size then
+        argpos2reg[@no]
+      else
+        spos = @no - argpos2reg.size
+        OpIndirect.new(SPR, OpImmidiate8.new(spos * 8))
+      end
+    end
+
+    def src_opecode
+      if @no < argpos2reg.size then
+        argpos2reg[@no]
+      else
+        # the +1 skips the return address slot
+        spos = @no - argpos2reg.size + 1
+        OpIndirect.new(SPR, OpImmidiate8.new(spos * 8))
+      end
+    end
+
+    def gen_access_dst(gen, inst, dst, src, src2)
+      code = ""
+      asm = gen.asm
+      fainfo = gen.funcarg_info
+
+      if @no == 0 then
+        fainfo.area_allocate_pos.push nil
+        fainfo.used_arg_tab.push Hash.new
+      end
+
+      # argpos2reg.size can be 0, so this must be an "if", not an "elsif"
+      if @no == argpos2reg.size then
+        offset = asm.offset
+        code += asm.update_state(gen.sub(SPR, 0))
+        fainfo.area_allocate_pos[-1] = offset
+      end
+
+      if @no < argpos2reg.size then
+        argreg = argpos2reg[@no]
+
+        # For nested function calls we need to save the previous register.
+        if asm.retry_mode != :change_op and
+           fainfo.used_arg_tab.last[@no] then
+          asm.update_state(gen.push(argreg))
+          fainfo.push argreg
+        end
+        code += asm.update_state(gen.mov(argreg, src))
+      else
+        # spilled to the stack
+        spos = @no - argpos2reg.size
+        argdst = OpIndirect.new(SPR, OpImmidiate8.new(spos * 8))
+
+        if src.is_a?(OpRegXMM) then
+          code += asm.update_state(gen.movsd(argdst, src))
+        else
+          if inst == :mov and !src.is_a?(OpRegistor) then
+            code += asm.update_state(gen.send(inst, TMPR, src))
+            code += asm.update_state(gen.mov(argdst, TMPR))
+          else
+            code += asm.update_state(gen.mov(argdst, src))
+          end
+        end
+      end
+
+      if asm.retry_mode != :change_op then
+        # in retry mode fainfo.used_arg_tab is deleted
+        fainfo.used_arg_tab.last[@no] = @size
+      end
+      code
+    end
+
+    # Access the argument passed by the caller
+    #
+    def gen_access_src(gen, inst, dst, src, src2)
+      asm = gen.asm
+      fainfo = gen.funcarg_info
+      code = ""
+      if @no < argpos2reg.size then
+        code += asm.update_state(gen.mov(TMPR, argpos2reg[@no]))
+      else
+        spos = @no - argpos2reg.size
+        offset = 8 + spos * 8
+        code += asm.update_state(gen.mov(TMPR, OpIndirect.new(SPR, offset)))
+      end
+      code += asm.update_state(gen.send(inst, src, TMPR))
+      code
+    end
+  end
+
+  module GeneratorExtendX64Mixin
+    include FuncArgX64CommonMixin
+
+    def mov64(dst, src)
+      case dst
+      when OpIndirect
+        case src
+        when Integer
+          disp = dst.disp
+          dst2 = dst.class.new(dst.reg, disp + 4)
+          bit32val = 1 << 32
+          code = mov(dst2, src / bit32val)
+          code += mov(dst, src % bit32val)
+          code
+        else
+          nosupported_addressing_mode(:mov64, dst, src)
+        end
+      else
+        nosupported_addressing_mode(:mov64, dst, src)
+      end
+    end
+
+    def call_with_arg_get_argsize(addr, argnum)
+      argnum * 8
+    end
+
+    def call_with_arg(addr, argnum, argsize)
+      fainfo = funcarg_info
+
+      orgaddress = @asm.current_address
+      code = ""
+      code += @asm.update_state(mov(RAX, OpImmidiate32.new(argnum)))
+      code += @asm.update_state(call(addr))
+      callpos = @asm.current_address - @asm.output_stream.base_address
+      if @asm.retry_mode != :change_op then
+        return [code, callpos]
+      end
+
+      offset = @funcarg_info.area_allocate_pos.pop
+      if offset then
+        imm = OpImmidiate8.new(argsize)
+        code += @asm.update_state(add(SPR, imm))
+        alloc_argument_area = lambda {
+          @asm.with_current_address(@asm.output_stream.base_address + offset) {
+            @asm.output_stream[offset] = sub(SPR, argsize)
+          }
+        }
+        @asm.after_patch_tab.push alloc_argument_area
+      end
+
+      @funcarg_info.update_maxargs(argnum)
+      @funcarg_info.used_arg_tab.pop
+
+=begin
+      # Save already stored restorer
+      uat = @funcarg_info.used_arg_tab.last
+      while !fainfo.empty? do
+        nreg = fainfo.pop
+        if argpos = ARGPOS2REG.index(nreg) then
+          if uat[argpos] then
+            fainfo.push nreg
+            break
+          else
+            code += @asm.update_state(pop(nreg))
+            uat[argpos] = true
+          end
+        else
+          fainfo.push nreg
+          break
+        end
+      end
+=end
+
+      @asm.current_address = orgaddress
+
+      [code, callpos]
+    end
+  end
+end
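
ARGPOS2REG above encodes the System V AMD64 calling convention: the first six integer arguments travel in RDI, RSI, RDX, RCX, R8 and R9, and the rest spill to 8-byte stack slots, which is exactly the branch taken in dst_opecode. A minimal standalone sketch of that placement logic, using plain symbols rather than the gem's OpRegistor singletons:

    # Placement logic of dst_opecode, restated over plain symbols.
    ARGPOS2REG = [:rdi, :rsi, :rdx, :rcx, :r8, :r9]

    def arg_location(no)
      if no < ARGPOS2REG.size
        ARGPOS2REG[no]                # argument travels in a register
      else
        spos = no - ARGPOS2REG.size   # spill slot index in the argument area
        "[rsp + #{spos * 8}]"         # one 8-byte stack slot per spilled arg
      end
    end

    (0..7).each { |i| puts "arg#{i} -> #{arg_location(i)}" }

From the callee's side src_opecode shifts every spill slot by one (`spos = @no - argpos2reg.size + 1`) because `call` has pushed the return address on top of the arguments; the same reasoning gives `offset = 8 + spos * 8` in gen_access_src. mov64 in turn splits a 64-bit immediate into src / (1 << 32), stored at dst+4, and src % (1 << 32), stored at dst, matching little-endian dword order.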

data/lib/ytljit/asmext_x86.rb
@@ -0,0 +1,128 @@
+module YTLJit
+  module FunctionArgumentX86Mixin
+    include AbsArch
+
+    def size
+      case @abi_kind
+      when :c
+        AsmType::MACHINE_WORD.size
+
+      when :ytl
+        8
+      end
+    end
+
+    def dst_opecode
+      OpIndirect.new(SPR, OpImmidiate8.new(@no * size))
+    end
+
+
+    def src_opecode
+      # AsmType::MACHINE_WORD.size is the return address slot
+      offset = AsmType::MACHINE_WORD.size + @no * size
+      OpIndirect.new(SPR, offset)
+    end
+
+    def gen_access_dst(gen, inst, dst, src, src2)
+      argdst = dst_opecode
+      code = ""
+      asm = gen.asm
+      fainfo = gen.funcarg_info
+      if @no == 0 then
+        offset = asm.offset
+        code += asm.update_state(gen.sub(SPR, fainfo.maxargs * size))
+        fainfo.area_allocate_pos.push offset
+        fainfo.used_arg_tab.push Hash.new
+      end
+
+      if asm.retry_mode != :change_op then
+        # in retry mode fainfo.used_arg_tab is deleted
+        fainfo.used_arg_tab.last[@no] = size
+      end
+      if src.is_a?(OpRegXMM) then
+        code += asm.update_state(gen.movsd(argdst, src))
+      else
+        if inst == :mov and !src.is_a?(OpRegistor) then
+          code += asm.update_state(gen.send(inst, TMPR, src))
+          code += asm.update_state(gen.mov(argdst, TMPR))
+        else
+          code += asm.update_state(gen.mov(argdst, src))
+        end
+      end
+      code
+    end
+
+    # Access the argument passed by the caller.
+    # Valid only between function entry and the next change of the
+    # stack pointer.
+    #
+    def gen_access_src(gen, inst, dst, src, src2)
+      asm = gen.asm
+      fainfo = gen.funcarg_info
+      code = ""
+      code += asm.update_state(gen.mov(TMPR, src_opecode))
+      code += asm.update_state(gen.send(inst, src, TMPR))
+      code
+    end
+  end
+
+  module GeneratorExtendX86Mixin
+    include AbsArch
+
+    def mov64(dst, src)
+      case dst
+      when OpIndirect
+        case src
+        when Integer
+          disp = dst.disp
+          dst2 = dst.class.new(dst.reg, disp + 4)
+          bit32val = 1 << 32
+          code = mov(dst2, src / bit32val)
+          code += mov(dst, src % bit32val)
+          code
+        else
+          nosupported_addressing_mode(:mov64, dst, src)
+        end
+      else
+        nosupported_addressing_mode(:mov64, dst, src)
+      end
+    end
+
+    def call_with_arg_get_argsize(addr, argnum)
+      argsize = 0
+      argnum.times do |i|
+        if @funcarg_info.used_arg_tab.last[i] then
+          argsize += @funcarg_info.used_arg_tab.last[i]
+        else
+          STDERR.print "Warning: arg not initialized -- #{i}\n"
+          argsize += 4
+        end
+      end
+      argsize
+    end
+
+    def call_with_arg(addr, argnum, argsize)
+      orgaddress = @asm.current_address
+      code = @asm.update_state(call(addr))
+      callpos = @asm.current_address - @asm.output_stream.base_address
+      if @asm.retry_mode == :change_op then
+        return [code, callpos]
+      end
+
+      code += @asm.update_state(add(SPR, OpImmidiate8.new(argsize)))
+      offset = @funcarg_info.area_allocate_pos.pop
+      alloc_argument_area = lambda {
+        @asm.with_current_address(@asm.output_stream.base_address + offset) {
+          @asm.output_stream[offset] = sub(SPR, argsize)
+        }
+      }
+      @asm.after_patch_tab.push alloc_argument_area
+
+      @funcarg_info.update_maxargs(argnum)
+      @funcarg_info.used_arg_tab.pop
+      @asm.current_address = orgaddress
+
+      [code, callpos]
+    end
+  end
+end
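
On 32-bit x86 there are no argument registers in this scheme: the caller writes argument no to [esp + no*size] (dst_opecode) and the callee reads it back at [esp + MACHINE_WORD + no*size] (src_opecode), the extra word being the return address that `call` pushed in between. A sketch of just that offset arithmetic, with WORD = 4 standing in for AsmType::MACHINE_WORD.size under the :c ABI:

    WORD = 4  # assumed 32-bit machine word, i.e. AsmType::MACHINE_WORD.size

    # Caller's view: outgoing argument `no` lives at [esp + no*WORD].
    def dst_offset(no)
      no * WORD
    end

    # Callee's view: the extra WORD skips the return address pushed by call.
    def src_offset(no)
      WORD + no * WORD
    end

    (0..2).each do |i|
      puts "arg#{i}: caller writes [esp+#{dst_offset(i)}], callee reads [esp+#{src_offset(i)}]"
    end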

data/lib/ytljit/asmutil.rb
@@ -0,0 +1,182 @@
+module YTLJit
+  module SSE
+    XMM0 = OpRXMM0.instance
+    XMM1 = OpRXMM1.instance
+    XMM2 = OpRXMM2.instance
+    XMM3 = OpRXMM3.instance
+    XMM4 = OpRXMM4.instance
+    XMM5 = OpRXMM5.instance
+    XMM6 = OpRXMM6.instance
+    XMM7 = OpRXMM7.instance
+  end
+
+  module SSE64
+    XMM8 = OpRXMM8.instance
+    XMM9 = OpRXMM9.instance
+    XMM10 = OpRXMM10.instance
+    XMM11 = OpRXMM11.instance
+    XMM12 = OpRXMM12.instance
+    XMM13 = OpRXMM13.instance
+    XMM14 = OpRXMM14.instance
+    XMM15 = OpRXMM15.instance
+  end
+
+  module X86
+    EAX = OpEAX.instance
+    EDX = OpEDX.instance
+    ECX = OpECX.instance
+    EBX = OpEBX.instance
+    ESP = OpESP.instance
+    EBP = OpEBP.instance
+    ESI = OpESI.instance
+    EDI = OpEDI.instance
+
+    AL = OpAL.instance
+    CL = OpCL.instance
+    DL = OpDL.instance
+    BL = OpBL.instance
+
+    INDIRECT_EAX = OpIndirect.new(EAX)
+    INDIRECT_EDX = OpIndirect.new(EDX)
+    INDIRECT_ECX = OpIndirect.new(ECX)
+    INDIRECT_EBX = OpIndirect.new(EBX)
+    INDIRECT_ESP = OpIndirect.new(ESP)
+    INDIRECT_EBP = OpIndirect.new(EBP)
+    INDIRECT_ESI = OpIndirect.new(ESI)
+    INDIRECT_EDI = OpIndirect.new(EDI)
+
+    include SSE
+  end
+
+  module X64
+    RAX = OpRAX.instance
+    RDX = OpRDX.instance
+    RCX = OpRCX.instance
+    RBX = OpRBX.instance
+    RSP = OpRSP.instance
+    RBP = OpRBP.instance
+    RSI = OpRSI.instance
+    RDI = OpRDI.instance
+
+    R8 = OpR8.instance
+    R9 = OpR9.instance
+    R10 = OpR10.instance
+    R11 = OpR11.instance
+    R12 = OpR12.instance
+    R13 = OpR13.instance
+    R14 = OpR14.instance
+    R15 = OpR15.instance
+
+    INDIRECT_RAX = OpIndirect.new(RAX)
+    INDIRECT_RDX = OpIndirect.new(RDX)
+    INDIRECT_RCX = OpIndirect.new(RCX)
+    INDIRECT_RBX = OpIndirect.new(RBX)
+    INDIRECT_RSP = OpIndirect.new(RSP)
+    INDIRECT_RBP = OpIndirect.new(RBP)
+    INDIRECT_RSI = OpIndirect.new(RSI)
+    INDIRECT_RDI = OpIndirect.new(RDI)
+
+    include SSE
+    include SSE64
+  end
+
+  module AbsArch
+    AL = OpAL.instance
+    CL = OpCL.instance
+    DL = OpDL.instance
+    BL = OpBL.instance
+
+    include SSE
+    case $ruby_platform
+    when /i.86/
+      TMPR = OpEAX.instance
+      TMPR2 = OpEDX.instance
+      TMPR3 = OpECX.instance
+      RETR = OpEAX.instance
+      SPR = OpESP.instance
+      BPR = OpEBP.instance
+    when /x86_64/
+      TMPR = OpRAX.instance
+      # TMPR2 = OpRDX.instance
+      # TMPR3 = OpRCX.instance
+      TMPR2 = OpR10.instance
+      TMPR3 = OpR11.instance
+      RETR = OpRAX.instance
+      SPR = OpRSP.instance
+      BPR = OpRBP.instance
+    end
+    INDIRECT_TMPR = OpIndirect.new(TMPR)
+    INDIRECT_TMPR2 = OpIndirect.new(TMPR2)
+    INDIRECT_RETR = OpIndirect.new(RETR)
+    INDIRECT_SPR = OpIndirect.new(SPR)
+    INDIRECT_BPR = OpIndirect.new(BPR)
+    FUNC_ARG = Hash.new {|hash, key|
+      hash[key] = FunctionArgumentInt.new(key, :c)
+    }
+    FUNC_FLOAT_ARG = Hash.new {|hash, key|
+      hash[key] = FunctionArgumentFloat.new(key, :c)
+    }
+    FUNC_ARG_YTL = Hash.new {|hash, key|
+      hash[key] = FunctionArgumentInt.new(key, :ytl)
+    }
+    FUNC_FLOAT_ARG_YTL = Hash.new {|hash, key|
+      hash[key] = FunctionArgumentFloat.new(key, :ytl)
+    }
+  end
+
+  module InternalRubyType
+    include AbsArch
+    VALUE = AsmType::MACHINE_WORD
+    P_CHAR = AsmType::Pointer.new(AsmType::INT8)
+
+    RBasic = AsmType::Struct.new(
+      VALUE, :flags,
+      VALUE, :klass
+    )
+    RString = AsmType::Struct.new(
+      RBasic, :basic,
+      AsmType::Union.new(
+        AsmType::Struct.new(
+          AsmType::INT32, :len,
+          P_CHAR, :ptr,
+          AsmType::Union.new(
+            AsmType::INT32, :capa,
+            VALUE, :shared,
+          ), :aux
+        ), :heap,
+        AsmType::Array.new(
+          AsmType::INT8,
+          24
+        ), :ary
+      ), :as
+    )
+
+    RFloat = AsmType::Struct.new(
+      RBasic, :basic,
+      AsmType::DOUBLE, :float_value
+    )
+
+    EMBEDER_FLAG = (1 << 13)
+    def self.rstring_ptr(str, csstart, cscont)
+      cs_embed = CodeSpace.new
+
+      asm = Assembler.new(csstart)
+      rsstr = TypedData.new(InternalRubyType::RString, str)
+      # asm.step_mode = true
+      asm.with_retry do
+        asm.mov(TMPR, rsstr[:basic][:flags])
+        asm.and(TMPR, EMBEDER_FLAG)
+        asm.jz(cs_embed.var_base_address)
+        asm.mov(TMPR, rsstr[:as][:heap][:ptr])
+        asm.jmp(cscont.var_base_address)
+      end
+
+      asm = Assembler.new(cs_embed)
+      # asm.step_mode = true
+      asm.with_retry do
+        asm.mov(TMPR, rsstr[:as][:ary])
+        asm.jmp(cscont.var_base_address)
+      end
+    end
+  end
+end
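
FUNC_ARG and its three variants use Hash.new with a default block as a lazily populated cache: the operand wrapper for an argument position is constructed on first access and memoized for every later lookup. A stripped-down sketch of the pattern, with a hypothetical FuncArg struct standing in for the gem's FunctionArgumentInt:

    # Lazily built, memoized operand cache in the style of FUNC_ARG.
    FuncArg = Struct.new(:no, :abi_kind)   # stand-in for FunctionArgumentInt
    FUNC_ARG = Hash.new { |hash, key| hash[key] = FuncArg.new(key, :c) }

    a = FUNC_ARG[0]   # built on first access
    b = FUNC_ARG[0]   # served from the cache
    p a.equal?(b)     # => true: one wrapper object per argument position

Since each wrapper is presumably determined entirely by its position and ABI kind, sharing one instance per position is safe and avoids allocating fresh objects in the code generator.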

data/lib/ytljit/codespace.rb
@@ -0,0 +1,92 @@
+module YTLJit
+  class CodeSpace
+    @@disasm_cache = {}
+    @@disasmed_codespace = {}
+    def self.disasm_cache
+      @@disasm_cache
+    end
+
+    def initialize
+      @refer_operands = []
+      reset
+    end
+
+    attr :disasm_cache
+    attr :refer_operands
+
+    def reset
+      @org_base_address = base_address
+      self.current_pos = 0
+    end
+
+    def emit(code)
+      self[self.current_pos] = code
+    end
+
+    def var_base_address(offset = 0)
+      func = lambda {
+        base_address + offset
+      }
+      ovi32 = OpVarMemAddress.new(func)
+      @refer_operands.push ovi32
+      ovi32
+    end
+
+    def var_base_immidiate_address(offset = 0)
+      func = lambda {
+        base_address + offset
+      }
+      ovi32 = OpVarImmidiateAddress.new(func)
+      @refer_operands.push ovi32
+      ovi32
+    end
+
+    def update_refer
+      @refer_operands.each do |refop|
+        refop.refer.each do |stfn|
+          stfn.call
+        end
+      end
+    end
+
+    def fill_disasm_cache
+      if @@disasmed_codespace[self] then
+        return
+      end
+      @@disasmed_codespace[self] = true
+      tmpfp = Tempfile.open("ytljitcode")
+      tmpfp.write code
+      tmpfp.close(false)
+      # quick-and-dirty hack to work on Cygwin and Mac OS X/Core 2 Duo
+      # TODO: the BFD target and instruction set architecture should be
+      #       selected automatically
+      case $ruby_platform
+      when /x86_64-darwin/
+        objcopy_cmd = "gobjcopy -I binary -O mach-o-i386 -B i386 --adjust-vma=#{base_address} #{tmpfp.path}"
+        objdump_cmd = "gobjdump -M x86-64 -D #{tmpfp.path}"
+
+      when /x86_64/
+        objcopy_cmd = "objcopy -I binary -O elf64-x86-64 -B i386 --adjust-vma=#{base_address} #{tmpfp.path}"
+        objdump_cmd = "objdump -M x86-64 -D #{tmpfp.path}"
+
+      when /i.86/
+        objcopy_cmd = "objcopy -I binary -O elf32-i386 -B i386 --adjust-vma=#{base_address} #{tmpfp.path}"
+        objdump_cmd = "objdump -M i386 -D #{tmpfp.path}"
+      end
+      system(objcopy_cmd)
+      File.popen(objdump_cmd, "r") {|fp|
+        fp.readlines.each do |lin|
+          if /([0-9a-f]*):(\t[0-9a-f ]+? *\t.*)/ =~ lin then
+            @@disasm_cache[$1] = $2
+          end
+        end
+      }
+    end
+
+    def disassemble
+      fill_disasm_cache
+      @@disasm_cache.each do |add, asm|
+        print "#{add}: #{asm}\n"
+      end
+    end
+  end
+end
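
fill_disasm_cache round-trips the generated machine code through binutils: it dumps the raw bytes to a temp file, relabels them as an object file with objcopy (adjusting the virtual address to the codespace's base address), disassembles with objdump, and scrapes "address: bytes<TAB>mnemonic" pairs out of the listing with a regexp. A small sketch of just the scraping step, run against a canned objdump line so it works without binutils installed:

    # Parse objdump output lines the way fill_disasm_cache does; the sample
    # line is canned rather than produced by a live objdump invocation.
    disasm_cache = {}
    sample = "  4004d6:\t55                   \tpush   %rbp"
    if /([0-9a-f]+):(\t[0-9a-f ]+? *\t.*)/ =~ sample
      disasm_cache[$1] = $2
    end
    disasm_cache.each { |addr, insn| puts "#{addr}: #{insn.strip}" }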