pyvex 9.2.189__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. pyvex/__init__.py +92 -0
  2. pyvex/_register_info.py +1800 -0
  3. pyvex/arches.py +94 -0
  4. pyvex/block.py +697 -0
  5. pyvex/const.py +426 -0
  6. pyvex/const_val.py +26 -0
  7. pyvex/data_ref.py +55 -0
  8. pyvex/enums.py +156 -0
  9. pyvex/errors.py +31 -0
  10. pyvex/expr.py +974 -0
  11. pyvex/include/libvex.h +1029 -0
  12. pyvex/include/libvex_basictypes.h +236 -0
  13. pyvex/include/libvex_emnote.h +142 -0
  14. pyvex/include/libvex_guest_amd64.h +252 -0
  15. pyvex/include/libvex_guest_arm.h +224 -0
  16. pyvex/include/libvex_guest_arm64.h +203 -0
  17. pyvex/include/libvex_guest_mips32.h +175 -0
  18. pyvex/include/libvex_guest_mips64.h +173 -0
  19. pyvex/include/libvex_guest_offsets.h +941 -0
  20. pyvex/include/libvex_guest_ppc32.h +298 -0
  21. pyvex/include/libvex_guest_ppc64.h +343 -0
  22. pyvex/include/libvex_guest_riscv64.h +148 -0
  23. pyvex/include/libvex_guest_s390x.h +201 -0
  24. pyvex/include/libvex_guest_tilegx.h +149 -0
  25. pyvex/include/libvex_guest_x86.h +322 -0
  26. pyvex/include/libvex_ir.h +3113 -0
  27. pyvex/include/libvex_s390x_common.h +123 -0
  28. pyvex/include/libvex_trc_values.h +99 -0
  29. pyvex/include/pyvex.h +96 -0
  30. pyvex/lib/pyvex.dll +0 -0
  31. pyvex/lib/pyvex.lib +0 -0
  32. pyvex/lifting/__init__.py +18 -0
  33. pyvex/lifting/gym/README.md +7 -0
  34. pyvex/lifting/gym/__init__.py +5 -0
  35. pyvex/lifting/gym/aarch64_spotter.py +40 -0
  36. pyvex/lifting/gym/arm_spotter.py +427 -0
  37. pyvex/lifting/gym/x86_spotter.py +129 -0
  38. pyvex/lifting/libvex.py +117 -0
  39. pyvex/lifting/lift_function.py +304 -0
  40. pyvex/lifting/lifter.py +124 -0
  41. pyvex/lifting/post_processor.py +16 -0
  42. pyvex/lifting/util/__init__.py +14 -0
  43. pyvex/lifting/util/instr_helper.py +422 -0
  44. pyvex/lifting/util/lifter_helper.py +154 -0
  45. pyvex/lifting/util/syntax_wrapper.py +312 -0
  46. pyvex/lifting/util/vex_helper.py +301 -0
  47. pyvex/lifting/zerodivision.py +71 -0
  48. pyvex/native.py +63 -0
  49. pyvex/py.typed +1 -0
  50. pyvex/stmt.py +740 -0
  51. pyvex/types.py +48 -0
  52. pyvex/utils.py +63 -0
  53. pyvex/vex_ffi.py +1452 -0
  54. pyvex-9.2.189.dist-info/METADATA +181 -0
  55. pyvex-9.2.189.dist-info/RECORD +60 -0
  56. pyvex-9.2.189.dist-info/WHEEL +5 -0
  57. pyvex-9.2.189.dist-info/licenses/LICENSE +24 -0
  58. pyvex-9.2.189.dist-info/licenses/pyvex_c/LICENSE +339 -0
  59. pyvex-9.2.189.dist-info/licenses/vex/LICENSE.GPL +340 -0
  60. pyvex-9.2.189.dist-info/licenses/vex/LICENSE.README +23 -0
@@ -0,0 +1,312 @@
1
+ import functools
2
+ from typing import Union
3
+
4
+ from pyvex.const import get_type_size
5
+ from pyvex.expr import Const, IRExpr, RdTmp
6
+
7
+ from .vex_helper import IRSBCustomizer, Type
8
+
9
+
10
def checkparams(rhstype=None):
    """
    Decorator factory that normalizes the arguments of a VexValue method.

    Plain ``int`` arguments are promoted to VexValue constants — of
    ``rhstype`` when given, otherwise of the receiver's type — and all
    VexValue operands are checked to share the same IRSBCustomizer.
    """

    def decorator(fn):
        @functools.wraps(fn)
        def inner_decorator(self, *args, **kwargs):
            customizers = {v.irsb_c for v in [self, *args] if isinstance(v, VexValue)}  # pylint: disable=no-member
            assert len(customizers) == 1, "All VexValues must belong to the same irsb_c"
            converted = []
            for arg in args:
                if isinstance(arg, int):
                    thetype = rhstype if rhstype else self.ty
                    converted.append(VexValue.Constant(self.irsb_c, arg, thetype))
                elif isinstance(arg, VexValue):
                    converted.append(arg)
                else:
                    raise Exception("Cannot convert param %s" % str(arg))
            return fn(self, *converted, **kwargs)

        return inner_decorator

    return decorator
29
+
30
+
31
def vvifyresults(f):
    """Decorator: wrap the RdTmp/Const returned by ``f`` in a VexValue."""

    @functools.wraps(f)
    def decor(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        assert isinstance(result, (RdTmp, Const))
        return VexValue(self.irsb_c, result)

    return decor
39
+
40
+
41
class VexValue:
    """
    Wraps a VEX IR expression (RdTmp or Const) together with its IRSBCustomizer.

    Python operators are overloaded so lifter code can be written with ordinary
    arithmetic syntax; each operation emits the corresponding IR into the
    customizer and returns a new VexValue for the result.  The ``signed`` flag
    selects signed vs. unsigned variants of division, multiplication and
    comparisons.
    """

    def __init__(self, irsb_c: "IRSBCustomizer", rdt: "Union[RdTmp, Const]", signed=False):
        self.irsb_c = irsb_c
        self.ty = self.irsb_c.get_type(rdt)  # VEX type string, e.g. "Ity_I32"
        self.rdt = rdt
        self.width = get_type_size(self.ty)  # width in bits
        self._is_signed = signed

    @property
    def value(self):
        """The concrete integer value; only constant VexValues have one."""
        if isinstance(self.rdt, Const):
            return self.rdt.con.value
        else:
            raise ValueError("Non-constant VexValue has no value property")

    @property
    def signed(self):
        """A signed view of the same underlying expression."""
        return VexValue(self.irsb_c, self.rdt, True)

    @vvifyresults
    def widen_unsigned(self, ty):
        """Zero-extend to type ``ty``."""
        return self.irsb_c.op_widen_int_unsigned(self.rdt, ty)

    @vvifyresults
    def cast_to(self, ty, signed=False, high=False):
        """Cast to ``ty``, widening (sign- or zero-extending) or narrowing as needed."""
        return self.irsb_c.cast_to(self.rdt, ty, signed=signed, high=high)

    @vvifyresults
    def widen_signed(self, ty):
        """Sign-extend to type ``ty``."""
        return self.irsb_c.op_widen_int_signed(self.rdt, ty)

    @vvifyresults
    def narrow_high(self, ty):
        """Narrow to ``ty``, keeping the high half."""
        return self.irsb_c.op_narrow_int(self.rdt, ty, high_half=True)

    @vvifyresults
    def narrow_low(self, ty):
        """Narrow to ``ty``, keeping the low half."""
        return self.irsb_c.op_narrow_int(self.rdt, ty, high_half=False)

    # TODO at some point extend this to Vex nonconstants
    def __getitem__(self, idx):
        """Extract bit(s): an int index yields one bit, a slice a list of bits."""

        def getb(i):
            return VexValue(self.irsb_c, self.irsb_c.get_bit(self.rdt, i))

        def makeconstant(x):
            return VexValue.Constant(self.irsb_c, x, Type.int_8).rdt

        if not isinstance(idx, slice):
            # Normalize a possibly-negative index using slice semantics.
            actualindex = slice(idx).indices(self.width)[1]
            return getb(makeconstant(actualindex))
        else:
            return [getb(makeconstant(i)) for i in range(*idx.indices(self.width))]

    def __setitem__(self, idx, bval):
        """Set bit ``idx`` to ``bval`` by rebinding this VexValue to the result."""
        setted = self.set_bit(idx, bval)
        self.__init__(setted.irsb_c, setted.rdt)

    @checkparams(rhstype=Type.int_8)
    @vvifyresults
    def set_bit(self, idx, bval):
        """Return a copy of this value with bit ``idx`` set to ``bval``."""
        return self.irsb_c.set_bit(self.rdt, idx.rdt, bval.rdt)

    @checkparams()
    @vvifyresults
    def set_bits(self, idxsandvals):
        """Return a copy with each (index, bit-value) pair from ``idxsandvals`` applied."""
        return self.irsb_c.set_bits(self.rdt, [(i.cast_to(Type.int_8).rdt, b.rdt) for i, b in idxsandvals])

    @checkparams()
    @vvifyresults
    def ite(self, iftrue, iffalse):
        """Select ``iftrue`` when self (cast to one bit) is true, else ``iffalse``."""
        onebitcond = self.cast_to(Type.int_1)
        return self.irsb_c.ite(onebitcond.rdt, iftrue.rdt, iffalse.rdt)

    @checkparams()
    @vvifyresults
    def sar(self, right):
        """
        `v.sar(r)` should do arithmetic shift right of `v` by `r`

        :param right:VexValue value to shift by
        :return: VexValue - result of a shift
        """
        return self.irsb_c.op_sar(self.rdt, right.rdt)

    @checkparams()
    @vvifyresults
    def __add__(self, right):
        return self.irsb_c.op_add(self.rdt, right.rdt)

    @checkparams()
    def __radd__(self, left):
        return self + left

    @checkparams()
    @vvifyresults
    def __sub__(self, right):
        return self.irsb_c.op_sub(self.rdt, right.rdt)

    @checkparams()
    def __rsub__(self, left):
        return left - self

    @checkparams()
    @vvifyresults
    def __div__(self, right):
        # Signedness of the division is taken from the left operand.
        if self._is_signed:
            return self.irsb_c.op_sdiv(self.rdt, right.rdt)
        else:
            return self.irsb_c.op_udiv(self.rdt, right.rdt)

    @checkparams()
    def __rdiv__(self, left):
        return left // self

    @checkparams()
    def __floordiv__(self, right):  # Note: nonprimitive
        return self.__div__(right)

    @checkparams()
    def __rfloordiv__(self, left):
        return left // self

    @checkparams()
    def __truediv__(self, right):  # Note: nonprimitive
        # Delegate directly to __div__: the previous `return self / right`
        # re-invoked __truediv__ and recursed forever.
        return self.__div__(right)

    @checkparams()
    def __rtruediv__(self, left):
        # `left` has been promoted to a VexValue by checkparams.
        return left.__div__(self)

    @checkparams()
    @vvifyresults
    def __and__(self, right):
        return self.irsb_c.op_and(self.rdt, right.rdt)

    @checkparams()
    def __rand__(self, left):
        return left & self

    @checkparams()
    @vvifyresults
    def __eq__(self, right):
        return self.irsb_c.op_cmp_eq(self.rdt, right.rdt)

    @checkparams()
    @vvifyresults
    def __ne__(self, other):
        return self.irsb_c.op_cmp_ne(self.rdt, other.rdt)

    @checkparams()
    @vvifyresults
    def __invert__(self):
        return self.irsb_c.op_not(self.rdt)

    @checkparams()
    @vvifyresults
    def __le__(self, right):
        if self._is_signed:
            return self.irsb_c.op_cmp_sle(self.rdt, right.rdt)
        else:
            return self.irsb_c.op_cmp_ule(self.rdt, right.rdt)

    @checkparams()
    @vvifyresults
    def __gt__(self, other):
        if self._is_signed:
            return self.irsb_c.op_cmp_sgt(self.rdt, other.rdt)
        else:
            return self.irsb_c.op_cmp_ugt(self.rdt, other.rdt)

    @checkparams()
    @vvifyresults
    def __ge__(self, right):
        if self._is_signed:
            return self.irsb_c.op_cmp_sge(self.rdt, right.rdt)
        else:
            return self.irsb_c.op_cmp_uge(self.rdt, right.rdt)

    @checkparams(rhstype=Type.int_8)
    @vvifyresults
    def __lshift__(self, right):  # TODO put better type inference in irsb_c so we can have rlshift
        """
        logical shift left
        """
        return self.irsb_c.op_shl(self.rdt, right.rdt)

    @checkparams()
    @vvifyresults
    def __lt__(self, right):
        if self._is_signed:
            return self.irsb_c.op_cmp_slt(self.rdt, right.rdt)
        else:
            return self.irsb_c.op_cmp_ult(self.rdt, right.rdt)

    @checkparams()
    @vvifyresults
    def __mod__(self, right):  # Note: nonprimitive
        return self.irsb_c.op_mod(self.rdt, right.rdt)

    @checkparams()
    def __rmod__(self, left):
        return left % self

    @checkparams()
    @vvifyresults
    def __mul__(self, right):
        if self._is_signed:
            return self.irsb_c.op_smul(self.rdt, right.rdt)
        else:
            return self.irsb_c.op_umul(self.rdt, right.rdt)

    @checkparams()
    def __rmul__(self, left):
        return left * self

    @checkparams()
    @vvifyresults
    def __neg__(self):  # Note: nonprimitive
        if not self._is_signed:
            raise Exception("Number is unsigned, cannot change sign!")
        else:
            # Negate as (0 - self) via the overloaded operators; the previous
            # `self.rdt * -1` multiplied a raw RdTmp/Const, which does not
            # support `*` and crashed at runtime.
            return (0 - self).rdt

    @checkparams()
    @vvifyresults
    def __or__(self, right):
        return self.irsb_c.op_or(self.rdt, right.rdt)

    def __ror__(self, left):
        return self | left

    @checkparams()
    @vvifyresults
    def __pos__(self):
        return self

    @checkparams(rhstype=Type.int_8)
    @vvifyresults
    def __rshift__(self, right):
        """
        logical shift right
        """
        return self.irsb_c.op_shr(self.rdt, right.rdt)

    @checkparams()
    def __rlshift__(self, left):
        return left << self

    @checkparams()
    def __rrshift__(self, left):
        return left >> self

    @checkparams()
    @vvifyresults
    def __xor__(self, right):
        return self.irsb_c.op_xor(self.rdt, right.rdt)

    def __rxor__(self, left):
        return self ^ left

    @classmethod
    def Constant(cls, irsb_c, val, ty):
        """
        Creates a constant as a VexValue
        :param irsb_c: The IRSBCustomizer to use
        :param val: The value, as an integer
        :param ty: The type of the resulting VexValue
        :return: a VexValue
        """
        assert not (isinstance(val, VexValue) or isinstance(val, IRExpr))
        rdt = irsb_c.mkconst(val, ty)
        return cls(irsb_c, rdt)
@@ -0,0 +1,301 @@
1
+ import copy
2
+ import re
3
+
4
+ from pyvex.const import U1, get_type_size, ty_to_const_class, vex_int_class
5
+ from pyvex.enums import IRCallee
6
+ from pyvex.expr import ITE, Binop, CCall, Const, Get, Load, RdTmp, Unop
7
+ from pyvex.stmt import Dirty, Exit, IMark, NoOp, Put, Store, WrTmp
8
+
9
+
10
class JumpKind:
    """
    String constants for the VEX jump kinds (``Ijk_*``) used when setting an
    IRSB's ``jumpkind`` or adding side exits.
    """

    Boring = "Ijk_Boring"  # ordinary transfer: fall-through or plain jump
    Call = "Ijk_Call"
    Ret = "Ijk_Ret"
    Segfault = "Ijk_SigSEGV"
    Exit = "Ijk_Exit"
    Syscall = "Ijk_Sys_syscall"
    Sysenter = "Ijk_Sys_sysenter"
    Invalid = "Ijk_INVALID"
    NoDecode = "Ijk_NoDecode"  # instruction could not be decoded
20
+
21
+
22
class TypeMeta(type):
    """
    Metaclass that synthesizes integer type attributes on demand.

    Accessing ``Type.int_<N>`` (e.g. ``Type.int_32``) yields the VEX type
    string for an N-bit integer; any other missing attribute raises a plain
    AttributeError.
    """

    typemeta_re = re.compile(r"int_(?P<size>\d+)$")

    def __getattr__(self, name):
        match = self.typemeta_re.match(name)
        if match:
            width = int(match.group("size"))
            return vex_int_class(width).type
        else:
            # `type` defines no __getattr__, so the original fallback
            # `type.__getattr__(name)` itself raised a misleading
            # AttributeError about `type`; raise the proper one instead.
            raise AttributeError(name)
32
+
33
+
34
class Type(metaclass=TypeMeta):
    """
    Names for VEX IR value types.

    Integer types are synthesized on demand by the metaclass
    (``Type.int_32`` -> ``"Ity_I32"``); float, decimal and SIMD types are
    listed explicitly below.
    """

    # NOTE: the old Python-2 `__metaclass__ = TypeMeta` attribute was removed;
    # the metaclass is declared in the class header above.

    ieee_float_16 = "Ity_F16"
    ieee_float_32 = "Ity_F32"
    ieee_float_64 = "Ity_F64"
    ieee_float_128 = "Ity_F128"
    decimal_float_32 = "Ity_D32"
    decimal_float_64 = "Ity_D64"
    decimal_float_128 = "Ity_D128"
    simd_vector_128 = "Ity_V128"
    simd_vector_256 = "Ity_V256"
46
+
47
+
48
def get_op_format_from_const_ty(ty):
    """Return the op-name fragment (``op_format``) for a VEX type string ``ty``."""
    const_cls = ty_to_const_class(ty)
    return const_cls.op_format
50
+
51
+
52
def make_format_op_generator(fmt_string):
    """
    Return a function which generates an op format (just a string of the vex
    instruction).

    Functions by formatting ``fmt_string`` with the types of the arguments.
    """

    def gen(arg_types):
        formatted_types = [get_op_format_from_const_ty(t) for t in arg_types]
        return fmt_string.format(arg_t=formatted_types)

    return gen
65
+
66
+
67
def mkbinop(fstring):
    """Build a two-operand IRSBCustomizer method from an op format string."""

    def binop(self, expr_a, expr_b):
        return self.op_binary(make_format_op_generator(fstring))(expr_a, expr_b)

    return binop
69
+
70
+
71
def mkunop(fstring):
    """Build a one-operand IRSBCustomizer method from an op format string."""

    def unop(self, expr_a):
        return self.op_unary(make_format_op_generator(fstring))(expr_a)

    return unop
73
+
74
+
75
def mkcmpop(fstring_fragment, signedness=""):
    """
    Build a comparison method from an op-name fragment (e.g. "EQ", "LT").

    The 1-bit VEX comparison result is cast back to the type of the left
    operand before being returned.
    """

    def cmpop(self, expr_a, expr_b):
        operand_ty = self.get_type(expr_a)
        fstring = "Iop_Cmp%s{arg_t[0]}%s" % (fstring_fragment, signedness)
        raw_result = mkbinop(fstring)(self, expr_a, expr_b)
        return self.cast_to(raw_result, operand_ty)

    return cmpop
83
+
84
+
85
class IRSBCustomizer:
    """
    Helper for building up an IRSB (a VEX basic block) statement by statement.

    Binary/unary/comparison operations are generated from format strings via
    mkbinop/mkunop/mkcmpop; each operation appends a WrTmp statement to the
    IRSB and returns an RdTmp expression for its result.
    """

    op_add = mkbinop("Iop_Add{arg_t[0]}")
    op_sub = mkbinop("Iop_Sub{arg_t[0]}")
    op_umul = mkbinop("Iop_Mul{arg_t[0]}")
    op_smul = mkbinop("Iop_MullS{arg_t[0]}")
    op_sdiv = mkbinop("Iop_DivS{arg_t[0]}")
    op_udiv = mkbinop("Iop_DivU{arg_t[0]}")

    # Custom operation that does not exist in libVEX
    op_mod = mkbinop("Iop_Mod{arg_t[0]}")

    op_or = mkbinop("Iop_Or{arg_t[0]}")
    op_and = mkbinop("Iop_And{arg_t[0]}")
    op_xor = mkbinop("Iop_Xor{arg_t[0]}")

    op_shr = mkbinop("Iop_Shr{arg_t[0]}")  # Shift Right (logical)
    op_shl = mkbinop("Iop_Shl{arg_t[0]}")  # Shift Left (logical)

    op_sar = mkbinop("Iop_Sar{arg_t[0]}")  # Shift Arithmetic Right operation

    op_not = mkunop("Iop_Not{arg_t[0]}")

    op_cmp_eq = mkcmpop("EQ")
    op_cmp_ne = mkcmpop("NE")
    op_cmp_slt = mkcmpop("LT", "S")
    op_cmp_sle = mkcmpop("LE", "S")
    op_cmp_ult = mkcmpop("LT", "U")
    op_cmp_ule = mkcmpop("LE", "U")
    op_cmp_sge = mkcmpop("GE", "S")
    op_cmp_uge = mkcmpop("GE", "U")
    op_cmp_sgt = mkcmpop("GT", "S")
    op_cmp_ugt = mkcmpop("GT", "U")

    def __init__(self, irsb):
        self.arch = irsb.arch
        self.irsb = irsb

    def get_type(self, rdt):
        """Return the VEX type (e.g. "Ity_I32") of ``rdt`` under this IRSB's tyenv."""
        return rdt.result_type(self.irsb.tyenv)

    # Statements (no return value)
    def _append_stmt(self, stmt):
        self.irsb.statements += [stmt]

    def imark(self, int_addr, int_length, int_delta=0):
        """Append an instruction mark for the instruction at ``int_addr``."""
        self._append_stmt(IMark(int_addr, int_length, int_delta))

    def get_reg(self, regname):  # TODO move this into the lifter
        """Return the register-file offset of the named register."""
        return self.arch.registers[regname][0]

    def put(self, expr_val, tuple_reg):
        """Append a Put storing ``expr_val`` to register offset ``tuple_reg``."""
        self._append_stmt(Put(copy.copy(expr_val), tuple_reg))

    def store(self, addr, expr):
        """Append a memory Store of ``expr`` to ``addr`` using the guest endianness."""
        self._append_stmt(Store(copy.copy(addr), copy.copy(expr), self.arch.memory_endness))

    def noop(self):
        self._append_stmt(NoOp())

    def add_exit(self, guard, dst, jk, ip):
        """
        Add an exit out of the middle of an IRSB.
        (e.g., a conditional jump)
        :param guard: An expression, the exit is taken if true
        :param dst: the destination of the exit (a Const)
        :param jk: the JumpKind of this exit (probably Ijk_Boring)
        :param ip: The address of this exit's source
        """
        self.irsb.statements.append(Exit(guard, dst.con, jk, ip))

    # end statements

    def goto(self, addr):
        """Finish the block with an unconditional jump to ``addr``."""
        self.irsb.next = addr
        self.irsb.jumpkind = JumpKind.Boring

    def ret(self, addr):
        """Finish the block with a function return to ``addr``."""
        self.irsb.next = addr
        self.irsb.jumpkind = JumpKind.Ret

    def call(self, addr):
        """Finish the block with a call to ``addr``."""
        self.irsb.next = addr
        self.irsb.jumpkind = JumpKind.Call

    def _add_tmp(self, t):
        # Allocate a new temporary of type ``t`` in the IRSB's type environment.
        return self.irsb.tyenv.add(t)

    def _rdtmp(self, tmp):
        return RdTmp.get_instance(tmp)

    def _settmp(self, expr):
        # Write ``expr`` into a fresh temporary and return an RdTmp reading it.
        ty = self.get_type(expr)
        tmp = self._add_tmp(ty)
        self._append_stmt(WrTmp(tmp, expr))
        return self._rdtmp(tmp)

    def rdreg(self, reg, ty):
        """Read register offset ``reg`` as type ``ty``; returns an RdTmp."""
        return self._settmp(Get(reg, ty))

    def load(self, addr, ty):
        """Load ``ty`` from memory at ``addr``; returns an RdTmp."""
        return self._settmp(Load(self.arch.memory_endness, ty, copy.copy(addr)))

    def op_ccall(self, retty, funcstr, args):
        """Emit a clean-helper call returning ``retty``; returns an RdTmp."""
        return self._settmp(CCall(retty, IRCallee(len(args), funcstr, 0xFFFF), args))

    def dirty(self, retty, funcstr, args):
        """
        Emit a dirty-helper call. When ``retty`` is None no result temporary
        is allocated.
        """
        if retty is None:
            # NOTE(review): 0xFFFFFFFF is presumably the "invalid temp"
            # sentinel (IRTemp_INVALID) — confirm against libVEX headers.
            tmp = 0xFFFFFFFF
        else:
            tmp = self._add_tmp(retty)
        self._append_stmt(Dirty(IRCallee(len(args), funcstr, 0xFFFF), Const(U1(1)), args, tmp, None, None, None, None))
        return self._rdtmp(tmp)

    def ite(self, condrdt, iftruerdt, iffalserdt):
        """Emit an if-then-else select on ``condrdt`` (a 1-bit expression)."""
        # NOTE(review): argument order here assumes pyvex's ITE constructor
        # takes (cond, iffalse, iftrue) — confirm against pyvex.expr.ITE.
        return self._settmp(ITE(copy.copy(condrdt), copy.copy(iffalserdt), copy.copy(iftruerdt)))

    def mkconst(self, val, ty):
        """Build a Const expression holding ``val`` with VEX type ``ty``."""
        cls = ty_to_const_class(ty)
        return Const(cls(val))

    # Operations
    def op_generic(self, Operation, op_generator):
        """
        Return a callable that builds an ``Operation`` (Binop/Unop) whose op
        name is produced by ``op_generator`` from the argument types, type-checks
        it, and writes it to a fresh temporary.
        """

        def instance(*args):  # Note: The args here are all RdTmps
            for arg in args:
                assert isinstance(arg, RdTmp) or isinstance(arg, Const)
            arg_types = [self.get_type(arg) for arg in args]
            # two operations should never share the same argument instances, copy them here to ensure that
            args = [copy.copy(a) for a in args]
            op = Operation(op_generator(arg_types), args)
            msg = "operation needs to be well typed: " + str(op)
            assert op.typecheck(self.irsb.tyenv), msg + "\ntypes: " + str(self.irsb.tyenv)
            return self._settmp(op)

        return instance

    def op_binary(self, op_format_str):
        return self.op_generic(Binop, op_format_str)

    def op_unary(self, op_format_str):
        return self.op_generic(Unop, op_format_str)

    def cast_to(self, rdt, tydest, signed=False, high=False):
        """Cast ``rdt`` to ``tydest``: narrow, widen, or pass through as needed."""
        goalwidth = get_type_size(tydest)
        rdtwidth = self.get_rdt_width(rdt)

        if rdtwidth > goalwidth:
            return self.op_narrow_int(rdt, tydest, high_half=high)
        elif rdtwidth < goalwidth:
            return self.op_widen_int(rdt, tydest, signed=signed)
        else:
            return rdt

    def op_to_one_bit(self, rdt):
        """Reduce ``rdt`` to a 1-bit value (its least significant bit)."""
        rdtty = self.get_type(rdt)
        # Narrowing ops to 1 bit exist only from I32/I64, so widen first if needed.
        if rdtty not in [Type.int_64, Type.int_32]:
            rdt = self.op_widen_int_unsigned(rdt, Type.int_32)
        onebit = self.op_narrow_int(rdt, Type.int_1)
        return onebit

    def op_narrow_int(self, rdt, tydest, high_half=False):
        """Narrow ``rdt`` to ``tydest``, taking the high half if ``high_half``."""
        op_name = "{op}{high}to{dest}".format(
            op="Iop_{arg_t[0]}", high="HI" if high_half else "", dest=get_op_format_from_const_ty(tydest)
        )
        return self.op_unary(make_format_op_generator(op_name))(rdt)

    def op_widen_int(self, rdt, tydest, signed=False):
        """Widen ``rdt`` to ``tydest``, sign-extending when ``signed``."""
        op_name = "{op}{sign}to{dest}".format(
            op="Iop_{arg_t[0]}", sign="S" if signed else "U", dest=get_op_format_from_const_ty(tydest)
        )
        return self.op_unary(make_format_op_generator(op_name))(rdt)

    def op_widen_int_signed(self, rdt, tydest):
        return self.op_widen_int(rdt, tydest, signed=True)

    def op_widen_int_unsigned(self, rdt, tydest):
        return self.op_widen_int(rdt, tydest, signed=False)

    def get_msb(self, tmp, ty):
        """Extract the most significant bit of ``tmp`` (of type ``ty``)."""
        width = get_type_size(ty)
        return self.get_bit(tmp, width - 1)

    def get_bit(self, rdt, idx):
        """Extract bit ``idx`` of ``rdt``: shift right by idx, then mask with 1."""
        shifted = self.op_shr(rdt, idx)
        bit = self.op_extract_lsb(shifted)
        return bit

    def op_extract_lsb(self, rdt):
        # rdt & 1, keeping rdt's type.
        bitmask = self.mkconst(1, self.get_type(rdt))
        return self.op_and(bitmask, rdt)

    def set_bit(self, rdt, idx, bval):
        """
        Set bit ``idx`` of ``rdt`` to ``bval``: compute whether the current bit
        differs from the desired one, and XOR the (shifted) difference back in.
        """
        currbit = self.get_bit(rdt, idx)
        areequalextrabits = self.op_xor(bval, currbit)
        one = self.mkconst(1, self.get_type(areequalextrabits))
        areequal = self.op_and(areequalextrabits, one)
        shifted = self.op_shl(areequal, idx)
        return self.op_xor(rdt, shifted)

    def set_bits(self, rdt, idxsandvals):
        """
        Set several bits of ``rdt`` at once from (index, bit-value) pairs:
        build a mask of the affected bits, the desired bit pattern, and XOR in
        the positions where the current value differs from the desired one.
        """
        ty = self.get_type(rdt)
        if all([isinstance(idx, Const) for idx, _ in idxsandvals]):
            # All indices are known constants: fold the mask at lift time.
            relevantbits = self.mkconst(sum([1 << idx.con.value for idx, _ in idxsandvals]), ty)
        else:
            # Otherwise OR together 1 << idx at runtime.
            relevantbits = self.mkconst(0, ty)
            for idx, _ in idxsandvals:
                shifted = self.op_shl(self.mkconst(1, ty), idx)
                relevantbits = self.op_or(relevantbits, shifted)
        setto = self.mkconst(0, ty)
        for idx, bval in idxsandvals:
            bvalbit = self.op_extract_lsb(bval)
            shifted = self.op_shl(bvalbit, idx)
            setto = self.op_or(setto, shifted)
        shouldflip = self.op_and(self.op_xor(setto, rdt), relevantbits)
        return self.op_xor(rdt, shouldflip)

    def get_rdt_width(self, rdt):
        """Return the bit width of ``rdt`` under this IRSB's tyenv."""
        return rdt.result_size(self.irsb.tyenv)
@@ -0,0 +1,71 @@
1
+ import copy
2
+
3
+ from pyvex import const, expr, stmt
4
+
5
+ from .post_processor import Postprocessor
6
+
7
+
8
class ZeroDivisionPostProcessor(Postprocessor):
    """
    A postprocessor for adding zero-division checks to VEX.

    For "div rcx", will turn:

    00 | ------ IMark(0x8000, 3, 0) ------
    01 | t0 = GET:I64(rcx)
    02 | t1 = GET:I64(rax)
    03 | t2 = GET:I64(rdx)
    04 | t3 = 64HLto128(t2,t1)
    05 | t4 = DivModU128to64(t3,t0)
    06 | t5 = 128to64(t4)
    07 | PUT(rax) = t5
    08 | t6 = 128HIto64(t4)
    09 | PUT(rdx) = t6
    NEXT: PUT(rip) = 0x0000000000008003; Ijk_Boring

    into:

    00 | ------ IMark(0x8000, 3, 0) ------
    01 | t0 = GET:I64(rcx)
    02 | t4 = GET:I64(rax)
    03 | t5 = GET:I64(rdx)
    04 | t3 = 64HLto128(t5,t4)
    05 | t9 = CmpEQ(t0,0x0000000000000000)
    06 | if (t9) { PUT(pc) = 0x8000; Ijk_SigFPE_IntDiv }
    07 | t2 = DivModU128to64(t3,t0)
    08 | t6 = 128to64(t2)
    09 | PUT(rax) = t6
    10 | t7 = 128HIto64(t2)
    11 | PUT(rdx) = t7
    NEXT: PUT(rip) = 0x0000000000008003; Ijk_Boring
    """

    def postprocess(self):
        # Scan the block for Div/Mod binops and splice a divisor==0 check plus
        # a conditional SIGFPE exit immediately before each one.
        if self.irsb.statements is None:
            # This is an optimized IRSB. We cannot really post-process it.
            return

        # (index, statement) pairs to insert before the statement at `index`.
        insertions = []
        # Address of the most recent IMark; used as the faulting PC for the exit.
        last_ip = 0
        for i, s in enumerate(self.irsb.statements):
            if s.tag == "Ist_IMark":
                last_ip = s.addr
            if s.tag == "Ist_WrTmp" and s.data.tag == "Iex_Binop" and ("Div" in s.data.op or "Mod" in s.data.op):
                # Compare the divisor (second operand) against a zero constant
                # of the same bit width.
                arg_size = s.data.args[1].result_size(self.irsb.tyenv)
                cmp_args = [copy.copy(s.data.args[1]), expr.Const(const.vex_int_class(arg_size)(0))]
                cmp_tmp = self.irsb.tyenv.add("Ity_I1")
                insertions.append((i, stmt.WrTmp(cmp_tmp, expr.Binop("Iop_CmpEQ%d" % arg_size, cmp_args))))
                # Conditional exit raising Ijk_SigFPE_IntDiv at the instruction's
                # own address when the divisor is zero.
                insertions.append(
                    (
                        i,
                        stmt.Exit(
                            expr.RdTmp.get_instance(cmp_tmp),
                            const.vex_int_class(self.irsb.arch.bits)(last_ip),
                            "Ijk_SigFPE_IntDiv",
                            self.irsb.offsIP,
                        ),
                    )
                )

        # Insert in reverse order so earlier indices remain valid; for the two
        # entries sharing an index, the Exit is inserted first and the compare
        # then lands in front of it, yielding compare-then-exit in the block.
        for i, s in reversed(insertions):
            self.irsb.statements.insert(i, s)