pyvex-9.2.189-cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyvex/__init__.py +92 -0
- pyvex/_register_info.py +1800 -0
- pyvex/arches.py +94 -0
- pyvex/block.py +697 -0
- pyvex/const.py +426 -0
- pyvex/const_val.py +26 -0
- pyvex/data_ref.py +55 -0
- pyvex/enums.py +156 -0
- pyvex/errors.py +31 -0
- pyvex/expr.py +974 -0
- pyvex/include/libvex.h +1029 -0
- pyvex/include/libvex_basictypes.h +236 -0
- pyvex/include/libvex_emnote.h +142 -0
- pyvex/include/libvex_guest_amd64.h +252 -0
- pyvex/include/libvex_guest_arm.h +224 -0
- pyvex/include/libvex_guest_arm64.h +203 -0
- pyvex/include/libvex_guest_mips32.h +175 -0
- pyvex/include/libvex_guest_mips64.h +173 -0
- pyvex/include/libvex_guest_offsets.h +941 -0
- pyvex/include/libvex_guest_ppc32.h +298 -0
- pyvex/include/libvex_guest_ppc64.h +343 -0
- pyvex/include/libvex_guest_riscv64.h +148 -0
- pyvex/include/libvex_guest_s390x.h +201 -0
- pyvex/include/libvex_guest_tilegx.h +149 -0
- pyvex/include/libvex_guest_x86.h +322 -0
- pyvex/include/libvex_ir.h +3113 -0
- pyvex/include/libvex_s390x_common.h +123 -0
- pyvex/include/libvex_trc_values.h +99 -0
- pyvex/include/pyvex.h +96 -0
- pyvex/lib/pyvex.dll +0 -0
- pyvex/lib/pyvex.lib +0 -0
- pyvex/lifting/__init__.py +18 -0
- pyvex/lifting/gym/README.md +7 -0
- pyvex/lifting/gym/__init__.py +5 -0
- pyvex/lifting/gym/aarch64_spotter.py +40 -0
- pyvex/lifting/gym/arm_spotter.py +427 -0
- pyvex/lifting/gym/x86_spotter.py +129 -0
- pyvex/lifting/libvex.py +117 -0
- pyvex/lifting/lift_function.py +304 -0
- pyvex/lifting/lifter.py +124 -0
- pyvex/lifting/post_processor.py +16 -0
- pyvex/lifting/util/__init__.py +14 -0
- pyvex/lifting/util/instr_helper.py +422 -0
- pyvex/lifting/util/lifter_helper.py +154 -0
- pyvex/lifting/util/syntax_wrapper.py +312 -0
- pyvex/lifting/util/vex_helper.py +301 -0
- pyvex/lifting/zerodivision.py +71 -0
- pyvex/native.py +63 -0
- pyvex/py.typed +1 -0
- pyvex/stmt.py +740 -0
- pyvex/types.py +48 -0
- pyvex/utils.py +63 -0
- pyvex/vex_ffi.py +1452 -0
- pyvex-9.2.189.dist-info/METADATA +181 -0
- pyvex-9.2.189.dist-info/RECORD +60 -0
- pyvex-9.2.189.dist-info/WHEEL +5 -0
- pyvex-9.2.189.dist-info/licenses/LICENSE +24 -0
- pyvex-9.2.189.dist-info/licenses/pyvex_c/LICENSE +339 -0
- pyvex-9.2.189.dist-info/licenses/vex/LICENSE.GPL +340 -0
- pyvex-9.2.189.dist-info/licenses/vex/LICENSE.README +23 -0
pyvex/include/libvex.h
ADDED
@@ -0,0 +1,1029 @@

/*---------------------------------------------------------------*/
/*--- begin                                          libvex.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2015 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __LIBVEX_H
#define __LIBVEX_H


#include "libvex_basictypes.h"
#include "libvex_ir.h"


/*---------------------------------------------------------------*/
/*--- This file defines the top-level interface to LibVEX.    ---*/
/*---------------------------------------------------------------*/

/*-------------------------------------------------------*/
/*--- Architectures, variants, and other arch info    ---*/
/*-------------------------------------------------------*/

typedef
   enum {
      VexArch_INVALID=0x400,
      VexArchX86,
      VexArchAMD64,
      VexArchARM,
      VexArchARM64,
      VexArchPPC32,
      VexArchPPC64,
      VexArchS390X,
      VexArchMIPS32,
      VexArchMIPS64,
      VexArchTILEGX,
      VexArchRISCV64
   }
   VexArch;


/* Information about endianness. */
typedef
   enum {
      VexEndness_INVALID=0x600, /* unknown endianness */
      VexEndnessLE,             /* little endian */
      VexEndnessBE              /* big endian */
   }
   VexEndness;


/* For a given architecture, these specify extra capabilities beyond
   the minimum supported (baseline) capabilities.  They may be OR'd
   together, although some combinations don't make sense.  (eg, SSE2
   but not SSE1).  LibVEX_Translate will check for nonsensical
   combinations. */

/* x86: baseline capability is Pentium-1 (FPU, MMX, but no SSE), with
   cmpxchg8b.  MMXEXT is a special AMD only subset of SSE1 (Integer SSE). */
#define VEX_HWCAPS_X86_MMXEXT  (1<<1)  /* A subset of SSE1 on early AMD */
#define VEX_HWCAPS_X86_SSE1    (1<<2)  /* SSE1 support (Pentium III) */
#define VEX_HWCAPS_X86_SSE2    (1<<3)  /* SSE2 support (Pentium 4) */
#define VEX_HWCAPS_X86_SSE3    (1<<4)  /* SSE3 support (>= Prescott) */
#define VEX_HWCAPS_X86_LZCNT   (1<<5)  /* SSE4a LZCNT insn */

/* amd64: baseline capability is SSE2, with cmpxchg8b but not
   cmpxchg16b. */
#define VEX_HWCAPS_AMD64_SSE3   (1<<5)  /* SSE3 support */
#define VEX_HWCAPS_AMD64_CX16   (1<<6)  /* cmpxchg16b support */
#define VEX_HWCAPS_AMD64_LZCNT  (1<<7)  /* SSE4a LZCNT insn */
#define VEX_HWCAPS_AMD64_AVX    (1<<8)  /* AVX instructions */
#define VEX_HWCAPS_AMD64_RDTSCP (1<<9)  /* RDTSCP instruction */
#define VEX_HWCAPS_AMD64_BMI    (1<<10) /* BMI1 instructions */
#define VEX_HWCAPS_AMD64_AVX2   (1<<11) /* AVX2 instructions */

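/* Illustrative sketch (editor's addition, not part of the shipped
   header): hwcaps words are plain bitmasks, so a client ORs together
   the defines for the features it wants and tests them with '&'.
   Guarded out so the header's contents are unaffected. */
#if 0
static UInt example_amd64_hwcaps ( void )
{
   /* SSE2 is the amd64 baseline, so it has no bit of its own; only
      the optional extras are encoded. */
   UInt hwcaps = VEX_HWCAPS_AMD64_SSE3
                 | VEX_HWCAPS_AMD64_CX16
                 | VEX_HWCAPS_AMD64_AVX;
   if (hwcaps & VEX_HWCAPS_AMD64_AVX) {
      /* AVX lifting would be enabled for this guest. */
   }
   return hwcaps;
}
#endif
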
/* ppc32: baseline capability is integer only */
#define VEX_HWCAPS_PPC32_F     (1<<8)  /* basic (non-optional) FP */
#define VEX_HWCAPS_PPC32_V     (1<<9)  /* Altivec (VMX) */
#define VEX_HWCAPS_PPC32_FX    (1<<10) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC32_GX    (1<<11) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC32_VX    (1<<12) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher */
#define VEX_HWCAPS_PPC32_DFP   (1<<17) /* Decimal Floating Point (DFP) -- e.g., dadd */
#define VEX_HWCAPS_PPC32_ISA2_07 (1<<19) /* ISA 2.07 -- e.g., mtvsrd */
#define VEX_HWCAPS_PPC32_ISA3_0  (1<<21) /* ISA 3.0  -- e.g., cnttzw */

/* ppc64: baseline capability is integer and basic FP insns */
#define VEX_HWCAPS_PPC64_V     (1<<13) /* Altivec (VMX) */
#define VEX_HWCAPS_PPC64_FX    (1<<14) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC64_GX    (1<<15) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC64_VX    (1<<16) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher */
#define VEX_HWCAPS_PPC64_DFP   (1<<18) /* Decimal Floating Point (DFP) -- e.g., dadd */
#define VEX_HWCAPS_PPC64_ISA2_07 (1<<20) /* ISA 2.07 -- e.g., mtvsrd */
#define VEX_HWCAPS_PPC64_ISA3_0  (1<<22) /* ISA 3.0  -- e.g., cnttzw */

/* s390x: Hardware capability encoding

   Bits [26:31] encode the machine model (see VEX_S390X_MODEL... below)
   Bits [0:20]  encode specific hardware capabilities
                (see VEX_HWCAPS_S390X_... below)
*/

/* Model numbers must be assigned in chronological order.
   They are used as array index. */
#define VEX_S390X_MODEL_Z900     0
#define VEX_S390X_MODEL_Z800     1
#define VEX_S390X_MODEL_Z990     2
#define VEX_S390X_MODEL_Z890     3
#define VEX_S390X_MODEL_Z9_EC    4
#define VEX_S390X_MODEL_Z9_BC    5
#define VEX_S390X_MODEL_Z10_EC   6
#define VEX_S390X_MODEL_Z10_BC   7
#define VEX_S390X_MODEL_Z196     8
#define VEX_S390X_MODEL_Z114     9
#define VEX_S390X_MODEL_ZEC12    10
#define VEX_S390X_MODEL_ZBC12    11
#define VEX_S390X_MODEL_Z13      12
#define VEX_S390X_MODEL_Z13S     13
#define VEX_S390X_MODEL_UNKNOWN  14  /* always last in list */
#define VEX_S390X_MODEL_MASK     0x3F

#define VEX_HWCAPS_S390X_LDISP (1<<6)  /* Long-displacement facility */
#define VEX_HWCAPS_S390X_EIMM  (1<<7)  /* Extended-immediate facility */
#define VEX_HWCAPS_S390X_GIE   (1<<8)  /* General-instruction-extension facility */
#define VEX_HWCAPS_S390X_DFP   (1<<9)  /* Decimal floating point facility */
#define VEX_HWCAPS_S390X_FGX   (1<<10) /* FPR-GR transfer facility */
#define VEX_HWCAPS_S390X_ETF2  (1<<11) /* ETF2-enhancement facility */
#define VEX_HWCAPS_S390X_STFLE (1<<12) /* STFLE facility */
#define VEX_HWCAPS_S390X_ETF3  (1<<13) /* ETF3-enhancement facility */
#define VEX_HWCAPS_S390X_STCKF (1<<14) /* STCKF facility */
#define VEX_HWCAPS_S390X_FPEXT (1<<15) /* Floating point extension facility */
#define VEX_HWCAPS_S390X_LSC   (1<<16) /* Conditional load/store facility */
#define VEX_HWCAPS_S390X_PFPO  (1<<17) /* Perform floating point ops facility */
#define VEX_HWCAPS_S390X_VX    (1<<18) /* Vector facility */
#define VEX_HWCAPS_S390X_MSA5  (1<<19) /* message security assistance facility */
#define VEX_HWCAPS_S390X_MI2   (1<<20) /* miscellaneous-instruction-extensions facility 2 */


/* Special value representing all available s390x hwcaps */
#define VEX_HWCAPS_S390X_ALL   (VEX_HWCAPS_S390X_LDISP | \
                                VEX_HWCAPS_S390X_EIMM  | \
                                VEX_HWCAPS_S390X_GIE   | \
                                VEX_HWCAPS_S390X_DFP   | \
                                VEX_HWCAPS_S390X_FGX   | \
                                VEX_HWCAPS_S390X_STFLE | \
                                VEX_HWCAPS_S390X_STCKF | \
                                VEX_HWCAPS_S390X_FPEXT | \
                                VEX_HWCAPS_S390X_LSC   | \
                                VEX_HWCAPS_S390X_ETF3  | \
                                VEX_HWCAPS_S390X_ETF2  | \
                                VEX_HWCAPS_S390X_PFPO  | \
                                VEX_HWCAPS_S390X_VX    | \
                                VEX_HWCAPS_S390X_MSA5  | \
                                VEX_HWCAPS_S390X_MI2)

#define VEX_HWCAPS_S390X(x)  ((x) & ~VEX_S390X_MODEL_MASK)
#define VEX_S390X_MODEL(x)   ((x) &  VEX_S390X_MODEL_MASK)

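/* Illustrative sketch (editor's addition, not part of the shipped
   header): splitting a combined s390x hwcaps word into its machine
   model and facility parts with the two macros above. */
#if 0
static void example_s390x_split ( UInt word )
{
   UInt model  = VEX_S390X_MODEL(word);   /* low six bits: model index */
   UInt hwcaps = VEX_HWCAPS_S390X(word);  /* remaining facility bits */
   if (model != VEX_S390X_MODEL_UNKNOWN
       && (hwcaps & VEX_HWCAPS_S390X_VX)) {
      /* vector facility available on a known machine model */
   }
}
#endif
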
/* Tilegx: baseline capability is TILEGX36 */
#define VEX_HWCAPS_TILEGX_BASE (1<<16)  /* TILEGX Baseline */

/* arm: baseline capability is ARMv4 */
/* Bits 5:0 - architecture level (e.g. 5 for v5, 6 for v6 etc) */
#define VEX_HWCAPS_ARM_VFP    (1<<6)  /* VFP extension */
#define VEX_HWCAPS_ARM_VFP2   (1<<7)  /* VFPv2 */
#define VEX_HWCAPS_ARM_VFP3   (1<<8)  /* VFPv3 */
/* Bits 15:10 reserved for (possible) future VFP revisions */
#define VEX_HWCAPS_ARM_NEON   (1<<16) /* Advanced SIMD also known as NEON */

/* Get an ARM architecture level from HWCAPS */
#define VEX_ARM_ARCHLEVEL(x) ((x) & 0x3f)

/* ARM64: baseline capability is AArch64 v8. */
/* (no definitions since no variants so far) */

/* MIPS baseline capability */
/* Assigned Company values for bits 23:16 of the PRId Register
   (CP0 register 15, select 0).  As of the MIPS32 and MIPS64 specs from
   MTI, the PRId register is defined in this (backwards compatible)
   way:

   +----------------+----------------+----------------+----------------+
   | Company Options| Company ID     | Processor ID   | Revision       |
   +----------------+----------------+----------------+----------------+
    31            24 23            16 15             8 7

*/

#define VEX_PRID_COMP_LEGACY      0x00000000
#define VEX_PRID_COMP_MIPS        0x00010000
#define VEX_PRID_COMP_BROADCOM    0x00020000
#define VEX_PRID_COMP_NETLOGIC    0x000C0000
#define VEX_PRID_COMP_CAVIUM      0x000D0000
#define VEX_PRID_COMP_INGENIC_E1  0x00E10000  /* JZ4780 */

/*
 * These are valid when 23:16 == PRID_COMP_LEGACY
 */
#define VEX_PRID_IMP_LOONGSON_64  0x6300  /* Loongson-2/3 */

/*
 * These are the PRID's for when 23:16 == PRID_COMP_MIPS
 */
#define VEX_PRID_IMP_34K          0x9500
#define VEX_PRID_IMP_74K          0x9700

/*
 * Instead of Company Options values, bits 31:24 will be packed with
 * additional information, such as isa level and FP mode.
 */
#define VEX_MIPS_CPU_ISA_M32R1    0x01000000
#define VEX_MIPS_CPU_ISA_M32R2    0x02000000
#define VEX_MIPS_CPU_ISA_M64R1    0x04000000
#define VEX_MIPS_CPU_ISA_M64R2    0x08000000
#define VEX_MIPS_CPU_ISA_M32R6    0x10000000
#define VEX_MIPS_CPU_ISA_M64R6    0x20000000
/* FP mode is FR = 1 (32 dbl. prec. FP registers) */
#define VEX_MIPS_HOST_FR          0x40000000
/* Get MIPS Extended Information */
#define VEX_MIPS_EX_INFO(x)       ((x) & 0xFF000000)
/* Get MIPS Company ID from HWCAPS */
#define VEX_MIPS_COMP_ID(x)       ((x) & 0x00FF0000)
/* Get MIPS Processor ID from HWCAPS */
#define VEX_MIPS_PROC_ID(x)       ((x) & 0x0000FF00)
/* Get MIPS Revision from HWCAPS */
#define VEX_MIPS_REV(x)           ((x) & 0x000000FF)
/* Get host FP mode */
#define VEX_MIPS_HOST_FP_MODE(x)  (!!(VEX_MIPS_EX_INFO(x) & VEX_MIPS_HOST_FR))
/* Check if the processor supports MIPS32R2. */
#define VEX_MIPS_CPU_HAS_MIPS32R2(x)  (VEX_MIPS_EX_INFO(x) & \
                                       VEX_MIPS_CPU_ISA_M32R2)
/* Check if the processor supports DSP ASE Rev 2. */
#define VEX_MIPS_PROC_DSP2(x)  ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
                                (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_74K))
/* Check if the processor supports DSP ASE Rev 1. */
#define VEX_MIPS_PROC_DSP(x)   (VEX_MIPS_PROC_DSP2(x) || \
                                ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
                                 (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_34K)))

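/* Illustrative sketch (editor's addition, not part of the shipped
   header): decoding the packed MIPS hwcaps word with the accessor
   macros above. */
#if 0
static void example_mips_decode ( UInt hwcaps )
{
   UInt comp = VEX_MIPS_COMP_ID(hwcaps);  /* company ID, bits 23:16 */
   UInt proc = VEX_MIPS_PROC_ID(hwcaps);  /* processor ID, bits 15:8 */
   UInt rev  = VEX_MIPS_REV(hwcaps);      /* revision, bits 7:0 */
   if (comp == VEX_PRID_COMP_MIPS && VEX_MIPS_PROC_DSP(hwcaps)) {
      /* a 34K or 74K core: DSP ASE is available */
   }
   if (VEX_MIPS_HOST_FP_MODE(hwcaps)) {
      /* host runs with FR=1, i.e. 32 double-precision FP registers */
   }
   (void)proc; (void)rev;
}
#endif
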
/* These return statically allocated strings. */

extern const HChar* LibVEX_ppVexArch    ( VexArch );
extern const HChar* LibVEX_ppVexEndness ( VexEndness endness );
extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );


/* The various kinds of caches */
typedef enum {
   DATA_CACHE=0x500,
   INSN_CACHE,
   UNIFIED_CACHE
} VexCacheKind;

/* Information about a particular cache */
typedef struct {
   VexCacheKind kind;
   UInt level;           /* level this cache is at, e.g. 1 for L1 cache */
   UInt sizeB;           /* size of this cache in bytes */
   UInt line_sizeB;      /* cache line size in bytes */
   UInt assoc;           /* set associativity */
   Bool is_trace_cache;  /* False, except for certain Pentium 4 models */
} VexCache;

/* Convenience macro to initialise a VexCache */
#define VEX_CACHE_INIT(_kind, _level, _size, _line_size, _assoc)         \
         ({ (VexCache) { .kind = _kind, .level = _level, .sizeB = _size, \
               .line_sizeB = _line_size, .assoc = _assoc,                \
               .is_trace_cache = False }; })

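/* Illustrative sketch (editor's addition, not part of the shipped
   header): describing a 32KB, 8-way, 64-byte-line L1 data cache.
   Note VEX_CACHE_INIT expands to a GCC/Clang statement expression
   wrapping a compound literal, so it needs a compiler that supports
   that extension. */
#if 0
static VexCache example_l1d_cache ( void )
{
   return VEX_CACHE_INIT(DATA_CACHE, /*level*/1, /*sizeB*/32 * 1024,
                         /*line_sizeB*/64, /*assoc*/8);
}
#endif
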
/* Information about the cache system as a whole */
typedef struct {
   UInt num_levels;
   UInt num_caches;
   /* Unordered array of caches for this host.  NULL if there are
      no caches.  The following can always be assumed:
      (1) There is at most one cache of a given kind per cache level.
      (2) If there exists a unified cache at a particular level then
          no other cache exists at that level.
      (3) The existence of a cache at level N > 1 implies the existence of
          at least one cache at level N-1. */
   VexCache *caches;
   Bool icaches_maintain_coherence;
} VexCacheInfo;


/* This struct is a bit of a hack, but is needed to carry misc
   important bits of info about an arch.  Fields which are meaningless
   or ignored for the platform in question should be set to zero.
   Nb: if you add fields to the struct make sure to update function
   LibVEX_default_VexArchInfo. */

typedef
   struct {
      /* The following three fields are mandatory. */
      UInt         hwcaps;
      VexEndness   endness;
      VexCacheInfo hwcache_info;
      /* PPC32/PPC64 only: size of instruction cache line */
      Int ppc_icache_line_szB;
      /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions
         (bug#135264) */
      UInt ppc_dcbz_szB;
      UInt ppc_dcbzl_szB; /* 0 means unsupported (SIGILL) */
      /* ARM64: I- and D- minimum line sizes in log2(bytes), as
         obtained from ctr_el0.DminLine and .IminLine.  For example, a
         line size of 64 bytes would be encoded here as 6. */
      UInt arm64_dMinLine_lg2_szB;
      UInt arm64_iMinLine_lg2_szB;
      /* x86 cr0 register value, useful for distinguishing real/protected
       * mode when decoding instructions */
      UInt x86_cr0;
   }
   VexArchInfo;

/* Write default settings into *vai. */
extern
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai );

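/* Illustrative sketch (editor's addition, not part of the shipped
   header): the usual pattern is to take the defaults and then fill
   in the mandatory fields; the hwcaps chosen here are arbitrary. */
#if 0
static void example_archinfo ( VexArchInfo* vai )
{
   LibVEX_default_VexArchInfo(vai);   /* resets every field */
   vai->hwcaps  = VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16;
   vai->endness = VexEndnessLE;
   /* hwcache_info can be left at its default when the client does
      not model the host cache hierarchy. */
}
#endif
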
/* This struct carries guest and host ABI variant information that may
   be needed.  Fields which are meaningless or ignored for the
   platform in question should be set to zero.

   Settings which are believed to be correct are:

   guest_stack_redzone_size
      guest is ppc32-linux   ==> 0
      guest is ppc64-linux   ==> 288
      guest is amd64-linux   ==> 128
      guest is other         ==> inapplicable

   guest_amd64_assume_fs_is_const
      guest is amd64-linux   ==> True
      guest is amd64-darwin  ==> False
      guest is amd64-solaris ==> True
      guest is other         ==> inapplicable

   guest_amd64_assume_gs_is_const
      guest is amd64-darwin  ==> True
      guest is amd64-linux   ==> True
      guest is amd64-solaris ==> False
      guest is other         ==> inapplicable

   guest_ppc_zap_RZ_at_blr
      guest is ppc64-linux   ==> True
      guest is ppc32-linux   ==> False
      guest is other         ==> inapplicable

   guest_ppc_zap_RZ_at_bl
      guest is ppc64-linux   ==> const True
      guest is ppc32-linux   ==> const False
      guest is other         ==> inapplicable

   host_ppc_calls_use_fndescrs:
      host is ppc32-linux    ==> False
      host is ppc64-linux    ==> True
      host is other          ==> inapplicable
*/

typedef
   struct {
      /* PPC and AMD64 GUESTS only: how many bytes below the
         stack pointer are validly addressable? */
      Int guest_stack_redzone_size;

      /* AMD64 GUESTS only: should we translate %fs-prefixed
         instructions using the assumption that %fs always contains
         the same value? (typically zero on linux and solaris) */
      Bool guest_amd64_assume_fs_is_const;

      /* AMD64 GUESTS only: should we translate %gs-prefixed
         instructions using the assumption that %gs always contains
         the same value? (typically 0x60 on darwin)? */
      Bool guest_amd64_assume_gs_is_const;

      /* PPC GUESTS only: should we zap the stack red zone at a 'blr'
         (function return) ? */
      Bool guest_ppc_zap_RZ_at_blr;

      /* PPC GUESTS only: should we zap the stack red zone at a 'bl'
         (function call) ?  Is supplied with the guest address of the
         target of the call since that may be significant.  If NULL,
         is assumed equivalent to a fn which always returns False. */
      Bool (*guest_ppc_zap_RZ_at_bl)(Addr);

      /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a
         function descriptor on the host, or to the function code
         itself?  True => descriptor, False => code. */
      Bool host_ppc_calls_use_fndescrs;

      Bool guest_mips_fp_mode64;

      /* Potentially for all guests that use LL/SC: use the fallback
         (synthesised) implementation rather than passing LL/SC on to
         the host? */
      Bool guest__use_fallback_LLSC;
   }
   VexAbiInfo;

/* Write default settings into *vbi. */
extern
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi );

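/* Illustrative sketch (editor's addition, not part of the shipped
   header): filling in the amd64-linux settings listed in the comment
   above, starting from the defaults. */
#if 0
static void example_abiinfo_amd64_linux ( VexAbiInfo* vbi )
{
   LibVEX_default_VexAbiInfo(vbi);
   vbi->guest_stack_redzone_size       = 128;
   vbi->guest_amd64_assume_fs_is_const = True;
   vbi->guest_amd64_assume_gs_is_const = True;
}
#endif
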
/*-------------------------------------------------------*/
/*--- Control of Vex's optimiser (iropt).             ---*/
/*-------------------------------------------------------*/


/* VexRegisterUpdates specifies when to ensure that the guest state is
   up to date, in order of increasing accuracy but increasing expense.

     VexRegUpdSpAtMemAccess: all registers are updated at superblock
     exits, and SP is also up to date at memory exception points.  The
     SP is described by the arch specific functions
     guest_<arch>_state_requires_precise_mem_exns.

     VexRegUpdUnwindregsAtMemAccess: registers needed to make a stack
     trace are up to date at memory exception points.  Typically,
     these are PC/SP/FP.  The minimal registers are described by the
     arch specific functions guest_<arch>_state_requires_precise_mem_exns.
     This is what Valgrind sets as the default.

     VexRegUpdAllregsAtMemAccess: all registers up to date at memory
     exception points.  This is what normally might be considered as
     providing "precise exceptions for memory", but does not
     necessarily provide precise register values at any other kind of
     exception.

     VexRegUpdAllregsAtEachInsn: all registers up to date at each
     instruction.
*/
typedef
   enum {
      VexRegUpd_INVALID=0x700,
      VexRegUpdSpAtMemAccess,
      VexRegUpdUnwindregsAtMemAccess,
      VexRegUpdAllregsAtMemAccess,
      VexRegUpdAllregsAtEachInsn,
      VexRegUpdLdAllregsAtEachInsn
   }
   VexRegisterUpdates;

/* Control of Vex's optimiser. */

typedef
   struct {
      /* Controls verbosity of iropt.  0 = no output. */
      Int iropt_verbosity;
      /* Control aggressiveness of iropt.  0 = no opt, 1 = simple
         opts, 2 (default) = max optimisation. */
      Int iropt_level;
      /* Controls when registers are updated in guest state.  Note
         that this is the default value.  The VEX client can override
         this on a per-IRSB basis if it wants.  bb_to_IR() will query
         the client to ask if it wants a different setting for the
         block under construction, and that new setting is transported
         back to LibVEX_Translate, which feeds it to iropt via the
         various do_iropt_BB calls. */
      VexRegisterUpdates iropt_register_updates_default;
      /* How aggressive should iropt be in unrolling loops?  Higher
         numbers make it more enthusiastic about loop unrolling.
         Default=120.  A setting of zero disables unrolling. */
      Int iropt_unroll_thresh;
      /* What's the maximum basic block length the front end(s) allow?
         BBs longer than this are split up.  Default=60 (guest
         insns). */
      Int guest_max_insns;
      /* What's the maximum basic block size in bytes?  Default=5000 */
      Int guest_max_bytes;
      /* How aggressive should front ends be in following
         unconditional branches to known destinations?  Default=10,
         meaning that if a block contains fewer than 10 guest insns so
         far, the front end(s) will attempt to chase into its
         successor.  A setting of zero disables chasing. */
      Int guest_chase_thresh;
      /* EXPERIMENTAL: chase across conditional branches?  Not all
         front ends honour this.  Default: NO. */
      Bool guest_chase_cond;
      /* Register allocator version.  Allowed values are:
         - '2': previous, good and slow implementation.
         - '3': current, faster implementation; perhaps producing slightly
                worse spilling decisions. */
      UInt regalloc_version;
      /* Should the arm-thumb lifter be allowed to look before the
         current instruction pointer in order to check if there are no
         IT instructions so that it can optimize the IR?  Default: YES */
      Bool arm_allow_optimizing_lookback;
      /* Treat the ARM Thumb CB{N}Z instructions as branches (ending
         the block) */
      Bool strict_block_end;
      /* Should the arm64 lifter be allowed to re-order register
         writeback in a handful of special cases that make memcheck
         unhappy otherwise?  Default: YES */
      Bool arm64_allow_reordered_writeback;
      /* Whether we should lift the x86 code `call $+5; pop xxx` as
         one instruction (True) or two (False).  Default: True */
      Bool x86_optimize_callpop_idiom;
      /* Whether we should decode the vex-specific "special" instructions.
         Default: YES */
      Bool special_instruction_support;
      /* The number of bytes it is legal to poke before the start of the
         passed instruction data */
      UInt lookback_amount;
   }
   VexControl;


/* Write the default settings into *vcon. */

extern
void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon );

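/* Illustrative sketch (editor's addition, not part of the shipped
   header): adjusting a few knobs after taking the defaults; the
   values chosen are arbitrary examples. */
#if 0
static void example_control ( VexControl* vcon )
{
   LibVEX_default_VexControl(vcon);
   vcon->iropt_level        = 1;  /* simple optimisations only */
   vcon->guest_max_insns    = 99; /* split blocks after 99 guest insns */
   vcon->guest_chase_thresh = 0;  /* disable branch chasing entirely */
}
#endif
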
/*-------------------------------------------------------*/
/*--- Storage management control                      ---*/
/*-------------------------------------------------------*/

/* Allocate in Vex's temporary allocation area.  Be careful with this.
   You can only call it inside an instrumentation or optimisation
   callback that you have previously specified in a call to
   LibVEX_Translate.  The storage allocated will only stay alive until
   translation of the current basic block is complete. */
extern void* LibVEX_Alloc ( SizeT nbytes );

/* Show Vex allocation statistics. */
extern void LibVEX_ShowAllocStats ( void );


/*-------------------------------------------------------*/
/*--- Describing guest state layout                   ---*/
/*-------------------------------------------------------*/

/* Describe the guest state enough that the instrumentation
   functions can work. */

/* The max number of guest state chunks which we can describe as
   always defined (for the benefit of Memcheck). */
#define VEXGLO_N_ALWAYSDEFD  24

typedef
   struct {
      /* Total size of the guest state, in bytes.  Must be
         16-aligned. */
      Int total_sizeB;
      /* Whereabouts is the stack pointer? */
      Int offset_SP;
      Int sizeof_SP; /* 4 or 8 */
      /* Whereabouts is the frame pointer? */
      Int offset_FP;
      Int sizeof_FP; /* 4 or 8 */
      /* Whereabouts is the instruction pointer? */
      Int offset_IP;
      Int sizeof_IP; /* 4 or 8 */
      /* Describe parts of the guest state regarded as 'always
         defined'. */
      Int n_alwaysDefd;
      struct {
         Int offset;
         Int size;
      } alwaysDefd[VEXGLO_N_ALWAYSDEFD];
   }
   VexGuestLayout;

/* A note about guest state layout.

   LibVEX defines the layout for the guest state, in the file
   pub/libvex_guest_<arch>.h.  The struct will have a 16-aligned
   size.  Each translated bb is assumed to be entered with a specified
   register pointing at such a struct.  Beyond that are two copies of
   the shadow state area with the same size as the struct.  Beyond
   that is a spill area that LibVEX may spill into.  It must have size
   LibVEX_N_SPILL_BYTES, and this must be a 16-aligned number.

   On entry, the baseblock pointer register must be 16-aligned.

   There must be no holes in between the primary guest state, its two
   copies, and the spill area.  In short, all 4 areas must have a
   16-aligned size and be 16-aligned, and placed back-to-back.
*/

#define LibVEX_N_SPILL_BYTES 4096

/* The size of the guest state must be a multiple of this number. */
#define LibVEX_GUEST_STATE_ALIGN 16

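/* Illustrative sketch (editor's addition, not part of the shipped
   header): per the note above, the baseblock is the guest state plus
   its two shadow copies plus the spill area, back-to-back, so its
   total size follows directly.  'guest_state_szB' stands for the
   16-aligned size of the per-arch guest struct. */
#if 0
static SizeT example_baseblock_szB ( SizeT guest_state_szB )
{
   return 3 * guest_state_szB + LibVEX_N_SPILL_BYTES;
}
#endif
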
/*-------------------------------------------------------*/
/*--- Initialisation of the library                   ---*/
/*-------------------------------------------------------*/

/* Initialise the library.  You must call this first. */

extern void LibVEX_Init (

   /* failure exit function */
#  if (__cplusplus == 1 && __GNUC__ && __GNUC__ <= 3) || defined(_MSC_VER)
   /* g++ 3.x doesn't understand attributes on function parameters.
      See #265762. */
#  else
   __attribute__ ((noreturn))
#  endif
   void (*failure_exit) ( void ),

   /* logging output function */
   void (*log_bytes) ( const HChar*, SizeT nbytes ),

   /* debug paranoia level */
   Int debuglevel,

   /* Control ... */
   const VexControl* vcon
);

/* Update the global VexControl */
extern void LibVEX_Update_Control (const VexControl * );

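/* Illustrative sketch (editor's addition, not part of the shipped
   header): a minimal initialisation sequence.  'my_failure_exit' and
   'my_log_bytes' are hypothetical client callbacks. */
#if 0
__attribute__((noreturn))
static void my_failure_exit ( void )
{
   for (;;) { }  /* in practice: abort(), longjmp, ... */
}

static void my_log_bytes ( const HChar* bytes, SizeT nbytes )
{
   /* e.g. fwrite(bytes, 1, nbytes, stderr); */
   (void)bytes; (void)nbytes;
}

static void example_init ( void )
{
   VexControl vcon;
   LibVEX_default_VexControl(&vcon);
   LibVEX_Init(my_failure_exit, my_log_bytes, /*debuglevel*/0, &vcon);
}
#endif
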
/*-------------------------------------------------------*/
/*--- Make a translation                              ---*/
/*-------------------------------------------------------*/

/* Describes the outcome of a translation attempt. */
typedef
   struct {
      /* overall status */
      enum { VexTransOK=0x800,
             VexTransAccessFail, VexTransOutputFull } status;
      /* The number of extents that have a self-check (0 to 3) */
      UInt n_sc_extents;
      /* Offset in generated code of the profile inc, or -1 if
         none.  Needed for later patching. */
      Int offs_profInc;
      /* Stats only: the number of guest insns included in the
         translation.  It may be zero (!). */
      UInt n_guest_instrs;
   }
   VexTranslateResult;


/* Describes precisely the pieces of guest code that a translation
   covers.  Now that Vex can chase across BB boundaries, the old
   scheme of describing a chunk of guest code merely by its start
   address and length is inadequate.

   This struct uses 20 bytes on a 32-bit architecture and 32 bytes on a
   64-bit architecture.  Space is important as clients will have to store
   one of these for each translation made.
*/
typedef
   struct {
      Addr   base[3];
      UShort len[3];
      UShort n_used;
   }
   VexGuestExtents;

/* A structure to carry arguments for LibVEX_Translate.  There are so
   many of them, it seems better to have a structure. */
typedef
   struct {
      /* IN: The instruction sets we are translating from and to.  And
         guest/host misc info. */
      VexArch      arch_guest;
      VexArchInfo  archinfo_guest;
      VexArch      arch_host;
      VexArchInfo  archinfo_host;
      VexAbiInfo   abiinfo_both;

      /* IN: an opaque value which is passed as the first arg to all
         callback functions supplied in this struct.  Vex has no idea
         what's at the other end of this pointer. */
      void*   callback_opaque;

      /* IN: the block to translate, and its guest address. */
      /* where are the actual bytes in the host's address space? */
      const UChar*  guest_bytes;
      /* where do the bytes really come from in the guest's aspace?
         This is the post-redirection guest address.  Not that Vex
         understands anything about redirection; that is all done on
         the Valgrind side. */
      Addr    guest_bytes_addr;

      /* Is it OK to chase into this guest address?  May not be
         NULL. */
      Bool    (*chase_into_ok) ( /*callback_opaque*/void*, Addr );

      /* OUT: which bits of guest code actually got translated */
      VexGuestExtents* guest_extents;

      /* IN: a place to put the resulting code, and its size */
      UChar*  host_bytes;
      Int     host_bytes_size;
      /* OUT: how much of the output area is used. */
      Int*    host_bytes_used;

      /* IN: optionally, two instrumentation functions.  May be
         NULL. */
      IRSB*   (*instrument1) ( /*callback_opaque*/void*,
                               IRSB*,
                               const VexGuestLayout*,
                               const VexGuestExtents*,
                               const VexArchInfo*,
                               IRType gWordTy, IRType hWordTy );
      IRSB*   (*instrument2) ( /*callback_opaque*/void*,
                               IRSB*,
                               const VexGuestLayout*,
                               const VexGuestExtents*,
                               const VexArchInfo*,
                               IRType gWordTy, IRType hWordTy );

      IRSB* (*finaltidy) ( IRSB* );

      /* IN: a callback used to ask the caller which of the extents,
         if any, a self check is required for.  Must not be NULL.
         The returned value is a bitmask with a 1 in position i indicating
         that the i'th extent needs a check.  Since there can be at most
         3 extents, the returned values must be between 0 and 7.

         This call also gives the VEX client the opportunity to change
         the precision of register update preservation as performed by
         the IR optimiser.  Before the call, VEX will set *pxControl
         to hold the default register-update status value as specified
         by VexControl::iropt_register_updates_default as passed to
         LibVEX_Init at library initialisation time.  The client (in
         this callback) can if it wants, inspect the value and change
         it to something different, and that value will be used for
         subsequent IR optimisation of the block. */
      UInt (*needs_self_check)( /*callback_opaque*/void*,
                                /*MAYBE_MOD*/VexRegisterUpdates* pxControl,
                                const VexGuestExtents* );

      /* IN: optionally, a callback which allows the caller to add its
         own IR preamble following the self-check and any other
         VEX-generated preamble, if any.  May be NULL.  If non-NULL,
         the IRSB under construction is handed to this function, which
         presumably adds IR statements to it.  The callback may
         optionally complete the block and direct bb_to_IR not to
         disassemble any instructions into it; this is indicated by
         the callback returning True.
      */
      Bool    (*preamble_function)(/*callback_opaque*/void*, IRSB*);

      /* IN: debug: trace vex activity at various points */
      Int     traceflags;

      /* IN: debug: print diagnostics when an illegal instr is detected */
      Bool    sigill_diag;

      /* IN: profiling: add a 64 bit profiler counter increment to the
         translation? */
      Bool    addProfInc;

      /* IN: address of the dispatcher entry points.  Describes the
         places where generated code should jump to at the end of each
         bb.

         At the end of each translation, the next guest address is
         placed in the host's standard return register (x86: %eax,
         amd64: %rax, ppc32: %r3, ppc64: %r3).  Optionally, the guest
         state pointer register (on host x86: %ebp; amd64: %rbp;
         ppc32/64: r31) may be set to a VEX_TRC_ value to indicate any
         special action required before the next block is run.

         Control is then passed back to the dispatcher (beyond Vex's
         control; caller supplies this) in the following way:

         - On host archs which lack a link register (x86, amd64), by a
           jump to the host address specified in
           'dispatcher_assisted', if the guest state pointer has been
           changed so as to request some action before the next block
           is run, or 'dispatcher_unassisted' (the fast path), in
           which it is assumed that the guest state pointer is
           unchanged and we wish to continue directly with the next
           translation.  Both of these must be non-NULL.

         - On host archs which have a link register (ppc32, ppc64), by
           a branch to the link register (which is guaranteed to be
           unchanged from whatever it was at entry to the
           translation).  'dispatch_assisted' and
           'dispatch_unassisted' must be NULL.

         The aim is to get back and forth between translations and the
         dispatcher without creating memory traffic to store return
         addresses.

         FIXME: update this comment
      */
      const void* disp_cp_chain_me_to_slowEP;
      const void* disp_cp_chain_me_to_fastEP;
      const void* disp_cp_xindir;
      const void* disp_cp_xassisted;
   }
   VexTranslateArgs;


extern
VexTranslateResult LibVEX_Translate ( VexTranslateArgs* );
extern
IRSB *LibVEX_Lift ( VexTranslateArgs*,
                    VexTranslateResult*,
                    VexRegisterUpdates* );
extern
void LibVEX_Codegen ( VexTranslateArgs*,
                      VexTranslateResult*,
                      IRSB*,
                      VexRegisterUpdates );

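/* Illustrative sketch (editor's addition, not part of the shipped
   header): the minimum wiring needed before calling LibVEX_Translate.
   It assumes the arch/abi fields and the four dispatcher pointers of
   *vta were already filled in and LibVEX_Init has been called; the
   two helper callbacks are hypothetical. */
#if 0
static Bool never_chase ( void* opaque, Addr addr )
{
   (void)opaque; (void)addr;
   return False;               /* never chase across block boundaries */
}

static UInt never_self_check ( void* opaque, VexRegisterUpdates* pxc,
                               const VexGuestExtents* vge )
{
   (void)opaque; (void)pxc; (void)vge;
   return 0;                   /* no extent gets a self-check */
}

static void example_translate ( VexTranslateArgs* vta,
                                const UChar* code, Addr guest_addr,
                                UChar* out, Int out_size, Int* out_used,
                                VexGuestExtents* vge )
{
   vta->guest_bytes      = code;        /* bytes in host memory */
   vta->guest_bytes_addr = guest_addr;  /* their guest-side address */
   vta->chase_into_ok    = never_chase;
   vta->guest_extents    = vge;
   vta->host_bytes       = out;
   vta->host_bytes_size  = out_size;
   vta->host_bytes_used  = out_used;
   vta->needs_self_check = never_self_check;

   VexTranslateResult res = LibVEX_Translate(vta);
   if (res.status != VexTransOK) {
      /* VexTransAccessFail or VexTransOutputFull: handle accordingly */
   }
}
#endif
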
/* A subtlety re interaction between self-checking translations and
   bb-chasing.  The supplied chase_into_ok function should say NO
   (False) when presented with any address for which you might want to
   make a self-checking translation.

   If it doesn't do that, you may end up with Vex chasing from BB #1
   to BB #2 (fine); but if you wanted checking for #2 and not #1, that
   would not be the result.  Therefore chase_into_ok should disallow
   following into #2.  That will force the caller to eventually
   request a new translation starting at #2, at which point Vex will
   correctly observe the make-a-self-check flag.

   FIXME: is this still up to date? */


/*-------------------------------------------------------*/
/*--- Patch existing translations                     ---*/
/*-------------------------------------------------------*/

/* A host address range that was modified by the functions below.
   Callers must request I-cache syncing after the call as appropriate. */
typedef
   struct {
      HWord start;
      HWord len;     /* always > 0 */
   }
   VexInvalRange;

/* Chain an XDirect jump located at place_to_chain so it jumps to
   place_to_jump_to.  It is expected (and checked) that this site
   currently contains a call to the dispatcher specified by
   disp_cp_chain_me_EXPECTED. */
extern
VexInvalRange LibVEX_Chain ( VexArch     arch_host,
                             VexEndness  endness_host,
                             void*       place_to_chain,
                             const void* disp_cp_chain_me_EXPECTED,
                             const void* place_to_jump_to );

/* Undo an XDirect jump located at place_to_unchain, so it is
   converted back into a call to disp_cp_chain_me.  It is expected
   (and checked) that this site currently contains a jump directly to
   the address specified by place_to_jump_to_EXPECTED. */
extern
VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
                               VexEndness  endness_host,
                               void*       place_to_unchain,
                               const void* place_to_jump_to_EXPECTED,
                               const void* disp_cp_chain_me );

/* Returns a constant -- the size of the event check that is put at
   the start of every translation.  This makes it possible to
   calculate the fast entry point address if the slow entry point
   address is known (the usual case), or vice versa. */
extern
Int LibVEX_evCheckSzB ( VexArch arch_host );

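/* Illustrative sketch (editor's addition, not part of the shipped
   header): the fast entry point simply skips the event check placed
   at the start of the translation. */
#if 0
static void* example_fast_EP ( VexArch arch_host, void* slow_EP )
{
   return (UChar*)slow_EP + LibVEX_evCheckSzB(arch_host);
}
#endif
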
/* Patch the counter location into an existing ProfInc point.  The
   specified point is checked to make sure it is plausible. */
extern
VexInvalRange LibVEX_PatchProfInc ( VexArch      arch_host,
                                    VexEndness   endness_host,
                                    void*        place_to_patch,
                                    const ULong* location_of_counter );


/*-------------------------------------------------------*/
/*--- Show accumulated statistics                     ---*/
/*-------------------------------------------------------*/

extern void LibVEX_ShowStats ( void );

/*-------------------------------------------------------*/
/*-- IR injection                                      --*/
/*-------------------------------------------------------*/

/* IR Injection Control Block */

#define NO_ROUNDING_MODE (~0u)

typedef
   struct {
      IROp  op;          // the operation to perform
      HWord result;      // address of the result
      HWord opnd1;       // address of 1st operand
      HWord opnd2;       // address of 2nd operand
      HWord opnd3;       // address of 3rd operand
      HWord opnd4;       // address of 4th operand
      IRType t_result;   // type of result
      IRType t_opnd1;    // type of 1st operand
      IRType t_opnd2;    // type of 2nd operand
      IRType t_opnd3;    // type of 3rd operand
      IRType t_opnd4;    // type of 4th operand
      UInt  rounding_mode;
      UInt  num_operands; // excluding rounding mode, if any
      /* The following two members describe whether the operation has
       * an immediate operand.  There are a few restrictions:
       * (1) An operator can have at most one immediate operand.
       * (2) If there is an immediate operand, it is the right-most operand.
       * An immediate_index of 0 means there is no immediate operand.
       */
      UInt immediate_type;  // size of immediate: Ity_I8, Ity_I16
      UInt immediate_index; // operand number: 1, 2
   }
   IRICB;

extern void LibVEX_InitIRI ( const IRICB * );

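/* Illustrative sketch (editor's addition, not part of the shipped
   header): filling an IRICB for a 2-operand 64-bit add and handing
   it to LibVEX_InitIRI.  Whether unused fields may simply be left
   zero is an assumption here. */
#if 0
static void example_iri ( ULong* res, ULong* a1, ULong* a2 )
{
   IRICB cb = {0};
   cb.op            = Iop_Add64;         /* from libvex_ir.h */
   cb.result        = (HWord)res;
   cb.opnd1         = (HWord)a1;
   cb.opnd2         = (HWord)a2;
   cb.t_result      = Ity_I64;
   cb.t_opnd1       = Ity_I64;
   cb.t_opnd2       = Ity_I64;
   cb.rounding_mode = NO_ROUNDING_MODE;  /* integer op: no rounding */
   cb.num_operands  = 2;
   cb.immediate_index = 0;               /* no immediate operand */
   LibVEX_InitIRI(&cb);
}
#endif
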
/*-------------------------------------------------------*/
/*--- Notes                                           ---*/
/*-------------------------------------------------------*/

/* Code generation conventions that need to be recorded somewhere.
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   x86
   ~~~
   Generated code should be entered using a JMP instruction.  On
   entry, %ebp should point to the guest state, and %esp should be a
   valid stack pointer.  The generated code may change %eax, %ebx,
   %ecx, %edx, %esi, %edi, all the FP registers and control state, and
   all the XMM registers.

   On entry, the FPU control word should be set to 0x027F, and the SSE
   control word (%mxcsr) should be set to 0x1F80.  On exit, they
   should still have those values (after masking off the lowest 6 bits
   of %mxcsr).  If they don't, there is a bug in VEX-generated code.

   Generated code returns to the scheduler using a JMP instruction, to
   the address specified in the .dispatch field of VexTranslateArgs.
   %eax (or %eax:%edx, if simulating a 64-bit target) will contain the
   guest address of the next block to execute.  %ebp may be changed
   to a VEX_TRC_ value, otherwise it should be as it was at entry.

   CRITICAL ISSUES in x86 code generation.  The only known critical
   issue is that the host FPU and SSE state is not properly saved
   across calls to helper functions.  If any helper references any
   such state, it is likely (1) to misbehave itself, since the FP
   stack tags will not be as expected, and (2) after returning to
   generated code, the generated code is likely to go wrong.  This
   really should be fixed.

   amd64
   ~~~~~
   Analogous to x86.

   ppc32
   ~~~~~
   On entry, guest state pointer is r31.  .dispatch must be NULL.
   Control is returned with a branch to the link register.  Generated
   code will not change lr.  At return, r3 holds the next guest addr
   (or r3:r4 ?).  r31 may be changed to a VEX_TRC_ value,
   otherwise it should be as it was at entry.

   ppc64
   ~~~~~
   Same as ppc32.

   arm32
   ~~~~~
   r8 is GSP.

   arm64
   ~~~~~
   r21 is GSP.

   ALL GUEST ARCHITECTURES
   ~~~~~~~~~~~~~~~~~~~~~~~
   The guest state must contain two pseudo-registers, guest_CMSTART
   and guest_CMLEN.  These are used to specify guest address ranges,
   either of code to be invalidated, when used in conjunction with
   Ijk_InvalICache, or of d-cache ranges to be flushed, when used in
   conjunction with Ijk_FlushDCache.  In such cases, the two _CM
   pseudo-regs should be filled in by the IR, and then an exit with
   one of the two abovementioned Ijk_ kinds should happen, so that the
   dispatcher can action them.  Both pseudo-regs must have size equal
   to the guest word size.

   The architecture must also provide a third pseudo-register,
   guest_NRADDR, also guest-word-sized.  This is used to record the
   unredirected guest address at the start of a translation whose
   start has been redirected.  By reading this pseudo-register shortly
   afterwards, the translation can find out what the corresponding
   no-redirection address was.  Note, this is only set for wrap-style
   redirects, not for replace-style ones.
*/
#endif /* ndef __LIBVEX_H */

/*---------------------------------------------------------------*/
/*--- libvex.h                                                 ---*/
/*---------------------------------------------------------------*/