@aptre/v86 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111) hide show
  1. package/LICENSE +22 -0
  2. package/LICENSE.MIT +22 -0
  3. package/Readme.md +237 -0
  4. package/dist/v86.browser.js +26666 -0
  5. package/dist/v86.browser.js.map +7 -0
  6. package/dist/v86.js +26632 -0
  7. package/dist/v86.js.map +7 -0
  8. package/gen/generate_analyzer.ts +512 -0
  9. package/gen/generate_interpreter.ts +522 -0
  10. package/gen/generate_jit.ts +624 -0
  11. package/gen/rust_ast.ts +107 -0
  12. package/gen/util.ts +35 -0
  13. package/gen/x86_table.ts +1836 -0
  14. package/lib/9p.ts +1547 -0
  15. package/lib/filesystem.ts +1879 -0
  16. package/lib/marshall.ts +168 -0
  17. package/lib/softfloat/softfloat.c +32501 -0
  18. package/lib/zstd/zstddeclib.c +13520 -0
  19. package/package.json +75 -0
  20. package/src/acpi.ts +267 -0
  21. package/src/browser/dummy_screen.ts +106 -0
  22. package/src/browser/fake_network.ts +1771 -0
  23. package/src/browser/fetch_network.ts +361 -0
  24. package/src/browser/filestorage.ts +124 -0
  25. package/src/browser/inbrowser_network.ts +57 -0
  26. package/src/browser/keyboard.ts +564 -0
  27. package/src/browser/main.ts +3415 -0
  28. package/src/browser/mouse.ts +255 -0
  29. package/src/browser/network.ts +142 -0
  30. package/src/browser/print_stats.ts +336 -0
  31. package/src/browser/screen.ts +978 -0
  32. package/src/browser/serial.ts +316 -0
  33. package/src/browser/speaker.ts +1223 -0
  34. package/src/browser/starter.ts +1688 -0
  35. package/src/browser/wisp_network.ts +332 -0
  36. package/src/browser/worker_bus.ts +64 -0
  37. package/src/buffer.ts +652 -0
  38. package/src/bus.ts +78 -0
  39. package/src/const.ts +128 -0
  40. package/src/cpu.ts +2891 -0
  41. package/src/dma.ts +474 -0
  42. package/src/elf.ts +251 -0
  43. package/src/floppy.ts +1778 -0
  44. package/src/ide.ts +3455 -0
  45. package/src/io.ts +504 -0
  46. package/src/iso9660.ts +317 -0
  47. package/src/kernel.ts +250 -0
  48. package/src/lib.ts +645 -0
  49. package/src/log.ts +149 -0
  50. package/src/main.ts +199 -0
  51. package/src/ne2k.ts +1589 -0
  52. package/src/pci.ts +815 -0
  53. package/src/pit.ts +406 -0
  54. package/src/ps2.ts +820 -0
  55. package/src/rtc.ts +537 -0
  56. package/src/rust/analysis.rs +101 -0
  57. package/src/rust/codegen.rs +2660 -0
  58. package/src/rust/config.rs +3 -0
  59. package/src/rust/control_flow.rs +425 -0
  60. package/src/rust/cpu/apic.rs +658 -0
  61. package/src/rust/cpu/arith.rs +1207 -0
  62. package/src/rust/cpu/call_indirect.rs +2 -0
  63. package/src/rust/cpu/cpu.rs +4501 -0
  64. package/src/rust/cpu/fpu.rs +923 -0
  65. package/src/rust/cpu/global_pointers.rs +112 -0
  66. package/src/rust/cpu/instructions.rs +2486 -0
  67. package/src/rust/cpu/instructions_0f.rs +5261 -0
  68. package/src/rust/cpu/ioapic.rs +316 -0
  69. package/src/rust/cpu/memory.rs +351 -0
  70. package/src/rust/cpu/misc_instr.rs +613 -0
  71. package/src/rust/cpu/mod.rs +16 -0
  72. package/src/rust/cpu/modrm.rs +133 -0
  73. package/src/rust/cpu/pic.rs +402 -0
  74. package/src/rust/cpu/sse_instr.rs +361 -0
  75. package/src/rust/cpu/string.rs +701 -0
  76. package/src/rust/cpu/vga.rs +175 -0
  77. package/src/rust/cpu_context.rs +69 -0
  78. package/src/rust/dbg.rs +98 -0
  79. package/src/rust/gen/analyzer.rs +3807 -0
  80. package/src/rust/gen/analyzer0f.rs +3992 -0
  81. package/src/rust/gen/interpreter.rs +4447 -0
  82. package/src/rust/gen/interpreter0f.rs +5404 -0
  83. package/src/rust/gen/jit.rs +5080 -0
  84. package/src/rust/gen/jit0f.rs +5547 -0
  85. package/src/rust/gen/mod.rs +14 -0
  86. package/src/rust/jit.rs +2443 -0
  87. package/src/rust/jit_instructions.rs +7881 -0
  88. package/src/rust/js_api.rs +6 -0
  89. package/src/rust/leb.rs +46 -0
  90. package/src/rust/lib.rs +29 -0
  91. package/src/rust/modrm.rs +330 -0
  92. package/src/rust/opstats.rs +249 -0
  93. package/src/rust/page.rs +15 -0
  94. package/src/rust/paging.rs +25 -0
  95. package/src/rust/prefix.rs +15 -0
  96. package/src/rust/profiler.rs +155 -0
  97. package/src/rust/regs.rs +38 -0
  98. package/src/rust/softfloat.rs +286 -0
  99. package/src/rust/state_flags.rs +27 -0
  100. package/src/rust/wasmgen/mod.rs +2 -0
  101. package/src/rust/wasmgen/wasm_builder.rs +1047 -0
  102. package/src/rust/wasmgen/wasm_opcodes.rs +221 -0
  103. package/src/rust/zstd.rs +105 -0
  104. package/src/sb16.ts +1928 -0
  105. package/src/state.ts +359 -0
  106. package/src/uart.ts +472 -0
  107. package/src/vga.ts +2791 -0
  108. package/src/virtio.ts +1756 -0
  109. package/src/virtio_balloon.ts +273 -0
  110. package/src/virtio_console.ts +372 -0
  111. package/src/virtio_net.ts +326 -0
@@ -0,0 +1,4501 @@
1
+ #![allow(non_upper_case_globals)]
2
+
3
+ use crate::config;
4
+ use crate::cpu::fpu::fpu_set_tag_word;
5
+ use crate::cpu::global_pointers::*;
6
+ use crate::cpu::memory;
7
+ use crate::cpu::misc_instr::{
8
+ adjust_stack_reg, get_stack_pointer, getaf, getcf, getof, getpf, getsf, getzf, pop16, pop32s,
9
+ push16, push32,
10
+ };
11
+ use crate::cpu::modrm::{resolve_modrm16, resolve_modrm32};
12
+ use crate::cpu::{apic, ioapic, pic};
13
+ use crate::dbg::dbg_trace;
14
+ use crate::gen;
15
+ use crate::jit;
16
+ use crate::jit::is_near_end_of_page;
17
+ use crate::opstats;
18
+ use crate::page::Page;
19
+ use crate::paging::OrPageFault;
20
+ use crate::prefix;
21
+ use crate::profiler;
22
+ use crate::profiler::stat;
23
+ use crate::softfloat;
24
+ use crate::state_flags::CachedStateFlags;
25
+
26
+ use std::collections::HashSet;
27
+ use std::ptr;
28
+
29
// Functions resolved through the wasm indirect-function table.
// NOTE(review): presumably used to invoke jit-generated code via `WASM_TABLE_OFFSET`
// — confirm against the jit table setup.
mod wasm {
    extern "C" {
        // Call table entry `f` with the single u16 argument `x`.
        pub fn call_indirect1(f: i32, x: u16);
    }
}
34
+
35
/// Functions imported from the JavaScript side of the emulator.
/// Signatures must stay in sync with the corresponding JS implementations.
pub mod js {
    extern "C" {
        // NOTE(review): return value semantics (whether `true` means "handled")
        // are defined by the JS implementation — confirm there.
        pub fn cpu_exception_hook(interrupt: i32) -> bool;
        // Monotonic time in milliseconds (browser performance.now-like).
        pub fn microtick() -> f64;
        pub fn run_hardware_timers(acpi_enabled: bool, t: f64) -> f64;
        pub fn cpu_event_halt();
        pub fn stop_idling();

        // Port-mapped I/O, dispatched to device emulation on the JS side.
        pub fn io_port_read8(port: i32) -> i32;
        pub fn io_port_read16(port: i32) -> i32;
        pub fn io_port_read32(port: i32) -> i32;

        pub fn io_port_write8(port: i32, value: i32);
        pub fn io_port_write16(port: i32, value: i32);
        pub fn io_port_write32(port: i32, value: i32);

        pub fn get_rand_int() -> i32;
    }
}
54
+
55
/// The offset for our generated functions in the wasm table. Every index less than this is
/// reserved for rustc's indirect functions.
pub const WASM_TABLE_OFFSET: u32 = 1024;
58
+
59
/// A 128-bit (XMM-style) register, viewable through any of the lane layouts below.
/// `repr(C)` + `repr(align(16))` give it a fixed layout and the 16-byte alignment
/// required for aligned 128-bit loads/stores.
#[derive(Copy, Clone)]
#[repr(C)]
#[repr(align(16))]
pub union reg128 {
    pub i8: [i8; 16],
    pub i16: [i16; 8],
    pub i32: [i32; 4],
    pub i64: [i64; 2],
    pub u8: [u8; 16],
    pub u16: [u16; 8],
    pub u32: [u32; 4],
    pub u64: [u64; 2],
    pub f32: [f32; 4],
    pub f64: [f64; 2],
}
74
+
75
pub const CHECK_MISSED_ENTRY_POINTS: bool = false;

// Maximum number of instructions executed per interpreter main-loop invocation.
pub const INTERPRETER_ITERATION_LIMIT: u32 = 100_001;

// How often, in milliseconds, to yield to the browser for rendering and running events
pub const TIME_PER_FRAME: f64 = 1.0;

// NOTE(review): FLAG_SUB is the sign bit used as an internal marker, not an
// architectural EFLAGS bit — confirm against the lazy-flags code that consumes it.
pub const FLAG_SUB: i32 = -0x8000_0000;
// Architectural x86 EFLAGS bits.
pub const FLAG_CARRY: i32 = 1;
pub const FLAG_PARITY: i32 = 4;
pub const FLAG_ADJUST: i32 = 16;
pub const FLAG_ZERO: i32 = 64;
pub const FLAG_SIGN: i32 = 128;
pub const FLAG_TRAP: i32 = 256;
pub const FLAG_INTERRUPT: i32 = 512;
pub const FLAG_DIRECTION: i32 = 1024;
pub const FLAG_OVERFLOW: i32 = 2048;
// I/O privilege level is a two-bit field (bits 12-13).
pub const FLAG_IOPL: i32 = 1 << 12 | 1 << 13;
pub const FLAG_NT: i32 = 1 << 14;
pub const FLAG_RF: i32 = 1 << 16;
pub const FLAG_VM: i32 = 1 << 17;
pub const FLAG_AC: i32 = 1 << 18;
pub const FLAG_VIF: i32 = 1 << 19;
pub const FLAG_VIP: i32 = 1 << 20;
pub const FLAG_ID: i32 = 1 << 21;
// Bit 1 of EFLAGS is reserved and always reads as 1.
pub const FLAGS_DEFAULT: i32 = 1 << 1;
// All writable EFLAGS bits.
pub const FLAGS_MASK: i32 = FLAG_CARRY
    | FLAG_PARITY
    | FLAG_ADJUST
    | FLAG_ZERO
    | FLAG_SIGN
    | FLAG_TRAP
    | FLAG_INTERRUPT
    | FLAG_DIRECTION
    | FLAG_OVERFLOW
    | FLAG_IOPL
    | FLAG_NT
    | FLAG_RF
    | FLAG_VM
    | FLAG_AC
    | FLAG_VIF
    | FLAG_VIP
    | FLAG_ID;
// The six arithmetic status flags (the ones tracked lazily).
pub const FLAGS_ALL: i32 =
    FLAG_CARRY | FLAG_PARITY | FLAG_ADJUST | FLAG_ZERO | FLAG_SIGN | FLAG_OVERFLOW;
// Operand-size codes: operand bit width minus one.
pub const OPSIZE_8: i32 = 7;
pub const OPSIZE_16: i32 = 15;
pub const OPSIZE_32: i32 = 31;
123
+
124
// Indices of the 32-bit general-purpose registers.
pub const EAX: i32 = 0;
pub const ECX: i32 = 1;
pub const EDX: i32 = 2;
pub const EBX: i32 = 3;
pub const ESP: i32 = 4;
pub const EBP: i32 = 5;
pub const ESI: i32 = 6;
pub const EDI: i32 = 7;

// Indices of the 16-bit register views (same numbering as the 32-bit registers).
pub const AX: i32 = 0;
pub const CX: i32 = 1;
pub const DX: i32 = 2;
pub const BX: i32 = 3;
pub const SP: i32 = 4;
pub const BP: i32 = 5;
pub const SI: i32 = 6;
pub const DI: i32 = 7;

// Indices of the 8-bit register views; 4-7 address the high byte of the
// corresponding low word (AH = high byte of AX, etc.).
pub const AL: i32 = 0;
pub const CL: i32 = 1;
pub const DL: i32 = 2;
pub const BL: i32 = 3;
pub const AH: i32 = 4;
pub const CH: i32 = 5;
pub const DH: i32 = 6;
pub const BH: i32 = 7;

// Segment register indices; TR (task register) and LDTR are stored in the
// same tables as the ordinary segment registers.
pub const ES: i32 = 0;
pub const CS: i32 = 1;
pub const SS: i32 = 2;
pub const DS: i32 = 3;
pub const FS: i32 = 4;
pub const GS: i32 = 5;
pub const TR: i32 = 6;

pub const LDTR: i32 = 7;
160
// x86 page-table entry bits.
pub const PAGE_TABLE_PRESENT_MASK: i32 = 1 << 0;
pub const PAGE_TABLE_RW_MASK: i32 = 1 << 1;
pub const PAGE_TABLE_USER_MASK: i32 = 1 << 2;
pub const PAGE_TABLE_ACCESSED_MASK: i32 = 1 << 5;
pub const PAGE_TABLE_DIRTY_MASK: i32 = 1 << 6;
pub const PAGE_TABLE_PSE_MASK: i32 = 1 << 7;
pub const PAGE_TABLE_GLOBAL_MASK: i32 = 1 << 8;
// Block granularity for memory-mapped ranges: 1 << 17 = 128 KiB per block.
pub const MMAP_BLOCK_BITS: i32 = 17;
pub const MMAP_BLOCK_SIZE: i32 = 1 << MMAP_BLOCK_BITS;
// Control register 0 bits (architectural).
pub const CR0_PE: i32 = 1;
pub const CR0_MP: i32 = 1 << 1;
pub const CR0_EM: i32 = 1 << 2;
pub const CR0_TS: i32 = 1 << 3;
pub const CR0_ET: i32 = 1 << 4;
pub const CR0_WP: i32 = 1 << 16;
pub const CR0_AM: i32 = 1 << 18;
pub const CR0_NW: i32 = 1 << 29;
pub const CR0_CD: i32 = 1 << 30;
pub const CR0_PG: i32 = 1 << 31;
// Control register 4 bits (architectural).
pub const CR4_VME: i32 = 1;
pub const CR4_PVI: i32 = 1 << 1;
pub const CR4_TSD: i32 = 1 << 2;
pub const CR4_PSE: i32 = 1 << 4;
pub const CR4_DE: i32 = 1 << 3;
pub const CR4_PAE: i32 = 1 << 5;
pub const CR4_PGE: i32 = 1 << 7;
pub const CR4_OSFXSR: i32 = 1 << 9;
pub const CR4_OSXMMEXCPT: i32 = 1 << 10;
pub const CR4_SMEP: i32 = 1 << 20;
189
+
190
// Byte offsets of fields within a 32-bit TSS (task-state segment).
pub const TSR_BACKLINK: i32 = 0x00;
pub const TSR_CR3: i32 = 0x1C;
pub const TSR_EIP: i32 = 0x20;
pub const TSR_EFLAGS: i32 = 0x24;

pub const TSR_EAX: i32 = 0x28;
pub const TSR_ECX: i32 = 0x2c;
pub const TSR_EDX: i32 = 0x30;
pub const TSR_EBX: i32 = 0x34;
pub const TSR_ESP: i32 = 0x38;
pub const TSR_EBP: i32 = 0x3c;
pub const TSR_ESI: i32 = 0x40;
pub const TSR_EDI: i32 = 0x44;

pub const TSR_ES: i32 = 0x48;
pub const TSR_CS: i32 = 0x4c;
pub const TSR_SS: i32 = 0x50;
pub const TSR_DS: i32 = 0x54;
pub const TSR_FS: i32 = 0x58;
pub const TSR_GS: i32 = 0x5c;
pub const TSR_LDT: i32 = 0x60;
211
+
212
// Model-specific register (MSR) numbers, as used by rdmsr/wrmsr.
pub const IA32_TIME_STAMP_COUNTER: i32 = 0x10;
pub const IA32_PLATFORM_ID: i32 = 0x17;
pub const IA32_APIC_BASE: i32 = 0x1B;
pub const MSR_TEST_CTRL: i32 = 0x33;
pub const MSR_SMI_COUNT: i32 = 0x34;
pub const IA32_FEAT_CTL: i32 = 0x3A;
pub const IA32_SPEC_CTRL: i32 = 0x48;
pub const IA32_BIOS_UPDT_TRIG: i32 = 0x79;
pub const IA32_BIOS_SIGN_ID: i32 = 0x8B;
pub const IA32_PMC0: i32 = 0xC1;
pub const IA32_PMC1: i32 = 0xC2;
pub const MSR_PLATFORM_INFO: i32 = 0xCE;
pub const MSR_TSX_FORCE_ABORT: i32 = 0x10F;
pub const IA32_TSX_CTRL: i32 = 0x122;
pub const IA32_MCU_OPT_CTRL: i32 = 0x123;
pub const MISC_FEATURE_ENABLES: i32 = 0x140;
pub const IA32_SYSENTER_CS: i32 = 0x174;
pub const IA32_SYSENTER_ESP: i32 = 0x175;
pub const IA32_SYSENTER_EIP: i32 = 0x176;
pub const IA32_MCG_CAP: i32 = 0x179;
pub const IA32_PERFEVTSEL0: i32 = 0x186;
pub const IA32_PERFEVTSEL1: i32 = 0x187;
pub const IA32_MISC_ENABLE: i32 = 0x1A0;
pub const IA32_PAT: i32 = 0x277;
pub const IA32_RTIT_CTL: i32 = 0x570;
pub const MSR_PKG_C2_RESIDENCY: i32 = 0x60D;
// High MSR numbers don't fit in a positive i32; the u32 value is reinterpreted.
pub const IA32_KERNEL_GS_BASE: i32 = 0xC0000101u32 as i32;
pub const MSR_AMD64_LS_CFG: i32 = 0xC0011020u32 as i32;
pub const MSR_AMD64_DE_CFG: i32 = 0xC0011029u32 as i32;

// Bits of the IA32_APIC_BASE MSR.
pub const IA32_APIC_BASE_BSP: i32 = 1 << 8;
pub const IA32_APIC_BASE_EXTD: i32 = 1 << 10;
pub const IA32_APIC_BASE_EN: i32 = 1 << 11;

// Conventional physical MMIO windows of the I/O APIC and local APIC.
pub const IOAPIC_MEM_ADDRESS: u32 = 0xFEC00000;
pub const IOAPIC_MEM_SIZE: u32 = 32;
pub const APIC_MEM_ADDRESS: u32 = 0xFEE00000;
pub const APIC_MEM_SIZE: u32 = 0x1000;
250
+
251
// SSE control/status register (MXCSR) fields.
pub const MXCSR_MASK: i32 = 0xffff;
pub const MXCSR_FZ: i32 = 1 << 15; // flush-to-zero
pub const MXCSR_DAZ: i32 = 1 << 6; // denormals-are-zero
pub const MXCSR_RC_SHIFT: i32 = 13; // rounding-control field position
255
+
256
// Maximum number of entries tracked in `valid_tlb_entries` (its array length).
pub const VALID_TLB_ENTRY_MAX: i32 = 10000;
// Flag bits stored in the low bits of `tlb_data` entries.
pub const TLB_VALID: i32 = 1 << 0;
pub const TLB_READONLY: i32 = 1 << 1;
pub const TLB_NO_USER: i32 = 1 << 2;
pub const TLB_IN_MAPPED_RANGE: i32 = 1 << 3;
pub const TLB_GLOBAL: i32 = 1 << 4;
pub const TLB_HAS_CODE: i32 = 1 << 5;
// Size of the real-mode interrupt vector table (256 vectors * 4 bytes).
pub const IVT_SIZE: u32 = 0x400;
// Architectural x86 exception vector numbers.
pub const CPU_EXCEPTION_DE: i32 = 0;
pub const CPU_EXCEPTION_DB: i32 = 1;
pub const CPU_EXCEPTION_NMI: i32 = 2;
pub const CPU_EXCEPTION_BP: i32 = 3;
pub const CPU_EXCEPTION_OF: i32 = 4;
pub const CPU_EXCEPTION_BR: i32 = 5;
pub const CPU_EXCEPTION_UD: i32 = 6;
pub const CPU_EXCEPTION_NM: i32 = 7;
pub const CPU_EXCEPTION_DF: i32 = 8;
pub const CPU_EXCEPTION_TS: i32 = 10;
pub const CPU_EXCEPTION_NP: i32 = 11;
pub const CPU_EXCEPTION_SS: i32 = 12;
pub const CPU_EXCEPTION_GP: i32 = 13;
pub const CPU_EXCEPTION_PF: i32 = 14;
pub const CPU_EXCEPTION_MF: i32 = 16;
pub const CPU_EXCEPTION_AC: i32 = 17;
pub const CPU_EXCEPTION_MC: i32 = 18;
pub const CPU_EXCEPTION_XM: i32 = 19;
pub const CPU_EXCEPTION_VE: i32 = 20;
283
+
284
// Expensive self-checks of TLB consistency; off by default.
pub const CHECK_TLB_INVARIANTS: bool = false;

// True in debug builds; gates dbg-only assertions and panics.
pub const DEBUG: bool = cfg!(debug_assertions);

// NOTE(review): presumably an iteration budget for jit-compiled loops — confirm
// where it is consumed.
pub const LOOP_COUNTER: i32 = 100_003;

// should probably be kept in sync with APIC_TIMER_FREQ in apic.js
pub const TSC_RATE: f64 = 1_000_000.0;
292
+
293
// Highest standard CPUID leaf reported to the guest.
pub static mut cpuid_level: u32 = 0x16;

pub static mut jit_block_boundary: bool = false;

// Work around coarse performance.now() resolution by interpolating rdtsc values.
const TSC_ENABLE_IMPRECISE_BROWSER_WORKAROUND: bool = true;

#[cfg(debug_assertions)]
const TSC_VERBOSE_LOGGING: bool = false;
#[cfg(debug_assertions)]
pub static mut tsc_last_extra: u64 = 0;

// the last value returned by rdtsc
pub static mut tsc_last_value: u64 = 0;
// the smallest difference between two rdtsc readings (depends on the browser's performance.now resolution)
pub static mut tsc_resolution: u64 = u64::MAX;
// how many times rdtsc was called and had to return the same value (due to browser's performance.now resolution)
pub static mut tsc_number_of_same_readings: u64 = 0;
// how often rdtsc was previously called without its value changing, used for interpolating quick
// consecutive calls between rdtsc (when it's called faster than the browser's performance.now
// changes)
pub static mut tsc_speed: u64 = 1;

// used for restoring the state
pub static mut tsc_offset: u64 = 0;
317
+
318
/// A compiled code page: the wasm table entry for its generated function plus the
/// CPU state it was compiled for.
pub struct Code {
    pub wasm_table_index: jit::WasmTableIndex,
    pub state_flags: CachedStateFlags,
    // One entry per byte offset within the 4 KiB page.
    // NOTE(review): presumably maps page offsets to basic-block entry points —
    // confirm against the jit dispatch code.
    pub state_table: [u16; 0x1000],
}
323
+
324
// Software TLB: one i32 entry per 4 KiB page of the 32-bit address space
// (0x100000 pages), holding translation plus TLB_* flag bits.
pub static mut tlb_data: [i32; 0x100000] = [0; 0x100000];
// Per-page pointer to compiled code, if any.
pub static mut tlb_code: [Option<ptr::NonNull<Code>>; 0x100000] = [None; 0x100000];

// Dense list of currently-valid TLB indices (capacity VALID_TLB_ENTRY_MAX),
// so flushes don't need to scan the whole table.
pub static mut valid_tlb_entries: [i32; 10000] = [0; 10000];
pub static mut valid_tlb_entries_count: i32 = 0;

// True while executing jit-compiled code.
pub static mut in_jit: bool = false;

// Pending (interrupt, error_code) raised from within jit code.
pub static mut jit_fault: Option<(i32, Option<i32>)> = None;
333
+
334
/// Debug record of how control last transferred, used for diagnostics.
pub enum LastJump {
    Interrupt {
        phys_addr: u32,
        int: u8,
        software: bool,
        error: Option<u32>,
    },
    Compiled {
        phys_addr: u32,
    },
    Interpreted {
        phys_addr: u32,
    },
    None,
}

impl LastJump {
    /// The physical address of the jump source, if one was recorded.
    pub fn phys_address(&self) -> Option<u32> {
        match self {
            LastJump::Interrupt { phys_addr, .. }
            | LastJump::Compiled { phys_addr }
            | LastJump::Interpreted { phys_addr } => Some(*phys_addr),
            LastJump::None => None,
        }
    }

    /// Short human-readable tag for log output.
    pub fn name(&self) -> &'static str {
        match self {
            LastJump::None => "none",
            LastJump::Interpreted { .. } => "interpreted",
            LastJump::Compiled { .. } => "compiled",
            LastJump::Interrupt { .. } => "interrupt",
        }
    }
}

pub static mut debug_last_jump: LastJump = LastJump::None;
368
+
369
+ #[derive(Copy, Clone)]
370
+ pub struct SegmentSelector {
371
+ raw: u16,
372
+ }
373
+
374
+ impl SegmentSelector {
375
+ pub fn of_u16(raw: u16) -> SegmentSelector { SegmentSelector { raw } }
376
+ pub fn rpl(&self) -> u8 { (self.raw & 3) as u8 }
377
+ pub fn is_gdt(&self) -> bool { (self.raw & 4) == 0 }
378
+ pub fn descriptor_offset(&self) -> u16 { (self.raw & !7) as u16 }
379
+
380
+ pub fn is_null(&self) -> bool { self.is_gdt() && self.descriptor_offset() == 0 }
381
+ }
382
+
383
// Used to indicate early that the selector cannot be used to fetch a descriptor
#[derive(PartialEq)]
pub enum SelectorNullOrInvalid {
    // The selector is the null selector (GDT index 0).
    IsNull,
    // The selector's descriptor offset exceeds the table limit.
    OutsideOfTableLimit,
}
389
+
390
/// A raw 8-byte GDT/LDT segment descriptor, decoded lazily via accessors.
pub struct SegmentDescriptor {
    pub raw: u64,
}

impl SegmentDescriptor {
    pub fn of_u64(raw: u64) -> SegmentDescriptor { SegmentDescriptor { raw } }

    /// Linear base address, assembled from its three scattered fields.
    pub fn base(&self) -> i32 {
        let base_0_15 = self.raw >> 16 & 0xffff;
        let base_16_23 = (self.raw >> 32 & 0xff) << 16;
        let base_24_31 = (self.raw >> 56) << 24;
        (base_0_15 | base_16_23 | base_24_31) as i32
    }

    /// Raw 20-bit limit field (not scaled by granularity).
    pub fn limit(&self) -> u32 {
        let limit_0_15 = self.raw & 0xffff;
        let limit_16_19 = (self.raw >> 48 & 0xf) << 16;
        (limit_0_15 | limit_16_19) as u32
    }

    /// Byte 5: present, DPL, S, and type bits.
    pub fn access_byte(&self) -> u8 { (self.raw >> 40 & 0xff) as u8 }

    /// The four flag bits: granularity, D/B, L, AVL.
    pub fn flags(&self) -> u8 { (self.raw >> 52 & 0xf) as u8 }

    /// S bit clear => system segment (TSS, gate, LDT).
    pub fn is_system(&self) -> bool { self.access_byte() & 0x10 == 0 }
    /// Low nibble of the access byte: the system-segment type.
    pub fn system_type(&self) -> u8 { self.access_byte() & 0xF }

    pub fn accessed(&self) -> bool { self.access_byte() & 1 != 0 }
    pub fn is_rw(&self) -> bool { self.access_byte() & 2 != 0 }
    pub fn is_dc(&self) -> bool { self.access_byte() & 4 != 0 }
    pub fn is_executable(&self) -> bool { self.access_byte() & 8 != 0 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 != 0 }
    /// Writable data segment (the W bit only means "writable" for non-code segments).
    pub fn is_writable(&self) -> bool { !self.is_executable() && self.is_rw() }
    /// Readable: data segments always are; code segments only with the R bit.
    pub fn is_readable(&self) -> bool { !self.is_executable() || self.is_rw() }
    pub fn is_conforming_executable(&self) -> bool { self.is_executable() && self.is_dc() }
    pub fn dpl(&self) -> u8 { self.access_byte() >> 5 & 3 }
    /// D/B bit: default operand size is 32-bit.
    pub fn is_32(&self) -> bool { self.flags() & 4 != 0 }

    /// Limit in bytes, accounting for the granularity bit (4 KiB pages when set).
    pub fn effective_limit(&self) -> u32 {
        let page_granular = self.flags() & 8 != 0;
        if page_granular { self.limit() << 12 | 0xFFF } else { self.limit() }
    }

    /// Copy with the TSS busy bit (bit 41) set.
    pub fn set_busy(&self) -> SegmentDescriptor { SegmentDescriptor::of_u64(self.raw | 1 << 41) }

    /// Copy with the accessed bit (bit 40) set.
    pub fn set_accessed(&self) -> SegmentDescriptor {
        SegmentDescriptor::of_u64(self.raw | 1 << 40)
    }
}
436
+
437
/// A raw 8-byte IDT gate descriptor, decoded lazily via accessors.
pub struct InterruptDescriptor {
    raw: u64,
}

impl InterruptDescriptor {
    pub fn of_u64(raw: u64) -> InterruptDescriptor { InterruptDescriptor { raw } }

    /// Handler entry point offset, assembled from the low and high halves.
    pub fn offset(&self) -> i32 {
        let offset_0_15 = self.raw & 0xffff;
        let offset_16_31 = (self.raw >> 48) << 16;
        (offset_0_15 | offset_16_31) as i32
    }

    /// Code-segment selector of the handler.
    pub fn selector(&self) -> u16 { (self.raw >> 16 & 0xffff) as u16 }

    pub fn access_byte(&self) -> u8 { (self.raw >> 40 & 0xff) as u8 }
    pub fn dpl(&self) -> u8 { self.access_byte() >> 5 & 3 }
    /// Low three type bits: task, interrupt or trap gate.
    pub fn gate_type(&self) -> u8 { self.access_byte() & 7 }
    /// Size bit: 32-bit gate when set.
    pub fn is_32(&self) -> bool { self.access_byte() & 8 != 0 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 != 0 }
    /// Bit 44 must be zero in a valid gate descriptor.
    pub fn reserved_zeros_are_valid(&self) -> bool { self.access_byte() & 16 == 0 }

    const TASK_GATE: u8 = 0b101;
    const INTERRUPT_GATE: u8 = 0b110;
    const TRAP_GATE: u8 = 0b111;
}
456
+
457
/// Loads CS with `selector` using real-mode/vm86 semantics: no descriptor lookup,
/// the segment base is simply `selector << 4` and CS becomes 16-bit.
pub unsafe fn switch_cs_real_mode(selector: i32) {
    // Only legal outside protected mode, or inside vm86 mode.
    dbg_assert!(!*protected_mode || vm86_mode());

    *sreg.offset(CS as isize) = selector as u16;
    *segment_is_null.offset(CS as isize) = false;
    // Real-mode address formation: base = selector * 16.
    *segment_offsets.offset(CS as isize) = selector << 4;
    update_cs_size(false);
}
465
+
466
/// Reads the inner-stack pointer for privilege level `dpl` from the active TSS.
///
/// Returns `(ss, esp)` — note the order: SS is read from the higher offset.
/// Propagates a page fault from the TSS read; panics (TODO: #TS) if the stack
/// slot lies beyond the TSS limit.
unsafe fn get_tss_ss_esp(dpl: u8) -> OrPageFault<(i32, i32)> {
    Ok(if *tss_size_32 {
        // 32-bit TSS: {esp, ss} pairs of 8 bytes each, starting at offset 4.
        let tss_stack_offset = ((dpl << 3) + 4) as u32;
        if tss_stack_offset + 7 > *segment_limits.offset(TR as isize) {
            panic!("#TS handler");
        }
        let addr = translate_address_system_read(
            *segment_offsets.offset(TR as isize) + tss_stack_offset as i32,
        )?;
        // The pair must not straddle a page boundary for the two reads below.
        dbg_assert!(addr & 0xFFF <= 0x1000 - 6);
        (memory::read16(addr + 4), memory::read32s(addr))
    }
    else {
        // 16-bit TSS: {sp, ss} pairs of 4 bytes each, starting at offset 2.
        let tss_stack_offset = ((dpl << 2) + 2) as u32;
        if tss_stack_offset + 3 > *segment_limits.offset(TR as isize) {
            panic!("#TS handler");
        }
        let addr = translate_address_system_read(
            *segment_offsets.offset(TR as isize) + tss_stack_offset as i32,
        )?;
        dbg_assert!(addr & 0xFFF <= 0x1000 - 4);
        (memory::read16(addr + 2), memory::read16(addr))
    })
}
490
+
491
// Entry points for the 16- and 32-bit forms of the IRET instruction.
pub unsafe fn iret16() { iret(true); }
pub unsafe fn iret32() { iret(false); }
493
+
494
/// Implements IRET/IRETD: pops EIP, CS and EFLAGS and resumes the interrupted
/// context. Handles, in order: vm86 #GP (iopl < 3), real-mode/vm86-iopl3 return,
/// return *to* vm86 mode, and protected-mode returns at outer or same privilege.
/// `is_16` selects the 16-bit operand-size form (word-sized pops).
pub unsafe fn iret(is_16: bool) {
    if vm86_mode() && getiopl() < 3 {
        // vm86 mode, iopl != 3
        dbg_log!("#gp iret vm86 mode, iopl != 3");
        trigger_gp(0);
        return;
    }

    // Peek the return frame without popping: the stack pointer is only adjusted
    // once no more faults can occur.
    let (new_eip, new_cs, mut new_flags) = if is_16 {
        (
            return_on_pagefault!(safe_read16(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(2))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
        )
    }
    else {
        (
            return_on_pagefault!(safe_read32s(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
            return_on_pagefault!(safe_read32s(get_stack_pointer(8))),
        )
    };

    // Real mode, or vm86 mode with iopl == 3: simple far return with flags.
    if !*protected_mode || (vm86_mode() && getiopl() == 3) {
        if new_eip as u32 & 0xFFFF0000 != 0 {
            panic!("#GP handler");
        }

        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_eip;

        if is_16 {
            // 16-bit form only replaces the low word of EFLAGS.
            update_eflags(new_flags | *flags & !0xFFFF);
            adjust_stack_reg(3 * 2);
        }
        else {
            if !*protected_mode {
                // Mask which EFLAGS bits a real-mode IRETD may change.
                update_eflags((new_flags & 0x257FD5) | (*flags & 0x1A0000));
            }
            else {
                update_eflags(new_flags);
            }
            adjust_stack_reg(3 * 4);
        }

        update_state_flags();
        handle_irqs();
        return;
    }

    dbg_assert!(!vm86_mode());

    if *flags & FLAG_NT != 0 {
        // Nested-task return (task switch back) is not implemented.
        if DEBUG {
            panic!("NT");
        }
        trigger_gp(0);
        return;
    }

    if new_flags & FLAG_VM != 0 {
        if *cpl == 0 {
            // return to virtual 8086 mode

            // vm86 cannot be set in 16 bit flag
            dbg_assert!(!is_16);

            // The vm86 frame additionally holds esp, ss and all data segments.
            let temp_esp = return_on_pagefault!(safe_read32s(get_stack_pointer(12)));
            let temp_ss = return_on_pagefault!(safe_read16(get_stack_pointer(16)));

            let new_es = return_on_pagefault!(safe_read16(get_stack_pointer(20)));
            let new_ds = return_on_pagefault!(safe_read16(get_stack_pointer(24)));
            let new_fs = return_on_pagefault!(safe_read16(get_stack_pointer(28)));
            let new_gs = return_on_pagefault!(safe_read16(get_stack_pointer(32)));

            // no exceptions below

            update_eflags(new_flags);
            *flags |= FLAG_VM;

            switch_cs_real_mode(new_cs);
            *instruction_pointer = get_seg_cs() + (new_eip & 0xFFFF);

            if !switch_seg(ES, new_es)
                || !switch_seg(DS, new_ds)
                || !switch_seg(FS, new_fs)
                || !switch_seg(GS, new_gs)
            {
                // XXX: Should be checked before side effects
                dbg_assert!(false);
            }

            adjust_stack_reg(9 * 4); // 9 dwords: eip, cs, flags, esp, ss, es, ds, fs, gs

            write_reg32(ESP, temp_esp);
            if !switch_seg(SS, temp_ss) {
                // XXX
                dbg_assert!(false);
            }

            // vm86 code always runs at CPL 3.
            *cpl = 3;
            cpl_changed();

            update_cs_size(false);
            update_state_flags();

            // iret end
            return;
        }
        else {
            dbg_log!("vm86 flag ignored because cpl != 0");
            new_flags &= !FLAG_VM;
        }
    }

    // protected mode return

    let cs_selector = SegmentSelector::of_u16(new_cs as u16);
    let cs_descriptor = match return_on_pagefault!(lookup_segment_selector(cs_selector)) {
        Ok((desc, _)) => desc,
        Err(SelectorNullOrInvalid::IsNull) => panic!("Unimplemented: CS selector is null"),
        Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
            panic!("Unimplemented: CS selector is invalid")
        },
    };

    if new_eip as u32 > cs_descriptor.effective_limit() {
        dbg_log!(
            "#gp iret: new_eip > cs_descriptor.effective_limit, new_eip={:x} cs_descriptor.effective_limit={:x}",
            new_eip as u32,
            cs_descriptor.effective_limit()
        );
        trigger_gp(new_cs & !3);
        return;
    }

    // Validate the target code segment. Several rare failure modes are still
    // panics rather than the architecturally-correct exceptions.
    if !cs_descriptor.is_present() {
        panic!("not present");
    }
    if !cs_descriptor.is_executable() {
        panic!("not exec");
    }
    if cs_selector.rpl() < *cpl {
        // IRET may never lower numerically (raise privilege).
        panic!("rpl < cpl");
    }
    if cs_descriptor.is_dc() && cs_descriptor.dpl() > cs_selector.rpl() {
        panic!("conforming and dpl > rpl");
    }

    if !cs_descriptor.is_dc() && cs_selector.rpl() != cs_descriptor.dpl() {
        dbg_log!(
            "#gp iret: non-conforming cs and rpl != dpl, dpl={} rpl={}",
            cs_descriptor.dpl(),
            cs_selector.rpl()
        );
        trigger_gp(new_cs & !3);
        return;
    }

    if cs_selector.rpl() > *cpl {
        // outer privilege return: also pop esp and ss, and validate the new SS.
        let (temp_esp, temp_ss) = if is_16 {
            (
                return_on_pagefault!(safe_read16(get_stack_pointer(6))),
                return_on_pagefault!(safe_read16(get_stack_pointer(8))),
            )
        }
        else {
            (
                return_on_pagefault!(safe_read32s(get_stack_pointer(12))),
                return_on_pagefault!(safe_read16(get_stack_pointer(16))),
            )
        };

        let ss_selector = SegmentSelector::of_u16(temp_ss as u16);
        let ss_descriptor = match return_on_pagefault!(lookup_segment_selector(ss_selector)) {
            Ok((desc, _)) => desc,
            Err(SelectorNullOrInvalid::IsNull) => {
                dbg_log!("#GP for loading 0 in SS sel={:x}", temp_ss);
                dbg_trace();
                trigger_gp(0);
                return;
            },
            Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
                trigger_gp(temp_ss & !3);
                return;
            },
        };
        let new_cpl = cs_selector.rpl();

        // SS must be a writable data segment at exactly the new privilege level.
        if ss_descriptor.is_system()
            || ss_selector.rpl() != new_cpl
            || !ss_descriptor.is_writable()
            || ss_descriptor.dpl() != new_cpl
        {
            dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_gp(temp_ss & !3);
            return;
        }

        if !ss_descriptor.is_present() {
            dbg_log!("#SS for loading non-present in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_ss(temp_ss & !3);
            return;
        }

        // no exceptions below

        if is_16 {
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            update_eflags(new_flags);
        }

        *cpl = cs_selector.rpl();
        cpl_changed();

        if !switch_seg(SS, temp_ss) {
            // XXX
            dbg_assert!(false);
        }

        set_stack_reg(temp_esp);

        // VIF/VIP are only writable from CPL 0 and not touched by update_eflags.
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }

        // Null out any data segment no longer accessible at the lowered privilege
        // (non-conforming segments with dpl < new cpl).
        for reg in [ES, DS, FS, GS] {
            let access = *segment_access_bytes.offset(reg as isize);
            let dpl = access >> 5 & 3;
            let executable = access & 8 == 8;
            let conforming = access & 4 == 4;
            if dpl < *cpl && !(executable && conforming) {
                //dbg_log!(
                //    "set segment to null sreg={} dpl={} executable={} conforming={}",
                //    reg,
                //    dpl,
                //    executable,
                //    conforming
                //);
                *segment_is_null.offset(reg as isize) = true;
                *sreg.offset(reg as isize) = 0;
            }
        }
    }
    else if cs_selector.rpl() == *cpl {
        // same privilege return
        // no exceptions below
        if is_16 {
            adjust_stack_reg(3 * 2);
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            adjust_stack_reg(3 * 4);
            update_eflags(new_flags);
        }

        // update vip and vif, which are not changed by update_eflags
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }
    }
    else {
        // rpl < cpl was rejected above, rpl > cpl and rpl == cpl handled.
        dbg_assert!(false);
    }

    // Commit the new CS: cached selector, size, limit, base and access rights.
    *sreg.offset(CS as isize) = new_cs as u16;
    dbg_assert!((new_cs & 3) == *cpl as i32);

    update_cs_size(cs_descriptor.is_32());

    *segment_limits.offset(CS as isize) = cs_descriptor.effective_limit();
    *segment_offsets.offset(CS as isize) = cs_descriptor.base();
    *segment_access_bytes.offset(CS as isize) = cs_descriptor.access_byte();

    *instruction_pointer = new_eip + get_seg_cs();

    update_state_flags();

    // iret end

    // Re-enabling FLAG_INTERRUPT may unblock pending hardware interrupts.
    handle_irqs();
}
782
+
783
/// Deliver interrupt `interrupt_nr` to the guest, emulating the hardware
/// interrupt/exception entry sequence in both protected and real mode.
///
/// * `is_software_int` — true for INT n (software interrupts are subject to
///   the gate-DPL privilege check and the vm86 IOPL check).
/// * `error_code` — pushed on the new stack when `Some` (hardware exceptions
///   that define an error code).
///
/// In protected mode this reads the gate from the IDT, validates it, and
/// performs either a task switch (task gate), an inter-privilege transfer
/// (stack switch via the TSS), or an intra-privilege transfer. In real mode
/// it reads the 4-byte cs:ip vector from the IVT at physical address 0.
/// The exact order of checks and pushes below mirrors the hardware sequence;
/// do not reorder.
pub unsafe fn call_interrupt_vector(
    interrupt_nr: i32,
    is_software_int: bool,
    error_code: Option<i32>,
) {
    if *protected_mode {
        if vm86_mode() && *cr.offset(4) & CR4_VME != 0 {
            panic!("Unimplemented: VME");
        }

        // In vm86 mode, software interrupts with IOPL < 3 raise #GP(0).
        if vm86_mode() && is_software_int && getiopl() < 3 {
            dbg_log!("call_interrupt_vector #GP. vm86 && software int && iopl < 3");
            dbg_trace();
            trigger_gp(0);
            return;
        }

        // Each IDT entry is 8 bytes; the whole entry must lie within the IDT limit.
        if interrupt_nr << 3 | 7 > *idtr_size {
            dbg_log!("interrupt_nr={:x} idtr_size={:x}", interrupt_nr, *idtr_size);
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }

        let descriptor_address = return_on_pagefault!(translate_address_system_read(
            *idtr_offset + (interrupt_nr << 3)
        ));

        let descriptor = InterruptDescriptor::of_u64(memory::read64s(descriptor_address) as u64);

        let mut offset = descriptor.offset();
        let selector = descriptor.selector() as i32;
        let dpl = descriptor.dpl();
        let gate_type = descriptor.gate_type();

        // Software interrupts may not enter a gate more privileged than the
        // gate's DPL allows; error code is (vector << 3) | 2 (IDT, EXT=0).
        if is_software_int && dpl < *cpl {
            dbg_log!("#gp software interrupt ({:x}) and dpl < cpl", interrupt_nr);
            dbg_trace();
            trigger_gp(interrupt_nr << 3 | 2);
            return;
        }

        if gate_type != InterruptDescriptor::TRAP_GATE
            && gate_type != InterruptDescriptor::INTERRUPT_GATE
            && gate_type != InterruptDescriptor::TASK_GATE
        {
            // invalid gate_type
            dbg_log!(
                "gate type invalid. gate_type=0b{:b} raw={:b}",
                gate_type,
                descriptor.raw
            );
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }

        if !descriptor.reserved_zeros_are_valid() {
            dbg_log!(
                "reserved 0s violated. gate_type=0b{:b} raw={:b}",
                gate_type,
                descriptor.raw
            );
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }

        if !descriptor.is_present() {
            // present bit not set
            dbg_log!("#np int descriptor not present, int={}", interrupt_nr);
            trigger_np(interrupt_nr << 3 | 2);
            return;
        }

        if gate_type == InterruptDescriptor::TASK_GATE {
            // task gate: delegate the whole transfer to the task switcher
            dbg_log!(
                "interrupt to task gate: int={:x} sel={:x} dpl={}",
                interrupt_nr,
                selector,
                dpl
            );
            dbg_trace();
            dbg_assert!(descriptor.is_32(), "TODO: Check this (likely #GP)");
            dbg_assert!(offset == 0, "TODO: Check this (likely #GP)");
            do_task_switch(selector, error_code);
            return;
        }

        // Trap/interrupt gate: load and validate the target code segment.
        let cs_segment_descriptor = match return_on_pagefault!(lookup_segment_selector(
            SegmentSelector::of_u16(selector as u16)
        )) {
            Ok((desc, _)) => desc,
            Err(SelectorNullOrInvalid::IsNull) => {
                dbg_log!("is null");
                panic!("Unimplemented: #GP handler");
            },
            Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                dbg_log!("is invalid");
                panic!("Unimplemented: #GP handler (error code)");
            },
        };

        dbg_assert!(offset as u32 <= cs_segment_descriptor.effective_limit());

        if !cs_segment_descriptor.is_executable() || cs_segment_descriptor.dpl() > *cpl {
            dbg_log!("not exec");
            panic!("Unimplemented: #GP handler");
        }
        if !cs_segment_descriptor.is_present() {
            // kvm-unit-test
            dbg_log!("not present");
            trigger_np(interrupt_nr << 3 | 2);
            return;
        }

        // Snapshot flags before any modification; these are what gets pushed.
        let old_flags = get_eflags();

        if !cs_segment_descriptor.is_dc() && cs_segment_descriptor.dpl() < *cpl {
            // inter privilege level interrupt
            // interrupt from vm86 mode

            if old_flags & FLAG_VM != 0 && cs_segment_descriptor.dpl() != 0 {
                panic!("Unimplemented: #GP handler for non-0 cs segment dpl when in vm86 mode");
            }

            // Fetch the new stack (ss:esp) for the target privilege level
            // from the TSS.
            let (new_ss, new_esp) =
                return_on_pagefault!(get_tss_ss_esp(cs_segment_descriptor.dpl()));

            let ss_segment_selector = SegmentSelector::of_u16(new_ss as u16);
            let ss_segment_descriptor =
                match return_on_pagefault!(lookup_segment_selector(ss_segment_selector)) {
                    Ok((desc, _)) => desc,
                    Err(
                        SelectorNullOrInvalid::IsNull | SelectorNullOrInvalid::OutsideOfTableLimit,
                    ) => {
                        panic!("Unimplemented: #TS handler");
                    },
                };

            // For expand-down (is_dc) stack segments the valid region is
            // above the limit; otherwise it is below it.
            if ss_segment_descriptor.is_dc() {
                dbg_assert!(new_esp as u32 > ss_segment_descriptor.effective_limit());
            }
            else {
                dbg_assert!(new_esp as u32 - 1 <= ss_segment_descriptor.effective_limit());
            }
            dbg_assert!(!ss_segment_descriptor.is_system() && ss_segment_descriptor.is_writable());

            if ss_segment_selector.rpl() != cs_segment_descriptor.dpl() {
                panic!("Unimplemented: #TS handler");
            }
            if ss_segment_descriptor.dpl() != cs_segment_descriptor.dpl()
                || !ss_segment_descriptor.is_rw()
            {
                panic!("Unimplemented: #TS handler");
            }
            if !ss_segment_descriptor.is_present() {
                panic!("Unimplemented: #TS handler");
            }

            let old_esp = read_reg32(ESP);
            let old_ss = *sreg.offset(SS as isize) as i32;

            // Words to be pushed: eip, cs, eflags, esp, ss (= 5), plus the
            // error code, plus gs/fs/ds/es (4 more) when coming from vm86.
            let error_code_space = if error_code.is_some() { 1 } else { 0 };
            let vm86_space = if (old_flags & FLAG_VM) == FLAG_VM { 4 } else { 0 };
            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };

            let stack_space = bytes_per_arg * (5 + error_code_space + vm86_space);
            let new_stack_pointer = ss_segment_descriptor.base()
                + if ss_segment_descriptor.is_32() {
                    new_esp - stack_space
                }
                else {
                    new_esp - stack_space & 0xFFFF
                };

            // Pre-fault the entire destination stack range so that the
            // pushes below cannot page-fault mid-sequence.
            return_on_pagefault!(translate_address_system_write(new_stack_pointer));
            return_on_pagefault!(translate_address_system_write(
                ss_segment_descriptor.base() + new_esp - 1
            ));

            // no exceptions below
            *cpl = cs_segment_descriptor.dpl();
            cpl_changed();

            update_cs_size(cs_segment_descriptor.is_32());

            *flags &= !FLAG_VM & !FLAG_RF;

            if !switch_seg(SS, new_ss) {
                // XXX
                dbg_assert!(false);
            }
            set_stack_reg(new_esp);

            // XXX: #SS if stack would cross stack limit

            if old_flags & FLAG_VM != 0 {
                if !descriptor.is_32() {
                    dbg_assert!(false);
                }
                else {
                    // vm86 entry saves the data segment registers too
                    push32(*sreg.offset(GS as isize) as i32).unwrap();
                    push32(*sreg.offset(FS as isize) as i32).unwrap();
                    push32(*sreg.offset(DS as isize) as i32).unwrap();
                    push32(*sreg.offset(ES as isize) as i32).unwrap();
                }
            }

            if descriptor.is_32() {
                push32(old_ss).unwrap();
                push32(old_esp).unwrap();
            }
            else {
                push16(old_ss).unwrap();
                push16(old_esp & 0xFFFF).unwrap();
            }
        }
        else if cs_segment_descriptor.is_dc() || cs_segment_descriptor.dpl() == *cpl {
            // intra privilege level interrupt

            //dbg_log!("Intra privilege interrupt gate=" + h(selector, 4) + ":" + h(offset >>> 0, 8) +
            //    " gate_type=" + gate_type + " 16bit=" + descriptor.is_32() +
            //    " cpl=" + *cpl + " dpl=" + segment_descriptor.dpl() + " conforming=" + +segment_descriptor.is_dc(), );
            //debug.dump_regs_short();

            if *flags & FLAG_VM != 0 {
                dbg_assert!(false, "check error code");
                trigger_gp(selector & !3);
                return;
            }

            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };
            let error_code_space = if error_code.is_some() { 1 } else { 0 };

            // Only eip, cs and eflags (plus optional error code) are pushed;
            // no stack switch happens at the same privilege level.
            let stack_space = bytes_per_arg * (3 + error_code_space);

            // XXX: with current cpl or with cpl 0?
            return_on_pagefault!(writable_or_pagefault(
                get_stack_pointer(-stack_space),
                stack_space
            ));

            // no exceptions below
        }
        else {
            panic!("Unimplemented: #GP handler");
        }

        // Common tail for both transfer kinds: push the return frame.
        // XXX: #SS if stack would cross stack limit
        if descriptor.is_32() {
            push32(old_flags).unwrap();
            push32(*sreg.offset(CS as isize) as i32).unwrap();
            push32(get_real_eip()).unwrap();

            if let Some(ec) = error_code {
                push32(ec).unwrap();
            }
        }
        else {
            push16(old_flags & 0xFFFF).unwrap();
            push16(*sreg.offset(CS as isize) as i32).unwrap();
            push16(get_real_eip() & 0xFFFF).unwrap();

            if let Some(ec) = error_code {
                dbg_assert!(ec >= 0 && ec < 0x10000);
                push16(ec).unwrap();
            }

            // 16-bit gates only transfer to a 16-bit entry point
            offset &= 0xFFFF;
        }

        // Leaving vm86 mode: the data segment registers are cleared after
        // having been saved above.
        if old_flags & FLAG_VM != 0 {
            if !switch_seg(GS, 0) || !switch_seg(FS, 0) || !switch_seg(DS, 0) || !switch_seg(ES, 0)
            {
                // can't fail
                dbg_assert!(false);
            }
        }

        // Load CS with the gate's selector, forcing RPL = new CPL.
        *sreg.offset(CS as isize) = (selector as u16) & !3 | *cpl as u16;
        dbg_assert!((*sreg.offset(CS as isize) & 3) == *cpl as u16);

        update_cs_size(cs_segment_descriptor.is_32());

        *segment_limits.offset(CS as isize) = cs_segment_descriptor.effective_limit();
        *segment_offsets.offset(CS as isize) = cs_segment_descriptor.base();
        *segment_access_bytes.offset(CS as isize) = cs_segment_descriptor.access_byte();

        *instruction_pointer = get_seg_cs() + offset;

        *flags &= !FLAG_NT & !FLAG_VM & !FLAG_RF & !FLAG_TRAP;

        if gate_type == InterruptDescriptor::INTERRUPT_GATE {
            // clear int flag for interrupt gates
            *flags &= !FLAG_INTERRUPT;
        }
        else {
            // Trap gate keeps IF; if interrupts just became enabled,
            // pending IRQs may now be serviced.
            if *flags & FLAG_INTERRUPT != 0 && old_flags & FLAG_INTERRUPT == 0 {
                handle_irqs();
            }
        }

        update_state_flags();
    }
    else {
        // call 4 byte cs:ip interrupt vector from ivt at cpu.memory 0

        let index = (interrupt_nr << 2) as u32;
        let new_ip = memory::read16(index);
        let new_cs = memory::read16(index + 2);

        dbg_assert!(
            index | 3 <= IVT_SIZE,
            "Unimplemented: #GP for interrupt number out of IVT bounds"
        );

        // XXX: #SS if stack would cross stack limit

        // push flags, cs:ip
        push16(get_eflags() & 0xFFFF).unwrap();
        push16(*sreg.offset(CS as isize) as i32).unwrap();
        push16(get_real_eip() & 0xFFFF).unwrap();

        *flags &= !FLAG_INTERRUPT & !FLAG_AC & !FLAG_TRAP;

        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_ip;
        update_state_flags();
    }
}
1112
+
1113
/// Perform a far control transfer (JMP FAR when `is_call` is false, CALL FAR
/// when true) to `selector:eip`.
///
/// * `is_osize_32` — operand size of the instruction; decides whether 32-bit
///   or 16-bit return values are pushed for a call.
///
/// Handles real mode / vm86 directly; in protected mode it dispatches on the
/// target descriptor: plain code segment, call gate (system type 4 = 16-bit,
/// 0xC = 32-bit), or task gate (unimplemented). The ordering of privilege
/// checks and stack side effects mirrors the hardware sequence; do not
/// reorder.
pub unsafe fn far_jump(eip: i32, selector: i32, is_call: bool, is_osize_32: bool) {
    dbg_assert!(selector < 0x10000 && selector >= 0);

    if !*protected_mode || vm86_mode() {
        // Real mode / vm86: no descriptor checks; just push the return
        // address for calls and reload cs:ip.
        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));

                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));

                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }
        switch_cs_real_mode(selector);
        *instruction_pointer = get_seg_cs() + eip;
        update_state_flags();
        return;
    }

    let cs_selector = SegmentSelector::of_u16(selector as u16);
    let info = match return_on_pagefault!(lookup_segment_selector(cs_selector)) {
        Ok((desc, _)) => desc,
        Err(SelectorNullOrInvalid::IsNull) => {
            dbg_log!("#gp null cs");
            trigger_gp(0);
            return;
        },
        Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
            dbg_log!("#gp invalid cs: {:x}", selector);
            trigger_gp(selector & !3);
            return;
        },
    };

    if info.is_system() {
        dbg_assert!(is_call, "TODO: Jump");

        dbg_log!("system type cs: {:x}", selector);

        if info.system_type() == 0xC || info.system_type() == 4 {
            // call gate
            let is_16 = info.system_type() == 4;

            if info.dpl() < *cpl || info.dpl() < cs_selector.rpl() {
                dbg_log!("#gp cs gate dpl < cpl or dpl < rpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }

            if !info.is_present() {
                dbg_log!("#NP for loading not-present in gate cs sel={:x}", selector);
                trigger_np(selector & !3);
                return;
            }

            // The gate's target code-segment selector lives in bits 16..32
            // of the descriptor (shadows the outer `cs_selector` binding).
            let cs_selector = (info.raw >> 16) as i32;

            let cs_info = match return_on_pagefault!(lookup_segment_selector(
                SegmentSelector::of_u16(cs_selector as u16)
            )) {
                Ok((desc, _)) => desc,
                Err(SelectorNullOrInvalid::IsNull) => {
                    dbg_log!("#gp null cs");
                    trigger_gp(0);
                    return;
                },
                Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                    dbg_log!("#gp invalid cs: {:x}", cs_selector);
                    trigger_gp(cs_selector & !3);
                    return;
                },
            };

            if cs_info.is_system() {
                dbg_log!("#gp non-code cs: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }

            if !cs_info.is_executable() {
                dbg_log!("#gp non-executable cs: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }

            if cs_info.dpl() > *cpl {
                dbg_log!("#gp dpl > cpl: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }

            if !cs_info.is_present() {
                dbg_log!("#NP for loading not-present in cs sel={:x}", cs_selector);
                trigger_np(cs_selector & !3);
                return;
            }

            if !cs_info.is_dc() && cs_info.dpl() < *cpl {
                // Non-conforming target with lower DPL: privilege increases,
                // so switch to the inner stack from the TSS and copy
                // `parameter_count` arguments across.
                dbg_log!(
                    "more privilege call gate is_16={} from={} to={}",
                    is_16,
                    *cpl,
                    cs_info.dpl()
                );
                let (new_ss, new_esp) = return_on_pagefault!(get_tss_ss_esp(cs_info.dpl()));

                let ss_selector = SegmentSelector::of_u16(new_ss as u16);
                let ss_info = match return_on_pagefault!(lookup_segment_selector(ss_selector)) {
                    Ok((desc, _)) => desc,
                    Err(SelectorNullOrInvalid::IsNull) => {
                        panic!("null ss: {}", new_ss);
                    },
                    Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                        panic!("invalid ss: {}", new_ss);
                    },
                };

                // Expand-down stacks are valid above the limit, normal
                // stacks below it.
                if ss_info.is_dc() {
                    dbg_assert!(new_esp as u32 > ss_info.effective_limit());
                }
                else {
                    dbg_assert!(new_esp as u32 - 1 <= ss_info.effective_limit());
                }
                dbg_assert!(!ss_info.is_system() && ss_info.is_writable());

                if ss_selector.rpl() != cs_info.dpl()
                // xxx: 0 in v86 mode
                {
                    panic!("#TS handler");
                }
                if ss_info.dpl() != cs_info.dpl() || !ss_info.is_writable() {
                    panic!("#TS handler");
                }
                if !ss_info.is_present() {
                    panic!("#SS handler");
                }

                // Parameter count is the low 5 bits of the gate's second dword.
                let parameter_count = (info.raw >> 32 & 0x1F) as i32;
                let mut stack_space = if is_16 { 4 } else { 8 };
                if is_call {
                    stack_space +=
                        if is_16 { 4 + 2 * parameter_count } else { 8 + 4 * parameter_count };
                }
                // Pre-fault the whole destination range so the pushes below
                // cannot fault mid-sequence.
                if ss_info.is_32() {
                    return_on_pagefault!(writable_or_pagefault_cpl(
                        cs_info.dpl(),
                        ss_info.base() + new_esp - stack_space,
                        stack_space
                    ));
                }
                else {
                    return_on_pagefault!(writable_or_pagefault_cpl(
                        cs_info.dpl(),
                        ss_info.base() + (new_esp - stack_space & 0xFFFF),
                        stack_space
                    ));
                }

                let old_esp = read_reg32(ESP);
                let old_ss = *sreg.offset(SS as isize);
                let old_stack_pointer = get_stack_pointer(0);

                //dbg_log!("old_esp=" + h(old_esp));

                *cpl = cs_info.dpl();
                cpl_changed();

                update_cs_size(cs_info.is_32());

                dbg_assert!(new_ss & 3 == cs_info.dpl() as i32);
                // XXX: Should be checked before side effects
                if !switch_seg(SS, new_ss) {
                    dbg_assert!(false);
                };
                set_stack_reg(new_esp);

                //dbg_log!("parameter_count=" + parameter_count);
                //dbg_assert!(parameter_count == 0, "TODO");

                // Save the outer stack pointer on the inner stack.
                if is_16 {
                    push16(old_ss as i32).unwrap();
                    push16(old_esp).unwrap();
                }
                else {
                    push32(old_ss as i32).unwrap();
                    push32(old_esp).unwrap();
                }

                if is_call {
                    // Copy the caller's arguments from the outer stack, then
                    // push the return address.
                    if is_16 {
                        for i in (0..parameter_count).rev() {
                            let parameter = safe_read16(old_stack_pointer + 2 * i).unwrap();
                            push16(parameter).unwrap();
                        }

                        //writable_or_pagefault(get_stack_pointer(-4), 4);
                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        for i in (0..parameter_count).rev() {
                            let parameter = safe_read32s(old_stack_pointer + 4 * i).unwrap();
                            push32(parameter).unwrap();
                        }

                        //writable_or_pagefault(get_stack_pointer(-8), 8);
                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }
            }
            else {
                // Same-privilege (or conforming) call gate: no stack switch.
                dbg_log!(
                    "same privilege call gate is_16={} from={} to={} conforming={}",
                    is_16,
                    *cpl,
                    cs_info.dpl(),
                    cs_info.is_dc()
                );

                if is_call {
                    if is_16 {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));

                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));

                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }

                dbg_assert!(*cpl == cs_info.dpl());
            }

            // Note: eip from call is ignored
            // Target offset comes from the gate itself: low word always,
            // high word only for 32-bit gates.
            let mut new_eip = (info.raw & 0xFFFF) as i32;
            if !is_16 {
                new_eip |= ((info.raw >> 32) & 0xFFFF0000) as i32;
            }

            dbg_log!(
                "call gate eip={:x} cs={:x} conforming={}",
                new_eip as u32,
                cs_selector,
                cs_info.is_dc()
            );
            dbg_assert!((new_eip as u32) <= cs_info.effective_limit(), "todo: #gp");

            update_cs_size(cs_info.is_32());

            *segment_is_null.offset(CS as isize) = false;
            *segment_limits.offset(CS as isize) = cs_info.effective_limit();
            *segment_offsets.offset(CS as isize) = cs_info.base();
            *segment_access_bytes.offset(CS as isize) = cs_info.access_byte();
            // Load CS with the gate's target selector, forcing RPL = CPL.
            *sreg.offset(CS as isize) = cs_selector as u16 & !3 | *cpl as u16;
            dbg_assert!(*sreg.offset(CS as isize) & 3 == *cpl as u16);

            *instruction_pointer = get_seg_cs() + new_eip;

            update_state_flags();
        }
        else if info.system_type() == 1 || info.system_type() == 9 {
            dbg_assert!(false, "TODO: far call task gate");
        }
        else {
            dbg_assert!(false, "TODO: #gp invalid system type");
        }
    }
    else {
        // Plain code segment target.
        if !info.is_executable() {
            dbg_log!("#gp non-executable cs: {:x}", selector);
            trigger_gp(selector & !3);
            return;
        }

        if info.is_dc() {
            // conforming code segment
            if info.dpl() > *cpl {
                dbg_log!("#gp cs dpl > cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }
        else {
            // non-conforming code segment

            if cs_selector.rpl() > *cpl || info.dpl() != *cpl {
                dbg_log!("#gp cs rpl > cpl or dpl != cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }

        if !info.is_present() {
            dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
            dbg_trace();
            trigger_np(selector & !3);
            return;
        }

        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));

                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));

                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }

        dbg_assert!((eip as u32) <= info.effective_limit(), "todo: #gp");

        update_cs_size(info.is_32());

        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = info.effective_limit();
        *segment_access_bytes.offset(CS as isize) = info.access_byte();

        *segment_offsets.offset(CS as isize) = info.base();
        *sreg.offset(CS as isize) = selector as u16 & !3 | *cpl as u16;

        *instruction_pointer = get_seg_cs() + eip;

        update_state_flags();
    }
}
1453
+
1454
/// Perform a far return (RETF) to `selector:eip`, popping `stack_adjust`
/// extra bytes of caller arguments (the RETF imm16 form).
///
/// * `is_osize_32` — operand size; controls whether the return frame entries
///   are 4 or 2 bytes wide.
///
/// In real mode / vm86 this is a simple cs:ip reload plus a stack
/// adjustment. In protected mode the target code segment is validated and,
/// when the selector's RPL is greater than the current CPL, a return to an
/// outer privilege level is performed by also popping ss:esp.
pub unsafe fn far_return(eip: i32, selector: i32, stack_adjust: i32, is_osize_32: bool) {
    dbg_assert!(selector < 0x10000 && selector >= 0);

    if !*protected_mode {
        dbg_assert!(!*is_32);
    }

    if !*protected_mode || vm86_mode() {
        switch_cs_real_mode(selector);
        *instruction_pointer = get_seg_cs() + eip;
        // Drop cs+ip (2 entries) plus the immediate adjustment.
        adjust_stack_reg(2 * (if is_osize_32 { 4 } else { 2 }) + stack_adjust);
        update_state_flags();
        return;
    }

    let cs_selector = SegmentSelector::of_u16(selector as u16);
    let info = match return_on_pagefault!(lookup_segment_selector(cs_selector)) {
        Ok((desc, _)) => desc,
        Err(SelectorNullOrInvalid::IsNull) => {
            dbg_log!("far return: #gp null cs");
            trigger_gp(0);
            return;
        },
        Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
            dbg_log!("far return: #gp invalid cs: {:x}", selector);
            trigger_gp(selector & !3);
            return;
        },
    };

    if info.is_system() {
        dbg_assert!(false, "is system in far return");
        trigger_gp(selector & !3);
        return;
    }

    if !info.is_executable() {
        dbg_log!("non-executable cs: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }

    // A return may never increase privilege (lower CPL).
    if cs_selector.rpl() < *cpl {
        dbg_log!("cs rpl < cpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }

    if info.is_dc() && info.dpl() > cs_selector.rpl() {
        dbg_log!("cs conforming and dpl > rpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }

    if !info.is_dc() && info.dpl() != cs_selector.rpl() {
        dbg_log!("cs non-conforming and dpl != rpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }

    if !info.is_present() {
        dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
        dbg_trace();
        trigger_np(selector & !3);
        return;
    }

    if cs_selector.rpl() > *cpl {
        // Return to an outer (less privileged) level: the caller's ss:esp
        // sit on the stack above the return address and the adjustment.
        dbg_log!(
            "far return privilege change cs: {:x} from={} to={} is_16={}",
            selector,
            *cpl,
            cs_selector.rpl(),
            is_osize_32
        );

        let temp_esp;
        let temp_ss;
        if is_osize_32 {
            //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 8))))
            temp_esp = safe_read32s(get_stack_pointer(stack_adjust + 8)).unwrap();
            //dbg_log!("esp=" + h(temp_esp));
            temp_ss = safe_read16(get_stack_pointer(stack_adjust + 12)).unwrap();
        }
        else {
            //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 4))));
            temp_esp = safe_read16(get_stack_pointer(stack_adjust + 4)).unwrap();
            //dbg_log!("esp=" + h(temp_esp));
            temp_ss = safe_read16(get_stack_pointer(stack_adjust + 6)).unwrap();
        }

        *cpl = cs_selector.rpl();
        cpl_changed();

        // XXX: This failure should be checked before side effects
        if !switch_seg(SS, temp_ss) {
            dbg_assert!(false);
        }
        set_stack_reg(temp_esp + stack_adjust);

        //if(is_osize_32)
        //{
        //    adjust_stack_reg(2 * 4);
        //}
        //else
        //{
        //    adjust_stack_reg(2 * 2);
        //}

        //throw debug.unimpl("privilege change");

        //adjust_stack_reg(stack_adjust);

        // TODO: invalidate segments that are not accessible at this cpl (see iret)
    }
    else {
        // Same-privilege return: just drop cs+eip and the adjustment.
        if is_osize_32 {
            adjust_stack_reg(2 * 4 + stack_adjust);
        }
        else {
            adjust_stack_reg(2 * 2 + stack_adjust);
        }
    }

    //dbg_assert(*cpl == info.dpl);

    update_cs_size(info.is_32());

    *segment_is_null.offset(CS as isize) = false;
    *segment_limits.offset(CS as isize) = info.effective_limit();
    *segment_access_bytes.offset(CS as isize) = info.access_byte();

    *segment_offsets.offset(CS as isize) = info.base();
    *sreg.offset(CS as isize) = selector as u16;
    dbg_assert!(selector & 3 == *cpl as i32);

    *instruction_pointer = get_seg_cs() + eip;

    update_state_flags();
}
1594
+
1595
/// Switch to the task described by the TSS descriptor `selector` (used by
/// task gates in the IDT and — eventually — far jumps/calls to task gates).
///
/// Saves the current register state into the outgoing TSS, marks the new
/// descriptor busy, loads the new task's registers, segments, LDT and CR3,
/// and pushes `error_code` (if any) on the new task's stack. Only 32-bit
/// TSS formats are supported. The save/restore ordering matters; do not
/// reorder.
pub unsafe fn do_task_switch(selector: i32, error_code: Option<i32>) {
    dbg_log!("do_task_switch sel={:x}", selector);

    dbg_assert!(*tss_size_32, "TODO: 16-bit TSS in task switch");

    let selector = SegmentSelector::of_u16(selector as u16);
    let (descriptor, descriptor_address) =
        match lookup_segment_selector(selector).expect("TODO: handle pagefault") {
            Ok(desc) => desc,
            Err(_) => {
                panic!("#GP handler");
            },
        };

    dbg_assert!(selector.is_gdt());
    // System type 1/3 = 16-bit TSS (available/busy), 9/0xB = 32-bit TSS.
    dbg_assert!((descriptor.system_type() & !2) == 1 || (descriptor.system_type() & !2) == 9);
    let tss_is_16 = descriptor.system_type() <= 3;
    let tss_is_busy = (descriptor.system_type() & 2) == 2;

    if (descriptor.system_type() & 2) == 2 {
        // is busy
        panic!("#GP handler");
    }

    if !descriptor.is_present() {
        panic!("#NP handler");
    }

    if descriptor.effective_limit() < 103 {
        panic!("#NP handler");
    }

    let _tsr_size = *segment_limits.offset(TR as isize);
    let tsr_offset = *segment_offsets.offset(TR as isize);

    let mut old_eflags = get_eflags();

    // NOTE(review): this branch is unreachable as written — a busy
    // descriptor already panicked above. Presumably intended for the
    // IRET-initiated task-switch case (which clears NT); confirm.
    if tss_is_busy {
        old_eflags &= !FLAG_NT;
    }

    writable_or_pagefault(tsr_offset, 0x66).unwrap();

    //safe_write32(tsr_offset + TSR_CR3, *cr.offset(3));

    // TODO: Write 16 bit values if old tss is 16 bit

    // Save outgoing state into the current (old) TSS.
    safe_write32(tsr_offset + TSR_EIP, get_real_eip()).unwrap();
    safe_write32(tsr_offset + TSR_EFLAGS, old_eflags).unwrap();

    safe_write32(tsr_offset + TSR_EAX, read_reg32(EAX)).unwrap();
    safe_write32(tsr_offset + TSR_ECX, read_reg32(ECX)).unwrap();
    safe_write32(tsr_offset + TSR_EDX, read_reg32(EDX)).unwrap();
    safe_write32(tsr_offset + TSR_EBX, read_reg32(EBX)).unwrap();

    safe_write32(tsr_offset + TSR_ESP, read_reg32(ESP)).unwrap();
    safe_write32(tsr_offset + TSR_EBP, read_reg32(EBP)).unwrap();
    safe_write32(tsr_offset + TSR_ESI, read_reg32(ESI)).unwrap();
    safe_write32(tsr_offset + TSR_EDI, read_reg32(EDI)).unwrap();

    safe_write32(tsr_offset + TSR_ES, *sreg.offset(ES as isize) as i32).unwrap();
    safe_write32(tsr_offset + TSR_CS, *sreg.offset(CS as isize) as i32).unwrap();
    safe_write32(tsr_offset + TSR_SS, *sreg.offset(SS as isize) as i32).unwrap();
    safe_write32(tsr_offset + TSR_DS, *sreg.offset(DS as isize) as i32).unwrap();
    safe_write32(tsr_offset + TSR_FS, *sreg.offset(FS as isize) as i32).unwrap();
    safe_write32(tsr_offset + TSR_GS, *sreg.offset(GS as isize) as i32).unwrap();

    //safe_write32(tsr_offset + TSR_LDT, *sreg.offset(reg_ldtr));

    if true
    /* is jump or call or int */
    {
        // Mark the incoming TSS descriptor busy in the GDT.
        safe_write64(descriptor_address, descriptor.set_busy().raw).unwrap();
    }

    //let new_tsr_size = descriptor.effective_limit;
    let new_tsr_offset = descriptor.base();

    dbg_assert!(!tss_is_16, "unimplemented");

    if true
    /* is call or int */
    {
        // Record the outgoing task in the new TSS's backlink field (used
        // by IRET with NT set to return to this task).
        safe_write16(
            new_tsr_offset + TSR_BACKLINK,
            *sreg.offset(TR as isize) as i32,
        )
        .unwrap();
    }

    let new_cr3 = safe_read32s(new_tsr_offset + TSR_CR3).unwrap();

    *flags &= !FLAG_VM;

    let new_eip = safe_read32s(new_tsr_offset + TSR_EIP).unwrap();
    let new_cs = safe_read16(new_tsr_offset + TSR_CS).unwrap();
    let new_cs_selector = SegmentSelector::of_u16(new_cs as u16);
    let new_cs_descriptor =
        match lookup_segment_selector(new_cs_selector).expect("TODO: handle pagefault") {
            Ok((desc, _)) => desc,
            Err(SelectorNullOrInvalid::IsNull) => {
                dbg_log!("null cs");
                panic!("#TS handler");
            },
            Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                dbg_log!("invalid cs: {:x}", new_cs);
                panic!("#TS handler");
            },
        };

    if new_cs_descriptor.is_system() {
        panic!("#TS handler");
    }

    if !new_cs_descriptor.is_executable() {
        panic!("#TS handler");
    }

    if new_cs_descriptor.is_dc() && new_cs_descriptor.dpl() > new_cs_selector.rpl() {
        dbg_log!("cs conforming and dpl > rpl: {:x}", selector.raw);
        panic!("#TS handler");
    }

    if !new_cs_descriptor.is_dc() && new_cs_descriptor.dpl() != new_cs_selector.rpl() {
        dbg_log!("cs non-conforming and dpl != rpl: {:x}", selector.raw);
        panic!("#TS handler");
    }

    if !new_cs_descriptor.is_present() {
        dbg_log!("#NP for loading not-present in cs sel={:x}", selector.raw);
        panic!("#TS handler");
    }

    // Load the new task's code segment and privilege level.
    *segment_is_null.offset(CS as isize) = false;
    *segment_limits.offset(CS as isize) = new_cs_descriptor.effective_limit();
    *segment_offsets.offset(CS as isize) = new_cs_descriptor.base();
    *segment_access_bytes.offset(CS as isize) = new_cs_descriptor.access_byte();
    *sreg.offset(CS as isize) = new_cs as u16;

    *cpl = new_cs_descriptor.dpl();
    cpl_changed();

    dbg_assert!((*sreg.offset(CS as isize) & 3) as u8 == *cpl);

    dbg_assert!(
        new_eip as u32 <= new_cs_descriptor.effective_limit(),
        "todo: #gp"
    );
    update_cs_size(new_cs_descriptor.is_32());

    let mut new_eflags = safe_read32s(new_tsr_offset + TSR_EFLAGS).unwrap();

    if true
    /* is call or int */
    {
        safe_write32(tsr_offset + TSR_BACKLINK, selector.raw as i32).unwrap();
        // Nested task: set NT so IRET returns via the backlink.
        new_eflags |= FLAG_NT;
    }

    if new_eflags & FLAG_VM != 0 {
        panic!("task switch to VM mode");
    }

    update_eflags(new_eflags);

    if true
    /* call or int */
    {
        *flags |= FLAG_NT;
    }

    let new_ldt = safe_read16(new_tsr_offset + TSR_LDT).unwrap();
    load_ldt(new_ldt).unwrap();

    // Restore the new task's general-purpose registers.
    write_reg32(EAX, safe_read32s(new_tsr_offset + TSR_EAX).unwrap());
    write_reg32(ECX, safe_read32s(new_tsr_offset + TSR_ECX).unwrap());
    write_reg32(EDX, safe_read32s(new_tsr_offset + TSR_EDX).unwrap());
    write_reg32(EBX, safe_read32s(new_tsr_offset + TSR_EBX).unwrap());

    write_reg32(ESP, safe_read32s(new_tsr_offset + TSR_ESP).unwrap());
    write_reg32(EBP, safe_read32s(new_tsr_offset + TSR_EBP).unwrap());
    write_reg32(ESI, safe_read32s(new_tsr_offset + TSR_ESI).unwrap());
    write_reg32(EDI, safe_read32s(new_tsr_offset + TSR_EDI).unwrap());

    if !switch_seg(ES, safe_read16(new_tsr_offset + TSR_ES).unwrap())
        || !switch_seg(SS, safe_read16(new_tsr_offset + TSR_SS).unwrap())
        || !switch_seg(DS, safe_read16(new_tsr_offset + TSR_DS).unwrap())
        || !switch_seg(FS, safe_read16(new_tsr_offset + TSR_FS).unwrap())
        || !switch_seg(GS, safe_read16(new_tsr_offset + TSR_GS).unwrap())
    {
        // XXX: Should be checked before side effects
        dbg_assert!(false);
    }

    *instruction_pointer = get_seg_cs() + new_eip;

    // Point TR at the new TSS.
    *segment_offsets.offset(TR as isize) = descriptor.base();
    *segment_limits.offset(TR as isize) = descriptor.effective_limit();
    *sreg.offset(TR as isize) = selector.raw;

    set_cr3(new_cr3);

    // Task switches set the TS (task switched) bit in CR0.
    *cr.offset(0) |= CR0_TS;

    if let Some(error_code) = error_code {
        if tss_is_16 {
            push16(error_code & 0xFFFF).unwrap();
        }
        else {
            push32(error_code).unwrap();
        }
    }

    update_state_flags();
}
1809
+
1810
+ pub unsafe fn after_block_boundary() { jit_block_boundary = true; }
1811
+
1812
+ #[no_mangle]
1813
+ pub fn track_jit_exit(phys_addr: u32) {
1814
+ unsafe {
1815
+ debug_last_jump = LastJump::Compiled { phys_addr };
1816
+ }
1817
+ }
1818
+
1819
#[no_mangle]
// Materialise the full EFLAGS value: keep the non-arithmetic bits from the
// stored `*flags` image and merge in the lazily computed arithmetic flags at
// their x86 bit positions (CF=bit 0, PF=bit 2, AF=bit 4, ZF=bit 6, SF=bit 7,
// OF=bit 11).
pub unsafe fn get_eflags() -> i32 {
    return *flags & !FLAGS_ALL
        | getcf() as i32
        | (getpf() as i32) << 2
        | (getaf() as i32) << 4
        | (getzf() as i32) << 6
        | (getsf() as i32) << 7
        | (getof() as i32) << 11;
}
1829
+
1830
// Verify that the `size` bytes starting at virtual address `addr` are readable
// at the current privilege level, triggering a page fault (Err) otherwise.
// `size` must be in (0, 0x1000), so the range touches at most two pages.
pub unsafe fn readable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
    dbg_assert!(size < 0x1000);
    dbg_assert!(size > 0);

    let user = *cpl == 3;
    translate_address(addr, false, user, false, true)?;

    // Page-aligned address of the last byte of the access; if it lies on a
    // different page than the first byte, that page must be translated too.
    // (`end` is already masked, so the `end & !0xFFF` below is redundant but
    // harmless.)
    let end = addr + size - 1 & !0xFFF;
    if addr & !0xFFF != end & !0xFFF {
        translate_address(end, false, user, false, true)?;
    }

    return Ok(());
}

// Same as writable_or_pagefault_cpl, using the current privilege level.
pub unsafe fn writable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
    writable_or_pagefault_cpl(*cpl, addr, size)
}

// Verify that the `size` bytes starting at `addr` are writable when accessed
// at privilege level `other_cpl`, triggering a page fault (Err) otherwise.
// `size` must be in (0, 0x1000), so at most two pages are checked.
pub unsafe fn writable_or_pagefault_cpl(other_cpl: u8, addr: i32, size: i32) -> OrPageFault<()> {
    dbg_assert!(size < 0x1000);
    dbg_assert!(size > 0);

    let user = other_cpl == 3;
    translate_address(addr, true, user, false, true)?;

    // See readable_or_pagefault: check the second page if the range crosses
    // a page boundary.
    let end = addr + size - 1 & !0xFFF;
    if addr & !0xFFF != end & !0xFFF {
        translate_address(end, true, user, false, true)?;
    }

    return Ok(());
}
1863
+
1864
// Thin wrappers around translate_address/do_page_walk that fix the
// (for_writing, user, jit, side_effects) parameters for the common cases.

// Read translation without mutating TLB/accessed bits or faulting visibly
// (used for debugging/inspection paths).
pub fn translate_address_read_no_side_effects(address: i32) -> OrPageFault<u32> {
    unsafe { translate_address(address, false, *cpl == 3, false, false) }
}
// Read translation at the current privilege level.
pub fn translate_address_read(address: i32) -> OrPageFault<u32> {
    unsafe { translate_address(address, false, *cpl == 3, false, true) }
}
// Read translation from JIT-compiled code; faults are deferred via jit_fault.
pub unsafe fn translate_address_read_jit(address: i32) -> OrPageFault<u32> {
    translate_address(address, false, *cpl == 3, true, true)
}

// Write translation at the current privilege level.
pub unsafe fn translate_address_write(address: i32) -> OrPageFault<u32> {
    translate_address(address, true, *cpl == 3, false, true)
}
// JIT variant of translate_address_write_and_can_skip_dirty: additionally
// returns whether the target page contains no compiled code (TLB_HAS_CODE
// clear), i.e. whether dirty-page tracking for the JIT can be skipped.
pub unsafe fn translate_address_write_jit_and_can_skip_dirty(
    address: i32,
) -> OrPageFault<(u32, bool)> {
    let mut entry = tlb_data[(address as u32 >> 12) as usize];
    let user = *cpl == 3;
    // Fast path: entry must be valid, writable, and (for user mode) user
    // accessible; otherwise fall back to a full page walk.
    if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) != TLB_VALID {
        entry = do_page_walk(address, true, user, true, true)?.get();
    }
    Ok((
        // TLB entries have memory::mem8 baked in (see do_page_walk); subtract
        // it back out to obtain the offset.
        (entry & !0xFFF ^ address) as u32 - memory::mem8 as u32,
        entry & TLB_HAS_CODE == 0,
    ))
}

// Translations on behalf of the CPU itself (descriptor tables, TSS, …):
// always non-user, regardless of CPL.
pub unsafe fn translate_address_system_read(address: i32) -> OrPageFault<u32> {
    translate_address(address, false, false, false, true)
}
pub unsafe fn translate_address_system_write(address: i32) -> OrPageFault<u32> {
    translate_address(address, true, false, false, true)
}
1897
+
1898
#[inline(always)]
// Translate a virtual address, consulting the software TLB first and falling
// back to do_page_walk on a miss or permission mismatch. Returns the
// translated address as an offset usable against memory::mem8 (the TLB entry
// has mem8 baked in; it is subtracted here).
pub unsafe fn translate_address(
    address: i32,
    for_writing: bool,
    user: bool,
    jit: bool,
    side_effects: bool,
) -> OrPageFault<u32> {
    let mut entry = tlb_data[(address as u32 >> 12) as usize];
    // The entry is usable only if it is valid AND does not carry a
    // disqualifying restriction for this access (user access to a
    // supervisor-only page, or write to a read-only page).
    if entry
        & (TLB_VALID
            | if user { TLB_NO_USER } else { 0 }
            | if for_writing { TLB_READONLY } else { 0 })
        != TLB_VALID
    {
        entry = do_page_walk(address, for_writing, user, jit, side_effects)?.get();
    }
    Ok((entry & !0xFFF ^ address) as u32 - memory::mem8 as u32)
}
1917
+
1918
// Write translation that additionally reports whether the page holds no
// compiled code (second tuple element), so callers can skip JIT dirty-page
// bookkeeping. Near-duplicate of the `_jit` variant above, differing only in
// the jit=false argument to do_page_walk.
pub unsafe fn translate_address_write_and_can_skip_dirty(address: i32) -> OrPageFault<(u32, bool)> {
    let mut entry = tlb_data[(address as u32 >> 12) as usize];
    let user = *cpl == 3;
    if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) != TLB_VALID {
        entry = do_page_walk(address, true, user, false, true)?.get();
    }
    Ok((
        (entry & !0xFFF ^ address) as u32 - memory::mem8 as u32,
        entry & TLB_HAS_CODE == 0,
    ))
}
1929
+
1930
// 32-bit paging:
// - 10 bits PD | 10 bits PT | 12 bits offset
// - 10 bits PD | 22 bits offset (4MB huge page)
//
// PAE paging:
// - 2 bits PDPT | 9 bits PD | 9 bits PT | 12 bits offset
// - 2 bits PDPT | 9 bits PD | 21 bits offset (2MB huge page)
//
// Note that PAE entries are 64-bit, and can describe physical addresses over 32
// bits. However, since we support only 32-bit physical addresses, we require
// the high half of the entry to be 0.
//
// Full page-table walk on a TLB miss. On success, constructs a TLB entry
// (physical page frame with memory::mem8 baked in, XORed with the virtual
// page, plus TLB_* info bits) and — when `side_effects` is true — installs it
// in tlb_data and updates accessed/dirty bits in the guest page tables. On a
// permission or presence failure, raises a page fault via trigger_pagefault
// (deferred when `jit`) and returns Err.
#[cold]
pub unsafe fn do_page_walk(
    addr: i32,
    for_writing: bool,
    user: bool,
    jit: bool,
    side_effects: bool,
) -> OrPageFault<std::num::NonZeroI32> {
    let global;
    let mut allow_user = true;
    let page = (addr as u32 >> 12) as i32;
    let high;

    let cr0 = *cr;
    let cr4 = *cr.offset(4);

    if cr0 & CR0_PG == 0 {
        // paging disabled
        high = addr as u32 & 0xFFFFF000;
        global = false
    }
    else {
        profiler::stat_increment(stat::TLB_MISS);

        let pae = cr4 & CR4_PAE != 0;

        // Locate the page directory entry, either through the cached PDPT
        // registers (PAE) or directly below CR3 (classic 32-bit paging).
        let (page_dir_addr, page_dir_entry) = if pae {
            let pdpt_entry = *reg_pdpte.offset(((addr as u32) >> 30) as isize);
            if pdpt_entry as i32 & PAGE_TABLE_PRESENT_MASK == 0 {
                if side_effects {
                    trigger_pagefault(addr, false, for_writing, user, jit);
                }
                return Err(());
            }

            let page_dir_addr =
                (pdpt_entry as u32 & 0xFFFFF000) + ((((addr as u32) >> 21) & 0x1FF) << 3);
            let page_dir_entry = memory::read64s(page_dir_addr);
            dbg_assert!(
                page_dir_entry as u64 & 0x7FFF_FFFF_0000_0000 == 0,
                "Unsupported: Page directory entry larger than 32 bits"
            );
            dbg_assert!(
                page_dir_entry & 0x8000_0000_0000_0000u64 as i64 == 0,
                "Unsupported: NX bit"
            );

            (page_dir_addr, page_dir_entry as i32)
        }
        else {
            let page_dir_addr = *cr.offset(3) as u32 + (((addr as u32) >> 22) << 2);
            let page_dir_entry = memory::read32s(page_dir_addr);
            (page_dir_addr, page_dir_entry)
        };

        if page_dir_entry & PAGE_TABLE_PRESENT_MASK == 0 {
            if side_effects {
                trigger_pagefault(addr, false, for_writing, user, jit);
            }
            return Err(());
        }

        // Supervisor code may write read-only pages unless CR0.WP is set.
        let kernel_write_override = !user && 0 == cr0 & CR0_WP;
        let mut allow_write = page_dir_entry & PAGE_TABLE_RW_MASK != 0;
        allow_user &= page_dir_entry & PAGE_TABLE_USER_MASK != 0;

        if 0 != page_dir_entry & PAGE_TABLE_PSE_MASK && 0 != cr4 & CR4_PSE {
            // size bit is set: huge page (4MB, or 2MB under PAE), no page
            // table level

            if for_writing && !allow_write && !kernel_write_override || user && !allow_user {
                if side_effects {
                    trigger_pagefault(addr, true, for_writing, user, jit);
                }
                return Err(());
            }

            // set the accessed and dirty bits

            let new_page_dir_entry = page_dir_entry
                | PAGE_TABLE_ACCESSED_MASK
                | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };

            if side_effects && page_dir_entry != new_page_dir_entry {
                // NOTE(review): write8 stores a single byte — presumably the
                // accessed/dirty bits live in the entry's low byte; confirm
                // against the PAGE_TABLE_*_MASK constants.
                memory::write8(page_dir_addr, new_page_dir_entry);
            }

            high = if pae {
                page_dir_entry as u32 & 0xFFE00000 | (addr & 0x1FF000) as u32
            }
            else {
                page_dir_entry as u32 & 0xFFC00000 | (addr & 0x3FF000) as u32
            };
            global = page_dir_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
        }
        else {
            // Second level: locate and check the page table entry.
            let (page_table_addr, page_table_entry) = if pae {
                let page_table_addr =
                    (page_dir_entry as u32 & 0xFFFFF000) + (((addr as u32 >> 12) & 0x1FF) << 3);
                let page_table_entry = memory::read64s(page_table_addr);
                dbg_assert!(
                    page_table_entry as u64 & 0x7FFF_FFFF_0000_0000 == 0,
                    "Unsupported: Page table entry larger than 32 bits"
                );
                dbg_assert!(
                    page_table_entry & 0x8000_0000_0000_0000u64 as i64 == 0,
                    "Unsupported: NX bit"
                );

                (page_table_addr, page_table_entry as i32)
            }
            else {
                let page_table_addr =
                    (page_dir_entry as u32 & 0xFFFFF000) + (((addr as u32 >> 12) & 0x3FF) << 2);
                let page_table_entry = memory::read32s(page_table_addr);
                (page_table_addr, page_table_entry)
            };

            let present = page_table_entry & PAGE_TABLE_PRESENT_MASK != 0;
            // Permissions combine restrictively across both levels.
            allow_write &= page_table_entry & PAGE_TABLE_RW_MASK != 0;
            allow_user &= page_table_entry & PAGE_TABLE_USER_MASK != 0;

            if !present
                || for_writing && !allow_write && !kernel_write_override
                || user && !allow_user
            {
                if side_effects {
                    trigger_pagefault(addr, present, for_writing, user, jit);
                }
                return Err(());
            }

            // Set the accessed and dirty bits
            // Note: dirty bit is only set on the page table entry
            let new_page_dir_entry = page_dir_entry | PAGE_TABLE_ACCESSED_MASK;
            if side_effects && new_page_dir_entry != page_dir_entry {
                memory::write8(page_dir_addr, new_page_dir_entry);
            }
            let new_page_table_entry = page_table_entry
                | PAGE_TABLE_ACCESSED_MASK
                | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };
            if side_effects && page_table_entry != new_page_table_entry {
                memory::write8(page_table_addr, new_page_table_entry);
            }

            high = page_table_entry as u32 & 0xFFFFF000;
            global = page_table_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
        }
    }

    // Track the new entry in valid_tlb_entries, evicting everything when the
    // table is full.
    if side_effects && tlb_data[page as usize] == 0 {
        if valid_tlb_entries_count == VALID_TLB_ENTRY_MAX {
            profiler::stat_increment(stat::TLB_FULL);
            clear_tlb();
            // also clear global entries if tlb is almost full after clearing non-global pages
            if valid_tlb_entries_count > VALID_TLB_ENTRY_MAX * 3 / 4 {
                profiler::stat_increment(stat::TLB_GLOBAL_FULL);
                full_clear_tlb();
            }
        }
        dbg_assert!(valid_tlb_entries_count < VALID_TLB_ENTRY_MAX);
        valid_tlb_entries[valid_tlb_entries_count as usize] = page;
        valid_tlb_entries_count += 1;
        // TODO: Check that there are no duplicates in valid_tlb_entries
        // XXX: There will probably be duplicates due to invlpg deleting
        // entries from tlb_data but not from valid_tlb_entries
    }
    else if side_effects && CHECK_TLB_INVARIANTS {
        let mut found = false;
        for i in 0..valid_tlb_entries_count {
            if valid_tlb_entries[i as usize] == page {
                found = true;
                break;
            }
        }
        dbg_assert!(found);
    }

    let is_in_mapped_range = memory::in_mapped_range(high);
    let has_code = if side_effects {
        !is_in_mapped_range && jit::jit_page_has_code(Page::page_of(high))
    }
    else {
        // If side_effects is false, don't call into jit::jit_page_has_code. This value is not used
        // anyway (we only get here by translate_address_read_no_side_effects, which only uses the
        // address part)
        true
    };
    let info_bits = TLB_VALID
        | if for_writing { 0 } else { TLB_READONLY }
        | if allow_user { 0 } else { TLB_NO_USER }
        | if is_in_mapped_range { TLB_IN_MAPPED_RANGE } else { 0 }
        | if global && 0 != cr4 & CR4_PGE { TLB_GLOBAL } else { 0 }
        | if has_code { TLB_HAS_CODE } else { 0 };

    let tlb_entry = (high + memory::mem8 as u32) as i32 ^ page << 12 | info_bits as i32;

    // The XOR trick requires virtual and physical page offsets to agree in
    // the low 12 bits.
    dbg_assert!((high ^ (page as u32) << 12) & 0xFFF == 0);
    if side_effects {
        // bake in the addition with memory::mem8 to save an instruction from the fast path
        // of memory accesses
        tlb_data[page as usize] = tlb_entry;

        jit::update_tlb_code(Page::page_of(addr as u32), Page::page_of(high));
    }

    // tlb_entry is nonzero because TLB_VALID is always set; skip the check in
    // release builds.
    Ok(if DEBUG {
        std::num::NonZeroI32::new(tlb_entry).unwrap()
    }
    else {
        std::num::NonZeroI32::new_unchecked(tlb_entry)
    })
}
2153
+
2154
#[no_mangle]
// Drop every TLB entry, including global pages (used e.g. when CR0.PG/WP
// change, see set_cr0). Also invalidates the cached eip translation.
pub unsafe fn full_clear_tlb() {
    profiler::stat_increment(stat::FULL_CLEAR_TLB);
    // clear tlb including global pages
    *last_virt_eip = -1;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        clear_tlb_code(page);
        tlb_data[page as usize] = 0;
    }
    valid_tlb_entries_count = 0;

    if CHECK_TLB_INVARIANTS {
        // Every entry must have been wiped.
        #[allow(static_mut_refs)]
        for &entry in tlb_data.iter() {
            dbg_assert!(entry == 0);
        }
    };
}

#[no_mangle]
// Drop all non-global TLB entries (the MOV-to-CR3 semantics, see set_cr3).
// Entries marked TLB_GLOBAL are kept and compacted to the front of
// valid_tlb_entries.
pub unsafe fn clear_tlb() {
    profiler::stat_increment(stat::CLEAR_TLB);
    // clear tlb excluding global pages
    *last_virt_eip = -1;
    let mut global_page_offset = 0;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        let entry = tlb_data[page as usize];
        if 0 != entry & TLB_GLOBAL {
            // reinsert at the front
            valid_tlb_entries[global_page_offset as usize] = page;
            global_page_offset += 1;
        }
        else {
            clear_tlb_code(page);
            tlb_data[page as usize] = 0;
        }
    }
    valid_tlb_entries_count = global_page_offset;

    if CHECK_TLB_INVARIANTS {
        // Only global entries may survive.
        #[allow(static_mut_refs)]
        for &entry in tlb_data.iter() {
            dbg_assert!(entry == 0 || 0 != entry & TLB_GLOBAL);
        }
    };
}
2202
+
2203
// Exception triggers called from JIT-compiled code. Registers still live in
// the wasm module at this point, so these only restore the low 12 bits of eip
// (the page-internal offset) and record the pending exception in `jit_fault`;
// the actual interrupt is raised later by trigger_fault_end_jit.

#[no_mangle]
// Divide error (#DE) from JIT code.
pub unsafe fn trigger_de_jit(eip_offset_in_page: i32) {
    dbg_log!("#de in jit mode");
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
    jit_fault = Some((CPU_EXCEPTION_DE, None))
}

#[no_mangle]
// Invalid opcode (#UD) from JIT code.
pub unsafe fn trigger_ud_jit(eip_offset_in_page: i32) {
    dbg_log!("#ud in jit mode");
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
    jit_fault = Some((CPU_EXCEPTION_UD, None))
}

#[no_mangle]
// Device not available (#NM) from JIT code.
pub unsafe fn trigger_nm_jit(eip_offset_in_page: i32) {
    dbg_log!("#nm in jit mode");
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
    jit_fault = Some((CPU_EXCEPTION_NM, None))
}

#[no_mangle]
// General protection fault (#GP) with error code from JIT code.
pub unsafe fn trigger_gp_jit(code: i32, eip_offset_in_page: i32) {
    dbg_log!("#gp in jit mode");
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
    jit_fault = Some((CPU_EXCEPTION_GP, Some(code)))
}
2234
+
2235
#[no_mangle]
// Deliver the exception previously recorded in `jit_fault` by one of the
// trigger_*_jit functions, after the wasm module has restored register state.
// Panics (via unwrap) if no fault is pending.
pub unsafe fn trigger_fault_end_jit() {
    #[allow(static_mut_refs)]
    let (code, error_code) = jit_fault.take().unwrap();
    if DEBUG {
        // Allow the JS debug hook to swallow the exception.
        if js::cpu_exception_hook(code) {
            return;
        }
    }
    call_interrupt_vector(code, false, error_code);
}

/// Pagefault handling with the jit works as follows:
/// - If the slow path is taken, it calls safe_{read,write}*_jit
/// - safe_{read,write}*_jit call translate_address_{read,write}_jit
/// - translate_address_{read,write}_jit do the normal page walk and call this method with
///   jit=true when a page fault happens
/// - this method prepares a page fault by setting cr2, and writes the error code
///   into jit_fault. This method *doesn't* trigger the interrupt, as registers are
///   still stored in the wasm module
/// - back in the wasm module, the generated code detects the page fault, restores the registers
///   and finally calls trigger_fault_end_jit, which does the interrupt
///
/// Non-jit resets the instruction pointer and does the PF interrupt directly
pub unsafe fn trigger_pagefault(addr: i32, present: bool, write: bool, user: bool, jit: bool) {
    if config::LOG_PAGE_FAULTS {
        dbg_log!(
            "page fault{} w={} u={} p={} eip={:x} cr2={:x}",
            if jit { "jit" } else { "" },
            write as i32,
            user as i32,
            present as i32,
            *previous_ip,
            addr
        );
        dbg_trace();
    }
    profiler::stat_increment(stat::PAGE_FAULT);
    // CR2 holds the faulting linear address.
    *cr.offset(2) = addr;
    // invalidate tlb entry
    let page = ((addr as u32) >> 12) as i32;
    clear_tlb_code(page);
    tlb_data[page as usize] = 0;
    // x86 #PF error code: bit 0 = present, bit 1 = write, bit 2 = user.
    let error_code = (user as i32) << 2 | (write as i32) << 1 | present as i32;
    if jit {
        jit_fault = Some((CPU_EXCEPTION_PF, Some(error_code)));
    }
    else {
        *instruction_pointer = *previous_ip;
        call_interrupt_vector(CPU_EXCEPTION_PF, false, Some(error_code));
    }
}
2287
+
2288
// Set or clear the TLB_HAS_CODE bit on every TLB entry that maps the given
// physical page. When code is removed, the cached code pointers for those
// virtual pages are dropped as well.
pub fn tlb_set_has_code(physical_page: Page, has_code: bool) {
    for i in 0..unsafe { valid_tlb_entries_count } {
        let page = unsafe { valid_tlb_entries[i as usize] };
        let entry = unsafe { tlb_data[page as usize] };
        if 0 != entry {
            // Recover the physical page from the entry: undo the XOR with the
            // virtual page and the baked-in memory::mem8 offset.
            let tlb_physical_page = Page::of_u32(
                (entry as u32 >> 12 ^ page as u32) - (unsafe { memory::mem8 } as u32 >> 12),
            );
            if physical_page == tlb_physical_page {
                unsafe {
                    tlb_data[page as usize] =
                        if has_code { entry | TLB_HAS_CODE } else { entry & !TLB_HAS_CODE }
                }
                if !has_code {
                    clear_tlb_code(page);
                }
            }
        }
    }

    check_tlb_invariants();
}
2310
+ pub fn tlb_set_has_code_multiple(physical_pages: &HashSet<Page>, has_code: bool) {
2311
+ let physical_pages: Vec<Page> = physical_pages.into_iter().copied().collect();
2312
+ for i in 0..unsafe { valid_tlb_entries_count } {
2313
+ let page = unsafe { valid_tlb_entries[i as usize] };
2314
+ let entry = unsafe { tlb_data[page as usize] };
2315
+ if 0 != entry {
2316
+ let tlb_physical_page = Page::of_u32(
2317
+ (entry as u32 >> 12 ^ page as u32) - (unsafe { memory::mem8 } as u32 >> 12),
2318
+ );
2319
+ if physical_pages.contains(&tlb_physical_page) {
2320
+ unsafe {
2321
+ tlb_data[page as usize] =
2322
+ if has_code { entry | TLB_HAS_CODE } else { entry & !TLB_HAS_CODE }
2323
+ }
2324
+ }
2325
+ }
2326
+ }
2327
+
2328
+ check_tlb_invariants();
2329
+ }
2330
+
2331
// Debug-only consistency check (gated by CHECK_TLB_INVARIANTS): every TLB
// entry pointing at non-mapped memory must have TLB_HAS_CODE set whenever the
// JIT actually holds code for that physical page.
pub fn check_tlb_invariants() {
    if !CHECK_TLB_INVARIANTS {
        return;
    }

    for i in 0..unsafe { valid_tlb_entries_count } {
        let page = unsafe { valid_tlb_entries[i as usize] };
        let entry = unsafe { tlb_data[page as usize] };

        if 0 == entry || 0 != entry & TLB_IN_MAPPED_RANGE {
            // there's no code in mapped memory
            continue;
        }

        // Physical address of the page (entry has memory::mem8 baked in).
        let target = (entry ^ page << 12) as u32 - unsafe { memory::mem8 } as u32;
        dbg_assert!(!memory::in_mapped_range(target));

        let entry_has_code = entry & TLB_HAS_CODE != 0;
        let has_code = jit::jit_page_has_code(Page::page_of(target));

        // If some code has been created in a page, the corresponding tlb entries must be marked
        dbg_assert!(!has_code || entry_has_code);
    }
}
2355
+
2356
// Debug switch: when true, every instruction fetch re-translates eip instead
// of using the cached page translation in *eip_phys / *last_virt_eip.
pub const DISABLE_EIP_TRANSLATION_OPTIMISATION: bool = false;

// Fetch one instruction byte at eip and advance eip. The physical page of eip
// is cached: *eip_phys holds (physical ^ virtual) for the current code page,
// refreshed whenever eip moves to a different page.
pub unsafe fn read_imm8() -> OrPageFault<i32> {
    let eip = *instruction_pointer;
    if DISABLE_EIP_TRANSLATION_OPTIMISATION || 0 != eip & !0xFFF ^ *last_virt_eip {
        *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
        *last_virt_eip = eip & !0xFFF
    }
    dbg_assert!(!memory::in_mapped_range((*eip_phys ^ eip) as u32));
    let data8 = *memory::mem8.offset((*eip_phys ^ eip) as isize) as i32;
    *instruction_pointer = eip + 1;
    return Ok(data8);
}

// Fetch one instruction byte, sign-extended to i32 (<< 24 >> 24).
pub unsafe fn read_imm8s() -> OrPageFault<i32> { return Ok(read_imm8()? << 24 >> 24); }
2371
+
2372
// Fetch a 16-bit immediate at eip. Fast path reads directly from the cached
// code page; falls back to byte-wise reads when the access might cross a page
// boundary or the cache is stale.
pub unsafe fn read_imm16() -> OrPageFault<i32> {
    // Two checks in one comparison:
    // 1. Did the high 20 bits of eip change
    // or 2. Are the low 12 bits of eip 0xFFF (and this read crosses a page boundary)
    if DISABLE_EIP_TRANSLATION_OPTIMISATION
        || (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFE
    {
        return Ok(read_imm8()? | read_imm8()? << 8);
    }
    else {
        let data16 = memory::read16((*eip_phys ^ *instruction_pointer) as u32);
        *instruction_pointer = *instruction_pointer + 2;
        return Ok(data16);
    };
}

// Fetch a 32-bit immediate at eip; same fast/slow-path split as read_imm16
// (limit 0xFFC since four bytes are read).
pub unsafe fn read_imm32s() -> OrPageFault<i32> {
    // Analogue to the above comment
    if DISABLE_EIP_TRANSLATION_OPTIMISATION
        || (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFC
    {
        return Ok(read_imm16()? | read_imm16()? << 16);
    }
    else {
        let data32 = memory::read32s((*eip_phys ^ *instruction_pointer) as u32);
        *instruction_pointer = *instruction_pointer + 4;
        return Ok(data32);
    };
}
2401
+
2402
// Effective operand size: the default code-segment size (*is_32), flipped by
// an operand-size (0x66) prefix.
pub unsafe fn is_osize_32() -> bool {
    dbg_assert!(!in_jit);
    return *is_32 != (*prefixes & prefix::PREFIX_MASK_OPSIZE == prefix::PREFIX_MASK_OPSIZE);
}

// Effective address size: the default code-segment size (*is_32), flipped by
// an address-size (0x67) prefix.
pub unsafe fn is_asize_32() -> bool {
    dbg_assert!(!in_jit);
    return *is_32 != (*prefixes & prefix::PREFIX_MASK_ADDRSIZE == prefix::PREFIX_MASK_ADDRSIZE);
}
2411
+
2412
// Resolve a segment selector against the GDT or current LDT.
//
// Returns:
// - Err(()) on page fault while reading the descriptor (propagated),
// - Ok(Err(..)) for a null selector or one past the table limit,
// - Ok(Ok((descriptor, descriptor_address))) otherwise, where
//   descriptor_address is the linear address the 8-byte descriptor was read
//   from (used by callers to write back e.g. the accessed bit).
pub unsafe fn lookup_segment_selector(
    selector: SegmentSelector,
) -> OrPageFault<Result<(SegmentDescriptor, i32), SelectorNullOrInvalid>> {
    if selector.is_null() {
        return Ok(Err(SelectorNullOrInvalid::IsNull));
    }

    let (table_offset, table_limit) = if selector.is_gdt() {
        (*gdtr_offset as u32, *gdtr_size as u32)
    }
    else {
        (
            *segment_offsets.offset(LDTR as isize) as u32,
            *segment_limits.offset(LDTR as isize) as u32,
        )
    };

    if selector.descriptor_offset() as u32 > table_limit {
        dbg_log!(
            "segment outside of table limit: selector={:x} offset={:x} isgdt={} table_limit={:x}",
            selector.raw,
            selector.descriptor_offset(),
            selector.is_gdt(),
            table_limit
        );
        return Ok(Err(SelectorNullOrInvalid::OutsideOfTableLimit));
    }

    let descriptor_address = selector.descriptor_offset() as i32 + table_offset as i32;

    // Descriptor reads bypass segment-level privilege checks (system read).
    let descriptor = SegmentDescriptor::of_u64(memory::read64s(translate_address_system_read(
        descriptor_address,
    )?) as u64);

    Ok(Ok((descriptor, descriptor_address)))
}
2448
+
2449
#[inline(never)]
// Load a data/stack segment register (ES..GS, SS — never CS) with the given
// selector, performing the protected-mode permission and presence checks.
// Returns false (after raising #GP/#SS/#NP) when the load is rejected; the
// caller must then abort the current instruction.
pub unsafe fn switch_seg(reg: i32, selector_raw: i32) -> bool {
    dbg_assert!(reg >= 0 && reg <= 5);
    dbg_assert!(reg != CS);
    dbg_assert!(selector_raw >= 0 && selector_raw < 0x10000);

    if vm86_mode() {
        // TODO: Should set segment_limits and segment_access_bytes if ever implemented in get_seg
        // (only vm86, not in real mode)
    }

    // Real mode / vm86: no descriptor lookup, base = selector << 4.
    if !*protected_mode || vm86_mode() {
        *sreg.offset(reg as isize) = selector_raw as u16;
        *segment_is_null.offset(reg as isize) = false;
        *segment_offsets.offset(reg as isize) = selector_raw << 4;

        if reg == SS {
            *stack_size_32 = false;
        }
        update_state_flags();
        return true;
    }

    let selector = SegmentSelector::of_u16(selector_raw as u16);
    let (mut descriptor, descriptor_address) =
        match return_on_pagefault!(lookup_segment_selector(selector), false) {
            Ok(desc) => desc,
            Err(SelectorNullOrInvalid::IsNull) => {
                // Null selector: #GP(0) for SS, but a valid "unusable" load
                // for the data segments.
                if reg == SS {
                    dbg_log!("#GP for loading 0 in SS sel={:x}", selector_raw);
                    trigger_gp(0);
                    return false;
                }
                else {
                    // es, ds, fs, gs
                    *sreg.offset(reg as isize) = selector_raw as u16;
                    *segment_is_null.offset(reg as isize) = true;
                    update_state_flags();
                    return true;
                }
            },
            Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                dbg_log!(
                    "#GP for loading invalid in seg={} sel={:x}",
                    reg,
                    selector_raw,
                );
                dbg_trace();
                trigger_gp(selector_raw & !3);
                return false;
            },
        };

    if reg == SS {
        // SS requires a writable data segment with RPL == DPL == CPL.
        if descriptor.is_system()
            || selector.rpl() != *cpl
            || !descriptor.is_writable()
            || descriptor.dpl() != *cpl
        {
            dbg_log!("#GP for loading invalid in SS sel={:x}", selector_raw);
            trigger_gp(selector_raw & !3);
            return false;
        }

        if !descriptor.is_present() {
            dbg_log!("#SS for loading non-present in SS sel={:x}", selector_raw);
            trigger_ss(selector_raw & !3);
            return false;
        }

        *stack_size_32 = descriptor.is_32();
    }
    else {
        // Data segments must be readable; non-conforming segments also
        // require both RPL and CPL to be at most the descriptor's DPL.
        if descriptor.is_system()
            || !descriptor.is_readable()
            || (!descriptor.is_conforming_executable()
                && (selector.rpl() > descriptor.dpl() || *cpl > descriptor.dpl()))
        {
            dbg_log!(
                "#GP for loading invalid in seg {} sel={:x} sys={} readable={} dc={} exec={} rpl={} dpl={} cpl={} present={} paging={}",
                reg,
                selector_raw,
                descriptor.is_system(),
                descriptor.is_readable(),
                descriptor.is_dc(),
                descriptor.is_executable(),
                selector.rpl(),
                descriptor.dpl(),
                *cpl,
                descriptor.is_present(),
                *cr & CR0_PG != 0,
            );
            dbg_trace();
            trigger_gp(selector_raw & !3);
            return false;
        }

        if !descriptor.is_present() {
            dbg_log!(
                "#NP for loading not-present in seg {} sel={:x}",
                reg,
                selector_raw,
            );
            trigger_np(selector_raw & !3);
            return false;
        }
    }

    // Write the accessed bit back into the in-memory descriptor (byte 5 is
    // the access byte).
    if !descriptor.accessed() {
        descriptor = descriptor.set_accessed();

        memory::write8(
            translate_address_system_write(descriptor_address + 5).unwrap(),
            descriptor.access_byte() as i32,
        );
    }

    // Commit the cached segment state.
    *segment_is_null.offset(reg as isize) = false;
    *segment_limits.offset(reg as isize) = descriptor.effective_limit();
    *segment_offsets.offset(reg as isize) = descriptor.base();
    *segment_access_bytes.offset(reg as isize) = descriptor.access_byte();
    *sreg.offset(reg as isize) = selector_raw as u16;

    update_state_flags();

    true
}
2576
+
2577
// Load the task register (LTR): validate the selector as an available 386
// (type 9) or 286 (type 1) TSS descriptor, cache its base/limit in the TR
// slot, and mark the TSS busy in the GDT. Invalid cases currently panic
// instead of raising the architectural #GP/#NP.
pub unsafe fn load_tr(selector: i32) {
    let selector = SegmentSelector::of_u16(selector as u16);
    dbg_assert!(selector.is_gdt(), "TODO: TR can only be loaded from GDT");

    let (descriptor, descriptor_address) =
        match return_on_pagefault!(lookup_segment_selector(selector)) {
            Ok((desc, addr)) => (desc, addr),
            Err(SelectorNullOrInvalid::IsNull) => {
                panic!("TODO: null TR");
            },
            Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
                panic!("TODO: TR selector outside of table limit");
            },
        };

    //dbg_log!(
    //    "load tr: {:x} offset={:x} limit={:x} is32={}",
    //    selector.raw,
    //    descriptor.base(),
    //    descriptor.effective_limit(),
    //    descriptor.system_type() == 9,
    //);

    if !descriptor.is_system() {
        panic!("#GP | ltr: not a system entry (happens when running kvm-unit-test without ACPI)");
    }

    if descriptor.system_type() != 9 && descriptor.system_type() != 1 {
        // 0xB: busy 386 TSS (GP)
        // 0x9: 386 TSS
        // 0x3: busy 286 TSS (GP)
        // 0x1: 286 TSS (??)
        panic!(
            "#GP | ltr: invalid type (type = 0x{:x})",
            descriptor.system_type()
        );
    }

    if !descriptor.is_present() {
        panic!("#NT | present bit not set (ltr)");
    }

    // Type 9 is a 32-bit (386) TSS, type 1 a 16-bit (286) TSS.
    *tss_size_32 = descriptor.system_type() == 9;
    *segment_limits.offset(TR as isize) = descriptor.effective_limit();
    *segment_offsets.offset(TR as isize) = descriptor.base();
    *sreg.offset(TR as isize) = selector.raw;

    // Mark task as busy
    memory::write8(
        translate_address_system_write(descriptor_address + 5).unwrap(),
        descriptor.set_busy().access_byte() as i32,
    );
}
2630
+
2631
+ pub unsafe fn load_ldt(selector: i32) -> OrPageFault<()> {
2632
+ let selector = SegmentSelector::of_u16(selector as u16);
2633
+
2634
+ if selector.is_null() {
2635
+ dbg_log!("lldt: null loaded");
2636
+ *segment_limits.offset(LDTR as isize) = 0;
2637
+ *segment_offsets.offset(LDTR as isize) = 0;
2638
+ *sreg.offset(LDTR as isize) = selector.raw;
2639
+ return Ok(());
2640
+ }
2641
+
2642
+ dbg_assert!(selector.is_gdt(), "TODO: LDT can only be loaded from GDT");
2643
+
2644
+ let (descriptor, _) = match lookup_segment_selector(selector)? {
2645
+ Ok((desc, addr)) => (desc, addr),
2646
+ Err(SelectorNullOrInvalid::IsNull) => {
2647
+ panic!("TODO: null TR");
2648
+ },
2649
+ Err(SelectorNullOrInvalid::OutsideOfTableLimit) => {
2650
+ panic!("TODO: TR selector outside of table limit");
2651
+ },
2652
+ };
2653
+
2654
+ if !descriptor.is_present() {
2655
+ panic!("#NT | present bit not set (lldt)");
2656
+ }
2657
+
2658
+ if !descriptor.is_system() {
2659
+ panic!("#GP | lldt: not a system entry");
2660
+ }
2661
+
2662
+ if descriptor.system_type() != 2 {
2663
+ panic!(
2664
+ "#GP | lldt: invalid type (type = 0x{:x})",
2665
+ descriptor.system_type()
2666
+ );
2667
+ }
2668
+
2669
+ dbg_log!(
2670
+ "lldt: {:x} offset={:x} limit={:x}",
2671
+ selector.raw,
2672
+ descriptor.base(),
2673
+ descriptor.effective_limit()
2674
+ );
2675
+ *segment_limits.offset(LDTR as isize) = descriptor.effective_limit();
2676
+ *segment_offsets.offset(LDTR as isize) = descriptor.base();
2677
+ *sreg.offset(LDTR as isize) = selector.raw;
2678
+
2679
+ Ok(())
2680
+ }
2681
+
2682
#[no_mangle]
#[cfg(feature = "profiler")]
// Profiler-only helper: log when JIT code accesses a segment register that is
// currently null (loaded with a null selector).
pub unsafe fn log_segment_null(segment: i32) {
    dbg_assert!(segment >= 0 && segment < 8);
    if *segment_is_null.offset(segment as isize) {
        dbg_assert!(segment != CS && segment != SS);
        dbg_log!("#gp: Access null segment in jit");
    }
}

// Return the base address of the given segment, raising #GP(0) and returning
// Err if the segment is null (CS/SS can never be null here, asserted).
pub unsafe fn get_seg(segment: i32) -> OrPageFault<i32> {
    dbg_assert!(segment >= 0 && segment < 8);
    if *segment_is_null.offset(segment as isize) {
        dbg_assert!(segment != CS && segment != SS);
        dbg_log!("#gp: Access null segment {}", segment);
        dbg_trace();
        dbg_assert!(!in_jit);
        trigger_gp(0);
        return Err(());
    }
    return Ok(*segment_offsets.offset(segment as isize));
}
2704
+
2705
// Write CR0: forces CR0.ET, flushes the TLB when paging-relevant bits
// (PG/WP) change, reloads the PDPT cache when required under PAE, and updates
// the cached protected-mode flag.
pub unsafe fn set_cr0(cr0: i32) {
    let old_cr0 = *cr;

    if old_cr0 & CR0_AM == 0 && cr0 & CR0_AM != 0 {
        dbg_log!("Warning: Unimplemented: cr0 alignment mask");
    }
    if (cr0 & (CR0_PE | CR0_PG)) == CR0_PG {
        panic!("cannot load PG without PE");
    }

    *cr = cr0;
    // ET is hardwired to 1.
    *cr |= CR0_ET;

    if old_cr0 & (CR0_PG | CR0_WP) != cr0 & (CR0_PG | CR0_WP) {
        full_clear_tlb();
    }

    // Under PAE, MOV to CR0 that changes CD/NW/PG reloads the PDPT registers.
    if *cr.offset(4) & CR4_PAE != 0
        && old_cr0 & (CR0_CD | CR0_NW | CR0_PG) != cr0 & (CR0_CD | CR0_NW | CR0_PG)
    {
        load_pdpte(*cr.offset(3))
    }

    *protected_mode = (*cr & CR0_PE) == CR0_PE;
    // NOTE(review): unconditionally resets the CS access byte on every CR0
    // write — presumably to normalise CS across mode switches; confirm.
    *segment_access_bytes.offset(CS as isize) = 0x80 | 0x10 | 0x08 | 0x02; // P dpl0 S E RW
}

// Write CR3 (page directory base): masks the reserved low bits, reloads the
// PDPT cache under PAE, and flushes non-global TLB entries.
pub unsafe fn set_cr3(mut cr3: i32) {
    if false {
        dbg_log!("cr3 <- {:x}", cr3);
    }
    if *cr.offset(4) & CR4_PAE != 0 {
        // PAE: CR3 is 32-byte aligned.
        cr3 &= !0b1111;
        load_pdpte(cr3);
    }
    else {
        cr3 &= !0b111111100111;
        dbg_assert!(cr3 & 0xFFF == 0, "TODO");
    }
    *cr.offset(3) = cr3;
    clear_tlb();
}

// Reload the four cached PDPT registers (reg_pdpte) from memory at cr3, as
// done by hardware on MOV to CR3/CR0/CR4 under PAE. Entries above 32 bits or
// with unsupported flag bits are rejected via dbg_assert.
pub unsafe fn load_pdpte(cr3: i32) {
    dbg_assert!(cr3 & 0b1111 == 0);
    for i in 0..4 {
        let mut pdpt_entry = memory::read64s(cr3 as u32 + 8 * i as u32) as u64;
        pdpt_entry &= !0b1110_0000_0000;
        dbg_assert!(pdpt_entry & 0b11000 == 0, "TODO");
        dbg_assert!(
            pdpt_entry as u64 & 0xFFFF_FFFF_0000_0000 == 0,
            "Unsupported: PDPT entry larger than 32 bits"
        );
        if pdpt_entry as i32 & PAGE_TABLE_PRESENT_MASK != 0 {
            dbg_assert!(
                pdpt_entry & 0b1_1110_0110 == 0,
                "TODO: #gp reserved bit in pdpte"
            );
        }
        *reg_pdpte.offset(i) = pdpt_entry;
    }
}
2767
+
/// Called when the current privilege level changes: invalidates the cached
/// eip-page translation so the next fetch re-walks with the new privileges.
pub unsafe fn cpl_changed() { *last_virt_eip = -1 }

/// Updates the cached code-segment default size (the CS descriptor D bit).
pub unsafe fn update_cs_size(new_size: bool) {
    if *is_32 != new_size {
        *is_32 = new_size;
    }
}
+
/// Checks whether the current task may access I/O port `port` with an access
/// of `size` bytes.
///
/// Returns `true` if the access is allowed. Otherwise triggers #GP(0) and
/// returns `false`. In protected mode with CPL > IOPL (or in VM86 mode) the
/// I/O permission bitmap in the 32-bit TSS is consulted; the access is only
/// allowed if every bit covering [port, port+size) is clear.
#[inline(never)]
pub unsafe fn test_privileges_for_io(port: i32, size: i32) -> bool {
    if *protected_mode && (*cpl > getiopl() as u8 || (*flags & FLAG_VM != 0)) {
        if !*tss_size_32 {
            // A 16-bit TSS has no I/O permission bitmap: always #GP
            dbg_log!("#GP for port io, 16-bit TSS port={:x} size={}", port, size);
            trigger_gp(0);
            return false;
        }

        let tsr_size = *segment_limits.offset(TR as isize);
        let tsr_offset = *segment_offsets.offset(TR as isize);

        // 0x67 is the minimum limit of a 32-bit TSS; below that there is no
        // valid iomap-base field to read
        if tsr_size >= 0x67 {
            // the 16-bit iomap-base read at offset 0x66 must not cross a page
            dbg_assert!(tsr_offset + 0x64 + 2 & 0xFFF < 0xFFF);

            // offset 0x66 in the TSS holds the I/O map base address
            let iomap_base = memory::read16(return_on_pagefault!(
                translate_address_system_read(tsr_offset + 0x64 + 2),
                false
            ));
            let high_port = port + size - 1;

            // the bitmap byte covering the highest port must lie within the TSS limit
            if tsr_size >= (iomap_base + (high_port >> 3)) as u32 {
                // one bit per port, `size` consecutive bits starting at `port`
                let mask = ((1 << size) - 1) << (port & 7);
                let addr = return_on_pagefault!(
                    translate_address_system_read(tsr_offset + iomap_base + (port >> 3)),
                    false
                );
                // read 16 bits when the mask spills into the next byte
                let port_info =
                    if mask & 0xFF00 != 0 { memory::read16(addr) } else { memory::read8(addr) };

                dbg_assert!(addr & 0xFFF < 0xFFF);

                // all covered bits clear => access permitted
                if port_info & mask == 0 {
                    return true;
                }
            }
        }

        dbg_log!("#GP for port io port={:x} size={}", port, size);
        trigger_gp(0);
        return false;
    }

    return true;
}
+
/// POPA (16-bit): pops DI, SI, BP, (skips SP), BX, DX, CX, AX.
/// The whole 16-byte stack range is validated up front so the `unwrap`s on
/// the individual pops cannot fail mid-sequence.
pub unsafe fn popa16() {
    return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 16));

    write_reg16(DI, pop16().unwrap());
    write_reg16(SI, pop16().unwrap());
    write_reg16(BP, pop16().unwrap());
    // the saved SP value is discarded, per the POPA definition
    adjust_stack_reg(2);
    write_reg16(BX, pop16().unwrap());
    write_reg16(DX, pop16().unwrap());
    write_reg16(CX, pop16().unwrap());
    write_reg16(AX, pop16().unwrap());
}

/// POPAD (32-bit): pops EDI, ESI, EBP, (skips ESP), EBX, EDX, ECX, EAX.
/// The 32-byte stack range is validated up front, as in `popa16`.
pub unsafe fn popa32() {
    return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 32));

    write_reg32(EDI, pop32s().unwrap());
    write_reg32(ESI, pop32s().unwrap());
    write_reg32(EBP, pop32s().unwrap());
    // the saved ESP value is discarded
    adjust_stack_reg(4);
    write_reg32(EBX, pop32s().unwrap());
    write_reg32(EDX, pop32s().unwrap());
    write_reg32(ECX, pop32s().unwrap());
    write_reg32(EAX, pop32s().unwrap());
}
+
/// Returns the cached state flags used to tag jit-compiled code.
pub fn get_state_flags() -> CachedStateFlags { unsafe { *state_flags } }

/// Returns the CS segment base (exported for jitted code).
#[no_mangle]
pub fn get_seg_cs() -> i32 { unsafe { *segment_offsets.offset(CS as isize) } }

/// Returns the SS segment base.
pub unsafe fn get_seg_ss() -> i32 { return *segment_offsets.offset(SS as isize); }

/// Resolves the effective segment for the current instruction: the segment
/// selected by an active segment-override prefix, or `default_segment` if no
/// override is active. Unlike `get_seg_prefix`, this returns the segment
/// *index*, not its base offset.
pub unsafe fn segment_prefix(default_segment: i32) -> i32 {
    let prefix = *prefixes & prefix::PREFIX_MASK_SEGMENT;
    if 0 != prefix {
        // SEG_PREFIX_ZERO (forced zero base) has no segment index
        dbg_assert!(prefix != prefix::SEG_PREFIX_ZERO);
        prefix as i32 - 1
    }
    else {
        default_segment
    }
}
+
2866
+ pub unsafe fn get_seg_prefix(default_segment: i32) -> OrPageFault<i32> {
2867
+ dbg_assert!(!in_jit);
2868
+ let prefix = *prefixes & prefix::PREFIX_MASK_SEGMENT;
2869
+ if 0 != prefix {
2870
+ if prefix == prefix::SEG_PREFIX_ZERO {
2871
+ return Ok(0);
2872
+ }
2873
+ else {
2874
+ return get_seg(prefix as i32 - 1);
2875
+ }
2876
+ }
2877
+ else {
2878
+ return get_seg(default_segment);
2879
+ };
2880
+ }
2881
+
/// Effective address relative to the (possibly overridden) DS segment.
pub unsafe fn get_seg_prefix_ds(offset: i32) -> OrPageFault<i32> {
    Ok(get_seg_prefix(DS)? + offset)
}

/// Effective address relative to the (possibly overridden) SS segment.
pub unsafe fn get_seg_prefix_ss(offset: i32) -> OrPageFault<i32> {
    Ok(get_seg_prefix(SS)? + offset)
}
+
/// Resolves a modrm byte to an effective address, dispatching on the current
/// address-size attribute (32- vs 16-bit addressing).
pub unsafe fn modrm_resolve(modrm_byte: i32) -> OrPageFault<i32> {
    if is_asize_32() {
        resolve_modrm32(modrm_byte)
    }
    else {
        resolve_modrm16(modrm_byte)
    }
}

// Dispatch into the generated interpreter tables. The 0f (two-byte) opcodes
// encode the operand-size attribute in bit 8 of the table index.
pub unsafe fn run_instruction(opcode: i32) { gen::interpreter::run(opcode as u32) }
pub unsafe fn run_instruction0f_16(opcode: i32) { gen::interpreter0f::run(opcode as u32) }
pub unsafe fn run_instruction0f_32(opcode: i32) { gen::interpreter0f::run(opcode as u32 | 0x100) }
+
/// Executes one "cycle": either a jit-compiled block (if the current eip page
/// has compiled code for the current state flags) or a run of interpreted
/// instructions, recording profiling stats and compilation hotness.
pub unsafe fn cycle_internal() {
    profiler::stat_increment(stat::CYCLE_INTERNAL);
    let mut jit_entry = None;
    let initial_eip = *instruction_pointer;
    let initial_state_flags = *state_flags;

    // Look up the current eip page in the per-page code cache
    match tlb_code[(initial_eip as u32 >> 12) as usize] {
        None => {},
        Some(c) => {
            let c = c.as_ref();

            if initial_state_flags == c.state_flags {
                // state_table maps page offset -> wasm entry state (u16::MAX = none)
                let state = c.state_table[initial_eip as usize & 0xFFF];
                if state != u16::MAX {
                    jit_entry = Some((c.wasm_table_index.to_u16(), state));
                }
                else {
                    profiler::stat_increment(if is_near_end_of_page(initial_eip as u32) {
                        stat::RUN_INTERPRETED_NEAR_END_OF_PAGE
                    }
                    else {
                        stat::RUN_INTERPRETED_PAGE_HAS_CODE
                    })
                }
            }
            else {
                // Cached code was compiled under different state flags; record
                // which flag(s) differ for profiling
                profiler::stat_increment(stat::RUN_INTERPRETED_DIFFERENT_STATE);
                let s = *state_flags;
                if c.state_flags.cpl3() != s.cpl3() {
                    profiler::stat_increment(stat::RUN_INTERPRETED_DIFFERENT_STATE_CPL3);
                }
                if c.state_flags.has_flat_segmentation() != s.has_flat_segmentation() {
                    profiler::stat_increment(stat::RUN_INTERPRETED_DIFFERENT_STATE_FLAT);
                }
                if c.state_flags.is_32() != s.is_32() {
                    profiler::stat_increment(stat::RUN_INTERPRETED_DIFFERENT_STATE_IS32);
                }
                if c.state_flags.ssize_32() != s.ssize_32() {
                    profiler::stat_increment(stat::RUN_INTERPRETED_DIFFERENT_STATE_SS32);
                }
            }
        },
    }

    if let Some((wasm_table_index, initial_state)) = jit_entry {
        // Cross-check the fast per-page lookup against the authoritative cache
        if jit::CHECK_JIT_STATE_INVARIANTS {
            match get_phys_eip() {
                Err(()) => dbg_assert!(false),
                Ok(phys_eip) => {
                    let entry = jit::jit_find_cache_entry(phys_eip, initial_state_flags);
                    dbg_assert!(entry.wasm_table_index.to_u16() == wasm_table_index);
                    dbg_assert!(entry.initial_state == initial_state);
                },
            }
        }
        profiler::stat_increment(stat::RUN_FROM_CACHE);
        let initial_instruction_counter = *instruction_counter;
        #[cfg(debug_assertions)]
        {
            in_jit = true;
        }
        // Call the compiled block through the wasm function table
        wasm::call_indirect1(
            wasm_table_index as i32 + WASM_TABLE_OFFSET as i32,
            initial_state,
        );
        #[cfg(debug_assertions)]
        {
            in_jit = false;
        }
        profiler::stat_increment_by(
            stat::RUN_FROM_CACHE_STEPS,
            (*instruction_counter - initial_instruction_counter) as u64,
        );
        dbg_assert!(
            *instruction_counter != initial_instruction_counter,
            "Instruction counter didn't change"
        );

        if cfg!(feature = "profiler") {
            dbg_assert!(match debug_last_jump {
                LastJump::Compiled { .. } => true,
                _ => false,
            });
            #[allow(static_mut_refs)]
            let last_jump_addr = debug_last_jump.phys_address().unwrap();
            let last_jump_opcode = if last_jump_addr != 0 {
                memory::read32s(last_jump_addr)
            }
            else {
                // Happens during exit due to loop iteration limit
                0
            };

            opstats::record_opstat_jit_exit(last_jump_opcode as u32);
        }

        // Classify where the compiled block exited, for profiling
        if is_near_end_of_page(*instruction_pointer as u32) {
            profiler::stat_increment(stat::RUN_FROM_CACHE_EXIT_NEAR_END_OF_PAGE);
        }
        else if Page::page_of(initial_eip as u32) == Page::page_of(*instruction_pointer as u32) {
            profiler::stat_increment(stat::RUN_FROM_CACHE_EXIT_SAME_PAGE);
        }
        else {
            profiler::stat_increment(stat::RUN_FROM_CACHE_EXIT_DIFFERENT_PAGE);
        }
    }
    else {
        *previous_ip = initial_eip;
        let phys_addr = return_on_pagefault!(get_phys_eip());

        // The page walk above may have faulted in a page that does have
        // compiled code; if so, bail out and let the next cycle run it
        match tlb_code[(initial_eip as u32 >> 12) as usize] {
            None => {},
            Some(c) => {
                let c = c.as_ref();

                if initial_state_flags == c.state_flags
                    && c.state_table[initial_eip as usize & 0xFFF] != u16::MAX
                {
                    profiler::stat_increment(stat::RUN_INTERPRETED_PAGE_HAS_ENTRY_AFTER_PAGE_WALK);
                    return;
                }
            },
        }

        #[cfg(feature = "profiler")]
        {
            if CHECK_MISSED_ENTRY_POINTS {
                jit::check_missed_entry_points(phys_addr, initial_state_flags);
            }
        }

        let initial_instruction_counter = *instruction_counter;
        jit_run_interpreted(phys_addr);

        // Count this entry point towards jit compilation
        jit::jit_increase_hotness_and_maybe_compile(
            initial_eip,
            phys_addr,
            get_seg_cs() as u32,
            initial_state_flags,
            *instruction_counter - initial_instruction_counter,
        );

        profiler::stat_increment_by(
            stat::RUN_INTERPRETED_STEPS,
            (*instruction_counter - initial_instruction_counter) as u64,
        );
        dbg_assert!(
            *instruction_counter != initial_instruction_counter,
            "Instruction counter didn't change"
        );
    };
}
+
/// Translates the current instruction pointer to a physical address, using a
/// one-entry cache (`last_virt_eip`/`eip_phys`) keyed on the eip page.
pub unsafe fn get_phys_eip() -> OrPageFault<u32> {
    let eip = *instruction_pointer;
    // cache miss: the eip page changed since the last translation
    if 0 != eip & !0xFFF ^ *last_virt_eip {
        // eip_phys stores virt^phys so the offset bits can be xored in below
        *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
        *last_virt_eip = eip & !0xFFF
    }
    let phys_addr = (*eip_phys ^ eip) as u32;
    // code is never fetched from mmio-mapped memory
    dbg_assert!(!memory::in_mapped_range(phys_addr));
    return Ok(phys_addr);
}
+
/// Interprets instructions starting at physical address `phys_addr` until a
/// block boundary, a page change, or the iteration limit on a backwards jump.
/// Updates `instruction_counter` with the number of instructions executed.
unsafe fn jit_run_interpreted(mut phys_addr: u32) {
    profiler::stat_increment(stat::RUN_INTERPRETED);
    dbg_assert!(!memory::in_mapped_range(phys_addr));

    jit_block_boundary = false;
    let mut i = 0;

    loop {
        if CHECK_MISSED_ENTRY_POINTS {
            // profiling: we are interpreting an address that has compiled code
            let entry = jit::jit_find_cache_entry(phys_addr, *state_flags);
            if entry != jit::CachedCode::NONE {
                profiler::stat_increment(
                    stat::RUN_INTERPRETED_MISSED_COMPILED_ENTRY_RUN_INTERPRETED,
                );
            }
        }

        i += 1;
        let start_eip = *instruction_pointer;
        let opcode = *memory::mem8.offset(phys_addr as isize) as i32;
        *instruction_pointer += 1;
        dbg_assert!(*prefixes == 0);
        // bit 8 of the dispatch index selects the 32-bit operand-size table
        run_instruction(opcode | (*is_32 as i32) << 8);
        dbg_assert!(*prefixes == 0);

        if jit_block_boundary
            || Page::page_of(start_eip as u32) != Page::page_of(*instruction_pointer as u32)
            // Limit the number of iterations, as jumps within the same page are not counted as
            // block boundaries for the interpreter, but only on the next backwards jump
            || (i >= INTERPRETER_ITERATION_LIMIT
                && (start_eip as u32) >= (*instruction_pointer as u32))
        {
            break;
        }

        *previous_ip = *instruction_pointer;
        phys_addr = return_on_pagefault!(get_phys_eip()) as u32;
    }

    if cfg!(debug_assertions) {
        debug_last_jump = LastJump::Interpreted { phys_addr };
    }

    *instruction_counter += i;
}
+
/// Recomputes the cached state flags (is_32, stack size, cpl3, flat
/// segmentation) used to tag and look up jit-compiled code.
#[no_mangle]
pub fn update_state_flags() {
    unsafe {
        *state_flags = CachedStateFlags::of_u32(
            (*is_32 as u32) << 0
                | (*stack_size_32 as u32) << 1
                | ((*cpl == 3) as u32) << 2
                | (has_flat_segmentation() as u32) << 3,
        )
    }
}

/// True if CS, SS and DS all have a zero base (and DS is not null), i.e.
/// the flat memory model that lets the jit skip segmentation arithmetic.
#[no_mangle]
pub unsafe fn has_flat_segmentation() -> bool {
    // cs/ss can't be null
    return *segment_offsets.offset(SS as isize) == 0
        && !*segment_is_null.offset(DS as isize)
        && *segment_offsets.offset(DS as isize) == 0
        && *segment_offsets.offset(CS as isize) == 0;
}

/// Fetches and dispatches the instruction following a prefix byte, keeping
/// the operand-size attribute from the current prefix state.
pub unsafe fn run_prefix_instruction() {
    run_instruction(return_on_pagefault!(read_imm8()) | (is_osize_32() as i32) << 8);
}

/// Handles a segment-override prefix: records the override (segment index
/// + 1) in `prefixes`, runs the prefixed instruction, then clears prefixes.
pub unsafe fn segment_prefix_op(seg: i32) {
    dbg_assert!(seg <= 5 && seg >= 0);
    *prefixes |= seg as u8 + 1;
    run_prefix_instruction();
    *prefixes = 0
}
+
/// Top-level emulation loop for one frame. Runs batches of cycles and
/// hardware timers until either the CPU halts (returns the next timer
/// deadline) or the frame time budget is exhausted (returns 0.0).
#[no_mangle]
pub unsafe fn main_loop() -> f64 {
    profiler::stat_increment(stat::MAIN_LOOP);

    let start = js::microtick();

    if *in_hlt {
        if *flags & FLAG_INTERRUPT != 0 {
            // halted but interruptible: service timers/irqs; if still halted,
            // report the next wakeup time to the host
            let t = js::run_hardware_timers(*acpi_enabled, start);
            handle_irqs();
            if *in_hlt {
                profiler::stat_increment(stat::MAIN_LOOP_IDLE);
                return t;
            }
        }
        else {
            // hlt with interrupts disabled: the CPU can never wake up
            // dead
            return 100.0;
        }
    }

    loop {
        do_many_cycles_native();

        let now = js::microtick();
        let t = js::run_hardware_timers(*acpi_enabled, now);
        handle_irqs();
        if *in_hlt {
            return t;
        }

        if now - start > TIME_PER_FRAME {
            break;
        }
    }

    return 0.0;
}

/// Runs up to LOOP_COUNTER instructions (or until hlt) via `cycle_internal`.
pub unsafe fn do_many_cycles_native() {
    profiler::stat_increment(stat::DO_MANY_CYCLES);
    let initial_instruction_counter = *instruction_counter;
    while (*instruction_counter).wrapping_sub(initial_instruction_counter) < LOOP_COUNTER as u32
        && !*in_hlt
    {
        cycle_internal();
    }
}
+
/// Raises #DE (divide error). Faults restart the instruction, so eip is
/// rewound to the start of the faulting instruction first.
#[cold]
pub unsafe fn trigger_de() {
    dbg_log!("#de");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        // the debug hook may swallow the exception
        if js::cpu_exception_hook(CPU_EXCEPTION_DE) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_DE, false, None);
}

/// Raises #UD (invalid opcode), rewinding eip to the faulting instruction.
#[inline(never)]
pub unsafe fn trigger_ud() {
    dbg_log!("#ud");
    dbg_trace();
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if js::cpu_exception_hook(CPU_EXCEPTION_UD) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_UD, false, None);
}

/// Raises #NM (device not available), rewinding eip to the faulting
/// instruction.
#[inline(never)]
pub unsafe fn trigger_nm() {
    dbg_log!("#nm eip={:x}", *previous_ip);
    dbg_trace();
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if js::cpu_exception_hook(CPU_EXCEPTION_NM) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_NM, false, None);
}

/// Raises #GP with the given error `code`, rewinding eip to the faulting
/// instruction.
#[inline(never)]
pub unsafe fn trigger_gp(code: i32) {
    dbg_log!("#gp");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if js::cpu_exception_hook(CPU_EXCEPTION_GP) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_GP, false, Some(code));
}
+
/// Reads a 16-bit value that straddles a page boundary: `low` is the physical
/// address of the last byte of one page, `high` of the first byte of the next.
#[cold]
pub unsafe fn virt_boundary_read16(low: u32, high: u32) -> i32 {
    dbg_assert!(low & 0xFFF == 0xFFF);
    dbg_assert!(high & 0xFFF == 0);
    return memory::read8(low as u32) | memory::read8(high as u32) << 8;
}

/// Reads a 32-bit value that straddles a page boundary. `low` points at the
/// first byte (page offset 0xFFD-0xFFF), `high` at the last byte in the next
/// page; the middle two bytes are read from whichever side they fall on.
#[cold]
pub unsafe fn virt_boundary_read32s(low: u32, high: u32) -> i32 {
    dbg_assert!(low & 0xFFF >= 0xFFD);
    dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
    let mid;
    if 0 != low & 1 {
        if 0 != low & 2 {
            // 0xFFF: bytes 1-2 are both in the high page
            mid = memory::read16(high - 2)
        }
        else {
            // 0xFFD: bytes 1-2 are both in the low page
            mid = memory::read16(low + 1)
        }
    }
    else {
        // 0xFFE: bytes 1-2 straddle the boundary themselves
        mid = virt_boundary_read16(low + 1, high - 1)
    }
    return memory::read8(low as u32) | mid << 8 | memory::read8(high as u32) << 24;
}

/// Writes a 16-bit value split across a page boundary (see
/// `virt_boundary_read16` for the low/high convention).
#[cold]
pub unsafe fn virt_boundary_write16(low: u32, high: u32, value: i32) {
    dbg_assert!(low & 0xFFF == 0xFFF);
    dbg_assert!(high & 0xFFF == 0);
    memory::write8(low as u32, value);
    memory::write8(high as u32, value >> 8);
}

/// Writes a 32-bit value split across a page boundary (see
/// `virt_boundary_read32s` for the low/high convention).
#[cold]
pub unsafe fn virt_boundary_write32(low: u32, high: u32, value: i32) {
    dbg_assert!(low & 0xFFF >= 0xFFD);
    dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
    memory::write8(low as u32, value);
    if 0 != low & 1 {
        if 0 != low & 2 {
            // 0xFFF: middle bytes land in the high page
            memory::write8((high - 2) as u32, value >> 8);
            memory::write8((high - 1) as u32, value >> 16);
        }
        else {
            // 0xFFD: middle bytes land in the low page
            memory::write8((low + 1) as u32, value >> 8);
            memory::write8((low + 2) as u32, value >> 16);
        }
    }
    else {
        // 0xFFE: one middle byte on each side
        memory::write8((low + 1) as u32, value >> 8);
        memory::write8((high - 1) as u32, value >> 16);
    }
    memory::write8(high as u32, value >> 24);
}
+
/// Reads one byte at virtual address `addr`, raising a page fault via
/// `Err(())` if translation fails.
pub unsafe fn safe_read8(addr: i32) -> OrPageFault<i32> {
    Ok(memory::read8(translate_address_read(addr)?))
}

/// Reads a 16-bit value at `addr`, splitting into two byte reads when the
/// access crosses a page boundary.
pub unsafe fn safe_read16(addr: i32) -> OrPageFault<i32> {
    if addr & 0xFFF == 0xFFF {
        Ok(safe_read8(addr)? | safe_read8(addr + 1)? << 8)
    }
    else {
        Ok(memory::read16(translate_address_read(addr)?))
    }
}

/// Reads a 32-bit value at `addr`, splitting into two 16-bit reads when the
/// access crosses a page boundary (offset >= 0xFFD).
pub unsafe fn safe_read32s(addr: i32) -> OrPageFault<i32> {
    if addr & 0xFFF >= 0xFFD {
        Ok(safe_read16(addr)? | safe_read16(addr + 2)? << 16)
    }
    else {
        Ok(memory::read32s(translate_address_read(addr)?))
    }
}

/// Reads a 32-bit float at `addr` (bit-reinterprets the integer read).
pub unsafe fn safe_read_f32(addr: i32) -> OrPageFault<f32> {
    Ok(f32::from_bits(i32::cast_unsigned(safe_read32s(addr)?)))
}

/// Reads a 64-bit value at `addr`, splitting into two 32-bit reads when the
/// access crosses a page boundary.
pub unsafe fn safe_read64s(addr: i32) -> OrPageFault<u64> {
    if addr & 0xFFF > 0x1000 - 8 {
        Ok(safe_read32s(addr)? as u32 as u64 | (safe_read32s(addr + 4)? as u32 as u64) << 32)
    }
    else {
        Ok(memory::read64s(translate_address_read(addr)?) as u64)
    }
}

/// Reads a 128-bit value at `addr`, splitting into two 64-bit reads when the
/// access crosses a page boundary.
pub unsafe fn safe_read128s(addr: i32) -> OrPageFault<reg128> {
    if addr & 0xFFF > 0x1000 - 16 {
        Ok(reg128 {
            u64: [safe_read64s(addr)?, safe_read64s(addr + 8)?],
        })
    }
    else {
        Ok(memory::read128(translate_address_read(addr)?))
    }
}
+
/// Profiling only: classifies why a jitted read took the slow path, based on
/// the TLB entry bits, and bumps the matching counter.
#[no_mangle]
#[cfg(feature = "profiler")]
pub fn report_safe_read_jit_slow(address: u32, entry: i32) {
    let counter = if entry & TLB_VALID == 0 {
        stat::SAFE_READ_SLOW_NOT_VALID
    }
    else if entry & TLB_IN_MAPPED_RANGE != 0 {
        stat::SAFE_READ_SLOW_IN_MAPPED_RANGE
    }
    else if entry & TLB_NO_USER != 0 {
        stat::SAFE_READ_SLOW_NOT_USER
    }
    else if address & 0xFFF > 0x1000 - 16 {
        stat::SAFE_READ_SLOW_PAGE_CROSSED
    }
    else {
        // none of the known slow-path causes apply: this is a bug
        dbg_log!("Unexpected entry bit: {:x} (read at {:x})", entry, address);
        dbg_assert!(false);
        return;
    };
    profiler::stat_increment(counter);
}
+
/// Profiling only: classifies why a jitted write took the slow path, based on
/// the TLB entry bits, and bumps the matching counter.
#[no_mangle]
#[cfg(feature = "profiler")]
pub fn report_safe_write_jit_slow(address: u32, entry: i32) {
    if entry & TLB_VALID == 0 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_NOT_VALID);
    }
    else if entry & TLB_IN_MAPPED_RANGE != 0 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_IN_MAPPED_RANGE);
    }
    else if entry & TLB_HAS_CODE != 0 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_HAS_CODE);
    }
    else if entry & TLB_READONLY != 0 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_READ_ONLY);
    }
    else if entry & TLB_NO_USER != 0 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_NOT_USER);
    }
    else if address & 0xFFF > 0x1000 - 16 {
        profiler::stat_increment(stat::SAFE_WRITE_SLOW_PAGE_CROSSED);
    }
    else {
        // none of the known slow-path causes apply: this is a bug
        dbg_assert!(false);
    }
}

/// Profiling only: classifies why a jitted read-modify-write access took the
/// slow path, based on the TLB entry bits, and bumps the matching counter.
#[no_mangle]
#[cfg(feature = "profiler")]
pub fn report_safe_read_write_jit_slow(address: u32, entry: i32) {
    if entry & TLB_VALID == 0 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_NOT_VALID);
    }
    else if entry & TLB_IN_MAPPED_RANGE != 0 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_IN_MAPPED_RANGE);
    }
    else if entry & TLB_HAS_CODE != 0 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_HAS_CODE);
    }
    else if entry & TLB_READONLY != 0 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_READ_ONLY);
    }
    else if entry & TLB_NO_USER != 0 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_NOT_USER);
    }
    else if address & 0xFFF > 0x1000 - 16 {
        profiler::stat_increment(stat::SAFE_READ_WRITE_SLOW_PAGE_CROSSED);
    }
    else {
        // none of the known slow-path causes apply: this is a bug
        dbg_assert!(false);
    }
}
+
// Two-page scratch area used by the jit slow paths for page-crossing and
// mmio accesses. Page-aligned so `scratch & 0xFFF == 0` holds and offsets
// within a page can be or-ed in directly.
#[repr(align(0x1000))]
struct ScratchBuffer([u8; 0x1000 * 2]);
static mut jit_paging_scratch_buffer: ScratchBuffer = ScratchBuffer([0; 2 * 0x1000]);
+
/// Slow path for reads from jitted code (also used for the read half of
/// read-modify-write when `is_write` is set).
///
/// Returns 1 on page fault (after rewinding eip to the faulting instruction
/// via `eip_offset_in_page`). Otherwise returns a value `v` such that
/// `v ^ addr` is a host pointer the jitted fast path can read from: either
/// directly into guest memory, or into `jit_paging_scratch_buffer` when the
/// access crosses a page or targets mmio-mapped memory.
pub unsafe fn safe_read_slow_jit(
    addr: i32,
    bitsize: i32,
    eip_offset_in_page: i32,
    is_write: bool,
) -> i32 {
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    if is_write && Page::page_of(*instruction_pointer as u32) == Page::page_of(addr as u32) {
        // XXX: Check based on virtual address
        dbg_log!(
            "SMC (rmw): bits={} eip={:x} writeaddr={:x}",
            bitsize,
            (*instruction_pointer & !0xFFF | eip_offset_in_page) as u32,
            addr as u32
        );
    }
    let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
    let addr_low = match if is_write {
        translate_address_write_jit_and_can_skip_dirty(addr).map(|x| x.0)
    }
    else {
        translate_address_read_jit(addr)
    } {
        Err(()) => {
            // page fault: restore eip to the faulting instruction and signal
            *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
            return 1;
        },
        Ok(addr) => addr,
    };
    if crosses_page {
        let boundary_addr = (addr | 0xFFF) + 1;
        let addr_high = match if is_write {
            translate_address_write_jit_and_can_skip_dirty(boundary_addr).map(|x| x.0)
        }
        else {
            translate_address_read_jit(boundary_addr)
        } {
            Err(()) => {
                *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
                return 1;
            },
            Ok(addr) => addr,
        };
        // TODO: Could check if virtual pages point to consecutive physical and go to fast path
        // do read, write into scratch buffer

        let scratch = &raw mut jit_paging_scratch_buffer.0 as u32;
        dbg_assert!(scratch & 0xFFF == 0);

        // copy the tail of the low page and the head of the high page into the
        // scratch pages at matching offsets
        for s in addr_low..((addr_low | 0xFFF) + 1) {
            *(scratch as *mut u8).offset((s & 0xFFF) as isize) = memory::read8(s) as u8
        }
        for s in addr_high..(addr_high + (addr + bitsize / 8 & 0xFFF) as u32) {
            *(scratch as *mut u8).offset((0x1000 | s & 0xFFF) as isize) = memory::read8(s) as u8
        }

        ((scratch as i32) ^ addr) & !0xFFF
    }
    else if memory::in_mapped_range(addr_low) {
        // mmio: perform the device read once, stash the value in the scratch
        // page at the same page offset for the fast path to load
        let scratch = &raw mut jit_paging_scratch_buffer.0[0];

        match bitsize {
            128 => ptr::write_unaligned(
                scratch.offset(addr_low as isize & 0xFFF) as *mut reg128,
                memory::read128(addr_low),
            ),
            64 => ptr::write_unaligned(
                scratch.offset(addr_low as isize & 0xFFF) as *mut i64,
                memory::read64s(addr_low),
            ),
            32 => ptr::write_unaligned(
                scratch.offset(addr_low as isize & 0xFFF) as *mut i32,
                memory::read32s(addr_low),
            ),
            16 => ptr::write_unaligned(
                scratch.offset(addr_low as isize & 0xFFF) as *mut u16,
                memory::read16(addr_low) as u16,
            ),
            8 => {
                *(scratch.offset(addr_low as isize & 0xFFF) as *mut u8) =
                    memory::read8(addr_low) as u8
            },
            _ => {
                dbg_assert!(false);
            },
        }

        ((scratch as i32) ^ addr) & !0xFFF
    }
    else {
        // plain ram: hand back a direct pointer into guest memory
        ((addr_low as i32 + memory::mem8 as i32) ^ addr) & !0xFFF
    }
}
+
// Per-width entry points called from jitted code; all delegate to
// `safe_read_slow_jit` with the matching bit width.
#[no_mangle]
pub unsafe fn safe_read8_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 8, eip, false)
}
#[no_mangle]
pub unsafe fn safe_read16_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 16, eip, false)
}
#[no_mangle]
pub unsafe fn safe_read32s_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 32, eip, false)
}
#[no_mangle]
pub unsafe fn safe_read64s_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 64, eip, false)
}
#[no_mangle]
pub unsafe fn safe_read128s_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 128, eip, false)
}

/// Slow path for instruction fetch from jitted code: translates `addr` and
/// returns a host-pointer xor value (see `safe_read_slow_jit`), or 1 on
/// page fault.
#[no_mangle]
pub unsafe fn get_phys_eip_slow_jit(addr: i32) -> i32 {
    match translate_address_read_jit(addr) {
        Err(()) => 1,
        Ok(addr_low) => {
            dbg_assert!(!memory::in_mapped_range(addr_low as u32)); // same assumption as in read_imm8
            ((addr_low as i32 + memory::mem8 as i32) ^ addr) & !0xFFF
        },
    }
}

// Read-modify-write variants: translate for write so the later store cannot
// fault after the read half has been performed.
#[no_mangle]
pub unsafe fn safe_read_write8_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 8, eip, true)
}
#[no_mangle]
pub unsafe fn safe_read_write16_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 16, eip, true)
}
#[no_mangle]
pub unsafe fn safe_read_write32s_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 32, eip, true)
}
#[no_mangle]
pub unsafe fn safe_read_write64_slow_jit(addr: i32, eip: i32) -> i32 {
    safe_read_slow_jit(addr, 64, eip, true)
}
+
/// Slow path for writes from jitted code.
///
/// Returns 1 on page fault (after rewinding eip via `eip_offset_in_page`).
/// Otherwise returns a value `v` such that `v ^ addr` is a host pointer the
/// jitted fast path can store into. For page-crossing and mmio accesses the
/// write is performed here and the returned pointer targets the scratch
/// buffer, so the fast path's store is a harmless dummy.
pub unsafe fn safe_write_slow_jit(
    addr: i32,
    bitsize: i32,
    value_low: u64,
    value_high: u64,
    eip_offset_in_page: i32,
) -> i32 {
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    // self-modifying code: writing into the page currently being executed
    if Page::page_of(*instruction_pointer as u32) == Page::page_of(addr as u32) {
        // XXX: Check based on virtual address
        dbg_log!(
            "SMC: bits={} eip={:x} writeaddr={:x} value={:x}",
            bitsize,
            (*instruction_pointer & !0xFFF | eip_offset_in_page) as u32,
            addr as u32,
            value_low,
        );
    }
    let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
    let (addr_low, can_skip_dirty_page) = match translate_address_write_jit_and_can_skip_dirty(addr)
    {
        Err(()) => {
            *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
            return 1;
        },
        Ok(x) => x,
    };
    if crosses_page {
        // translate the second page up front so the split write cannot fault halfway
        let (addr_high, _) =
            match translate_address_write_jit_and_can_skip_dirty((addr | 0xFFF) + 1) {
                Err(()) => {
                    *instruction_pointer = *instruction_pointer & !0xFFF | eip_offset_in_page;
                    return 1;
                },
                Ok(x) => x,
            };
        // TODO: Could check if virtual pages point to consecutive physical and go to fast path

        // do write, return dummy pointer for fast path to write into

        match bitsize {
            128 => safe_write128(
                addr,
                reg128 {
                    u64: [value_low, value_high],
                },
            )
            .unwrap(),
            64 => safe_write64(addr, value_low).unwrap(),
            32 => virt_boundary_write32(
                addr_low,
                addr_high | (addr as u32 + 3 & 3),
                value_low as i32,
            ),
            16 => virt_boundary_write16(addr_low, addr_high, value_low as i32),
            8 => {
                // a single byte can never cross a page
                dbg_assert!(false);
            },
            _ => {
                dbg_assert!(false);
            },
        }

        let scratch = &raw mut jit_paging_scratch_buffer.0 as u32;
        dbg_assert!(scratch & 0xFFF == 0);
        ((scratch as i32) ^ addr) & !0xFFF
    }
    else if memory::in_mapped_range(addr_low) {
        // mmio: perform the device write here, dummy pointer for the fast path
        match bitsize {
            128 => memory::mmap_write128(addr_low, value_low, value_high),
            64 => memory::mmap_write64(addr_low, value_low),
            32 => memory::mmap_write32(addr_low, value_low as i32),
            16 => memory::mmap_write16(addr_low, (value_low & 0xFFFF) as i32),
            8 => memory::mmap_write8(addr_low, (value_low & 0xFF) as i32),
            _ => {
                dbg_assert!(false);
            },
        }

        let scratch = &raw mut jit_paging_scratch_buffer.0 as u32;
        dbg_assert!(scratch & 0xFFF == 0);
        ((scratch as i32) ^ addr) & !0xFFF
    }
    else {
        // plain ram: mark the page dirty for the jit and let the fast path store
        if !can_skip_dirty_page {
            jit::jit_dirty_page(Page::page_of(addr_low));
        }
        ((addr_low as i32 + memory::mem8 as i32) ^ addr) & !0xFFF
    }
}
+
// Per-width entry points called from jitted code; all delegate to
// `safe_write_slow_jit` with the matching bit width.
#[no_mangle]
pub unsafe fn safe_write8_slow_jit(addr: i32, value: u32, eip_offset_in_page: i32) -> i32 {
    safe_write_slow_jit(addr, 8, value as u64, 0, eip_offset_in_page)
}
#[no_mangle]
pub unsafe fn safe_write16_slow_jit(addr: i32, value: u32, eip_offset_in_page: i32) -> i32 {
    safe_write_slow_jit(addr, 16, value as u64, 0, eip_offset_in_page)
}
#[no_mangle]
pub unsafe fn safe_write32_slow_jit(addr: i32, value: u32, eip_offset_in_page: i32) -> i32 {
    safe_write_slow_jit(addr, 32, value as u64, 0, eip_offset_in_page)
}
#[no_mangle]
pub unsafe fn safe_write64_slow_jit(addr: i32, value: u64, eip_offset_in_page: i32) -> i32 {
    safe_write_slow_jit(addr, 64, value, 0, eip_offset_in_page)
}
#[no_mangle]
pub unsafe fn safe_write128_slow_jit(
    addr: i32,
    low: u64,
    high: u64,
    eip_offset_in_page: i32,
) -> i32 {
    safe_write_slow_jit(addr, 128, low, high, eip_offset_in_page)
}
+
3689
+ pub unsafe fn safe_write8(addr: i32, value: i32) -> OrPageFault<()> {
3690
+ let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
3691
+ if memory::in_mapped_range(phys_addr) {
3692
+ memory::mmap_write8(phys_addr, value);
3693
+ }
3694
+ else {
3695
+ if !can_skip_dirty_page {
3696
+ jit::jit_dirty_page(Page::page_of(phys_addr));
3697
+ }
3698
+ else {
3699
+ dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
3700
+ }
3701
+ memory::write8_no_mmap_or_dirty_check(phys_addr, value);
3702
+ };
3703
+ Ok(())
3704
+ }
3705
+
3706
+ pub unsafe fn safe_write16(addr: i32, value: i32) -> OrPageFault<()> {
3707
+ let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
3708
+ dbg_assert!(value >= 0 && value < 0x10000);
3709
+ if addr & 0xFFF == 0xFFF {
3710
+ virt_boundary_write16(phys_addr, translate_address_write(addr + 1)?, value);
3711
+ }
3712
+ else if memory::in_mapped_range(phys_addr) {
3713
+ memory::mmap_write16(phys_addr, value);
3714
+ }
3715
+ else {
3716
+ if !can_skip_dirty_page {
3717
+ jit::jit_dirty_page(Page::page_of(phys_addr));
3718
+ }
3719
+ else {
3720
+ dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
3721
+ }
3722
+ memory::write16_no_mmap_or_dirty_check(phys_addr, value);
3723
+ };
3724
+ Ok(())
3725
+ }
3726
+
3727
/// Write a 32-bit value to virtual address `addr`.
///
/// If the store starts within the last 3 bytes of a page it crosses a page
/// boundary: the following page is translated separately, with the byte
/// offset of the tail encoded into the low 2 bits for the boundary helper.
pub unsafe fn safe_write32(addr: i32, value: i32) -> OrPageFault<()> {
    let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
    if addr & 0xFFF > 0x1000 - 4 {
        virt_boundary_write32(
            phys_addr,
            // Translate the dword-aligned tail, then stash the misalignment in bits 0-1.
            translate_address_write(addr + 3 & !3)? | (addr as u32 + 3 & 3),
            value,
        );
    }
    else if memory::in_mapped_range(phys_addr) {
        memory::mmap_write32(phys_addr, value);
    }
    else {
        if !can_skip_dirty_page {
            jit::jit_dirty_page(Page::page_of(phys_addr));
        }
        else {
            dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
        }
        memory::write32_no_mmap_or_dirty_check(phys_addr, value);
    };
    Ok(())
}
3750
+
3751
/// Write a 64-bit value to virtual address `addr`.
///
/// Page-crossing stores are validated for the full 8-byte span up front, then
/// split into two 32-bit writes (which cannot fault anymore, hence `unwrap`).
pub unsafe fn safe_write64(addr: i32, value: u64) -> OrPageFault<()> {
    if addr & 0xFFF > 0x1000 - 8 {
        writable_or_pagefault(addr, 8)?;
        safe_write32(addr, value as i32).unwrap();
        safe_write32(addr + 4, (value >> 32) as i32).unwrap();
    }
    else {
        let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
        if memory::in_mapped_range(phys_addr) {
            memory::mmap_write64(phys_addr, value);
        }
        else {
            if !can_skip_dirty_page {
                jit::jit_dirty_page(Page::page_of(phys_addr));
            }
            else {
                dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
            }
            memory::write64_no_mmap_or_dirty_check(phys_addr, value);
        }
    };
    Ok(())
}
3774
+
3775
/// Write a 128-bit value to virtual address `addr`.
///
/// Mirrors `safe_write64`: page-crossing stores are validated for the full
/// 16-byte span first, then split into two 64-bit writes that cannot fault.
pub unsafe fn safe_write128(addr: i32, value: reg128) -> OrPageFault<()> {
    if addr & 0xFFF > 0x1000 - 16 {
        writable_or_pagefault(addr, 16)?;
        safe_write64(addr, value.u64[0]).unwrap();
        safe_write64(addr + 8, value.u64[1]).unwrap();
    }
    else {
        let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
        if memory::in_mapped_range(phys_addr) {
            memory::mmap_write128(phys_addr, value.u64[0], value.u64[1]);
        }
        else {
            if !can_skip_dirty_page {
                jit::jit_dirty_page(Page::page_of(phys_addr));
            }
            else {
                dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
            }
            memory::write128_no_mmap_or_dirty_check(phys_addr, value);
        }
    };
    Ok(())
}
3798
+
3799
#[inline(always)]
/// Atomic-style read-modify-write of one byte at virtual `addr`: reads the
/// current value, applies `instruction`, and writes the result back.
/// On page fault the macro returns early and `instruction` is never called.
pub unsafe fn safe_read_write8(addr: i32, instruction: &dyn Fn(i32) -> i32) {
    let (phys_addr, can_skip_dirty_page) =
        return_on_pagefault!(translate_address_write_and_can_skip_dirty(addr));
    let x = memory::read8(phys_addr);
    let value = instruction(x);
    dbg_assert!(value >= 0 && value < 0x100);
    if memory::in_mapped_range(phys_addr) {
        memory::mmap_write8(phys_addr, value);
    }
    else {
        if !can_skip_dirty_page {
            jit::jit_dirty_page(Page::page_of(phys_addr));
        }
        else {
            dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
        }
        memory::write8_no_mmap_or_dirty_check(phys_addr, value);
    }
}
3819
+
3820
#[inline(always)]
/// Read-modify-write of a 16-bit value at virtual `addr`.
///
/// The page-straddling case (first byte at offset 0xFFF) reads and writes
/// through the boundary helpers; note the low 12 bits of `phys_addr` equal
/// those of `addr`, so the boundary test on `phys_addr` is equivalent.
pub unsafe fn safe_read_write16(addr: i32, instruction: &dyn Fn(i32) -> i32) {
    let (phys_addr, can_skip_dirty_page) =
        return_on_pagefault!(translate_address_write_and_can_skip_dirty(addr));
    if phys_addr & 0xFFF == 0xFFF {
        let phys_addr_high = return_on_pagefault!(translate_address_write(addr + 1));
        let x = virt_boundary_read16(phys_addr, phys_addr_high);
        virt_boundary_write16(phys_addr, phys_addr_high, instruction(x));
    }
    else {
        let x = memory::read16(phys_addr);
        let value = instruction(x);
        dbg_assert!(value >= 0 && value < 0x10000);
        if memory::in_mapped_range(phys_addr) {
            memory::mmap_write16(phys_addr, value);
        }
        else {
            if !can_skip_dirty_page {
                jit::jit_dirty_page(Page::page_of(phys_addr));
            }
            else {
                dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
            }
            memory::write16_no_mmap_or_dirty_check(phys_addr, value);
        };
    }
}
3847
+
3848
#[inline(always)]
/// Read-modify-write of a 32-bit value at virtual `addr`.
///
/// Offsets 0xFFD..=0xFFF straddle a page boundary; the tail page is
/// translated separately with the misalignment encoded in the low 2 bits.
pub unsafe fn safe_read_write32(addr: i32, instruction: &dyn Fn(i32) -> i32) {
    let (phys_addr, can_skip_dirty_page) =
        return_on_pagefault!(translate_address_write_and_can_skip_dirty(addr));
    if phys_addr & 0xFFF >= 0xFFD {
        let phys_addr_high = return_on_pagefault!(translate_address_write(addr + 3 & !3));
        let phys_addr_high = phys_addr_high | (addr as u32) + 3 & 3;
        let x = virt_boundary_read32s(phys_addr, phys_addr_high);
        virt_boundary_write32(phys_addr, phys_addr_high, instruction(x));
    }
    else {
        let x = memory::read32s(phys_addr);
        let value = instruction(x);
        if memory::in_mapped_range(phys_addr) {
            memory::mmap_write32(phys_addr, value);
        }
        else {
            if !can_skip_dirty_page {
                jit::jit_dirty_page(Page::page_of(phys_addr));
            }
            else {
                dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
            }
            memory::write32_no_mmap_or_dirty_check(phys_addr, value);
        };
    }
}
3875
+
3876
/// Map an 8-bit register number (AL..BH) to its byte offset in the `reg8`
/// view: registers 0-3 select the low byte of regs 0-3 (offsets 0,4,8,12),
/// registers 4-7 select the high byte (offsets 1,5,9,13).
fn get_reg8_index(index: i32) -> i32 { ((index & 3) << 2) | ((index >> 2) & 1) }
3877
+
3878
/// Read 8-bit register `index` (0..8) as a zero-extended i32.
pub unsafe fn read_reg8(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg8.offset(get_reg8_index(index) as isize) as i32;
}
3882
+
3883
/// Write the low byte of `value` into 8-bit register `index` (0..8).
pub unsafe fn write_reg8(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg8.offset(get_reg8_index(index) as isize) = value as u8;
}
3887
+
3888
/// Map a 16-bit register number to its u16-element offset in the `reg16`
/// view (each 32-bit register spans two u16 slots).
fn get_reg16_index(index: i32) -> i32 { index * 2 }
3889
+
3890
/// Read 16-bit register `index` (0..8) as a zero-extended i32.
pub unsafe fn read_reg16(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg16.offset(get_reg16_index(index) as isize) as i32;
}
3894
+
3895
/// Write the low 16 bits of `value` into 16-bit register `index` (0..8).
pub unsafe fn write_reg16(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg16.offset(get_reg16_index(index) as isize) = value as u16;
}
3899
+
3900
/// Read 32-bit general-purpose register `index` (0..8).
pub unsafe fn read_reg32(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize)
}
3904
+
3905
/// Write 32-bit general-purpose register `index` (0..8).
pub unsafe fn write_reg32(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize) = value;
}
3909
+
3910
// MMX registers alias the FPU stack mantissas; low 32 bits of MMn.
pub unsafe fn read_mmx32s(r: i32) -> i32 { (*fpu_st.offset(r as isize)).mantissa as i32 }
3911
+
3912
// Full 64-bit MMn value (the F80 mantissa of FPU stack slot `r`).
pub unsafe fn read_mmx64s(r: i32) -> u64 { (*fpu_st.offset(r as isize)).mantissa }
3913
+
3914
// Store 64 bits into MMn (the mantissa of FPU stack slot `r`).
pub unsafe fn write_mmx_reg64(r: i32, data: u64) { (*fpu_st.offset(r as isize)).mantissa = data; }
3915
+
3916
// Lowest f32 lane of XMMr.
pub unsafe fn read_xmm_f32(r: i32) -> f32 { return (*reg_xmm.offset(r as isize)).f32[0]; }
3917
+
3918
// Lowest 32 bits of XMMr as i32.
pub unsafe fn read_xmm32(r: i32) -> i32 { return (*reg_xmm.offset(r as isize)).u32[0] as i32; }
3919
+
3920
// Lowest 64 bits of XMMr.
pub unsafe fn read_xmm64s(r: i32) -> u64 { (*reg_xmm.offset(r as isize)).u64[0] }
3921
+
3922
// Full 128-bit copy of XMMr.
pub unsafe fn read_xmm128s(r: i32) -> reg128 { return *reg_xmm.offset(r as isize); }
3923
+
3924
// Store into the lowest f32 lane of XMMr; upper lanes untouched.
pub unsafe fn write_xmm_f32(r: i32, data: f32) { (*reg_xmm.offset(r as isize)).f32[0] = data; }
3925
+
3926
// Store into the lowest 32 bits of XMMr; upper lanes untouched.
pub unsafe fn write_xmm32(r: i32, data: i32) { (*reg_xmm.offset(r as isize)).i32[0] = data; }
3927
+
3928
// Store into the lowest 64 bits of XMMr; upper half untouched.
pub unsafe fn write_xmm64(r: i32, data: u64) { (*reg_xmm.offset(r as isize)).u64[0] = data }
3929
// Store into the lowest f64 lane of XMMr; upper half untouched.
pub unsafe fn write_xmm_f64(r: i32, data: f64) { (*reg_xmm.offset(r as isize)).f64[0] = data }
3930
+
3931
/// Replace all of XMMr from four 32-bit lanes (i0 = lowest lane).
pub unsafe fn write_xmm128(r: i32, i0: i32, i1: i32, i2: i32, i3: i32) {
    let x = reg128 {
        u32: [i0 as u32, i1 as u32, i2 as u32, i3 as u32],
    };
    *reg_xmm.offset(r as isize) = x;
}
3937
+
3938
/// Replace all of XMMr from two 64-bit halves (i0 = low half).
pub unsafe fn write_xmm128_2(r: i32, i0: u64, i1: u64) {
    *reg_xmm.offset(r as isize) = reg128 { u64: [i0, i1] };
}
3941
+
3942
// Replace all of XMMr with `data`.
pub unsafe fn write_xmm_reg128(r: i32, data: reg128) { *reg_xmm.offset(r as isize) = data; }
3943
+
3944
/// Set the fpu tag word to valid and the top-of-stack to 0 on mmx instructions
pub fn transition_fpu_to_mmx() {
    unsafe {
        // SAFETY: single-threaded emulator state; writes only global FPU fields.
        fpu_set_tag_word(0);
        *fpu_stack_ptr = 0;
    }
}
3951
+
3952
+ pub unsafe fn task_switch_test() -> bool {
3953
+ if 0 != *cr & (CR0_EM | CR0_TS) {
3954
+ trigger_nm();
3955
+ return false;
3956
+ }
3957
+ else {
3958
+ return true;
3959
+ };
3960
+ }
3961
+
3962
/// Install a new MXCSR value, logging a warning for each control bit the
/// emulator does not implement (DAZ, FZ, non-default rounding, unmasked
/// exceptions). Reserved bits must already be cleared by the caller.
pub unsafe fn set_mxcsr(new_mxcsr: i32) {
    dbg_assert!(new_mxcsr & !MXCSR_MASK == 0); // checked by caller

    if *mxcsr & MXCSR_DAZ == 0 && new_mxcsr & MXCSR_DAZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Denormals Are Zero");
    }
    if *mxcsr & MXCSR_FZ == 0 && new_mxcsr & MXCSR_FZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Flush To Zero");
    }

    let rounding_mode = new_mxcsr >> MXCSR_RC_SHIFT & 3;
    if *mxcsr >> MXCSR_RC_SHIFT & 3 == 0 && rounding_mode != 0 {
        dbg_log!(
            "Warning: Unimplemented MXCSR rounding mode: {}",
            rounding_mode
        );
    }

    // Bits 7-12 are the exception mask; warn only when some exception becomes unmasked.
    let exception_mask = new_mxcsr >> 7 & 0b111111;
    if *mxcsr >> 7 & 0b111111 != exception_mask && exception_mask != 0b111111 {
        dbg_log!(
            "Warning: Unimplemented MXCSR exception mask: 0b{:b}",
            exception_mask
        );
    }

    *mxcsr = new_mxcsr;
}
3990
+
3991
#[no_mangle]
/// JIT variant of `task_switch_test`: only entered when the check already
/// failed, so it unconditionally raises #NM at the given eip page offset.
pub unsafe fn task_switch_test_jit(eip_offset_in_page: i32) {
    dbg_assert!(0 != *cr & (CR0_EM | CR0_TS));
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    trigger_nm_jit(eip_offset_in_page);
}
3997
+
3998
/// Pre-MMX/SSE-instruction check. CR0.EM raises #UD (checked first, matching
/// x86 exception priority), CR0.TS raises #NM; returns `true` if neither is set.
pub unsafe fn task_switch_test_mmx() -> bool {
    if *cr.offset(4) & CR4_OSFXSR == 0 {
        dbg_log!("Warning: Unimplemented task switch test with cr4.osfxsr=0");
    }
    if 0 != *cr & CR0_EM {
        trigger_ud();
        return false;
    }
    else if 0 != *cr & CR0_TS {
        trigger_nm();
        return false;
    }
    else {
        return true;
    };
}
4014
+
4015
#[no_mangle]
/// JIT variant of `task_switch_test_mmx`: only entered when the check failed,
/// so exactly one of CR0.EM (#UD) or CR0.TS (#NM) must be set.
pub unsafe fn task_switch_test_mmx_jit(eip_offset_in_page: i32) {
    dbg_assert!(eip_offset_in_page >= 0 && eip_offset_in_page < 0x1000);
    if *cr.offset(4) & CR4_OSFXSR == 0 {
        dbg_log!("Warning: Unimplemented task switch test with cr4.osfxsr=0");
    }
    if 0 != *cr & CR0_EM {
        trigger_ud_jit(eip_offset_in_page);
    }
    else if 0 != *cr & CR0_TS {
        trigger_nm_jit(eip_offset_in_page);
    }
    else {
        // Unreachable: the JIT only emits this call when EM or TS is set.
        dbg_assert!(false);
    }
}
4031
+
4032
+ pub unsafe fn read_moffs() -> OrPageFault<i32> {
4033
+ // read 2 or 4 byte from ip, depending on address size attribute
4034
+ if is_asize_32() {
4035
+ read_imm32s()
4036
+ }
4037
+ else {
4038
+ read_imm16()
4039
+ }
4040
+ }
4041
+
4042
#[no_mangle]
pub unsafe fn get_real_eip() -> i32 {
    // Returns the 'real' instruction pointer, without segment offset
    return *instruction_pointer - get_seg_cs();
}
4047
+
4048
+ pub unsafe fn get_stack_reg() -> i32 {
4049
+ if *stack_size_32 {
4050
+ return read_reg32(ESP);
4051
+ }
4052
+ else {
4053
+ return read_reg16(SP);
4054
+ };
4055
+ }
4056
+
4057
+ pub unsafe fn set_stack_reg(value: i32) {
4058
+ if *stack_size_32 {
4059
+ write_reg32(ESP, value)
4060
+ }
4061
+ else {
4062
+ write_reg16(SP, value)
4063
+ };
4064
+ }
4065
+
4066
+ pub unsafe fn get_reg_asize(reg: i32) -> i32 {
4067
+ dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
4068
+ let r = read_reg32(reg);
4069
+ if is_asize_32() {
4070
+ return r;
4071
+ }
4072
+ else {
4073
+ return r & 0xFFFF;
4074
+ };
4075
+ }
4076
+
4077
+ pub unsafe fn set_reg_asize(is_asize_32: bool, reg: i32, value: i32) {
4078
+ dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
4079
+ if is_asize_32 {
4080
+ write_reg32(reg, value)
4081
+ }
4082
+ else {
4083
+ write_reg16(reg, value)
4084
+ };
4085
+ }
4086
+
4087
+ pub unsafe fn decr_ecx_asize(is_asize_32: bool) -> i32 {
4088
+ return if is_asize_32 {
4089
+ write_reg32(ECX, read_reg32(ECX) - 1);
4090
+ read_reg32(ECX)
4091
+ }
4092
+ else {
4093
+ write_reg16(CX, read_reg16(CX) - 1);
4094
+ read_reg16(CX)
4095
+ };
4096
+ }
4097
+
4098
#[no_mangle]
/// Set the guest TSC to `high:low` by adjusting the global offset relative to
/// the host-derived counter.
pub unsafe fn set_tsc(low: u32, high: u32) {
    let new_value = low as u64 | (high as u64) << 32;
    let current_value = read_tsc();
    tsc_offset = current_value - new_value;
}
4104
+
4105
#[no_mangle]
/// Read the guest TSC, derived from the host's microsecond clock.
///
/// Browsers may coarsen the timer; when enabled, the workaround extrapolates
/// between identical readings (based on how many rdtsc calls occurred between
/// the last two observed changes) so the guest never sees a stalled counter.
pub unsafe fn read_tsc() -> u64 {
    let value = (js::microtick() * TSC_RATE) as u64 - tsc_offset;

    if !TSC_ENABLE_IMPRECISE_BROWSER_WORKAROUND {
        return value;
    }

    if value == tsc_last_value {
        // If the browser returns the same value as last time, extrapolate based on the number of
        // rdtsc calls between the last two changes
        tsc_number_of_same_readings += 1;
        let extra = (tsc_number_of_same_readings * tsc_resolution) / tsc_speed;
        // Never extrapolate past the next expected tick of the real clock.
        let extra = u64::min(extra, tsc_resolution - 1);
        #[cfg(debug_assertions)]
        {
            tsc_last_extra = extra;
        }
        return value + extra;
    }

    // Debug-only sanity check: detect when extrapolation overshot the clock.
    #[cfg(debug_assertions)]
    if tsc_last_extra != 0 {
        if TSC_VERBOSE_LOGGING || tsc_last_extra >= tsc_resolution {
            dbg_log!(
                "rdtsc: jump from {}+{} to {} (diff {}, {}%)",
                tsc_last_value as u64,
                tsc_last_extra as u64,
                value,
                value - (tsc_last_value + tsc_last_extra),
                (100 * tsc_last_extra) / tsc_resolution,
            );
            dbg_assert!(tsc_last_extra < tsc_resolution, "XXX: Overshot tsc");
        }
        tsc_last_extra = 0;
    }

    // Track the smallest observed clock increment as the timer resolution.
    let d = value - tsc_last_value;
    if d < tsc_resolution {
        dbg_log!("rdtsc resolution: {}", d);
    }
    tsc_resolution = tsc_resolution.min(d);
    tsc_last_value = value;
    if tsc_number_of_same_readings != 0 {
        tsc_speed = tsc_number_of_same_readings;
        tsc_number_of_same_readings = 0;
    }

    value
}
4155
+
4156
// True when the VM flag in EFLAGS is set (virtual-8086 mode).
pub unsafe fn vm86_mode() -> bool { return *flags & FLAG_VM == FLAG_VM; }
4157
+
4158
#[no_mangle]
// I/O privilege level: bits 12-13 of EFLAGS.
pub unsafe fn getiopl() -> i32 { return *flags >> 12 & 3; }
4160
+
4161
#[no_mangle]
#[cfg(feature = "profiler")]
/// Fetch one opcode-statistics counter, selected by opcode identity
/// (0F prefix, opcode byte, modrm-memory flag, fixed /r group) and by which
/// of the five counter tables the boolean flags pick.
pub unsafe fn get_opstats_buffer(
    compiled: bool,
    jit_exit: bool,
    unguarded_register: bool,
    wasm_size: bool,
    opcode: u8,
    is_0f: bool,
    is_mem: bool,
    fixed_g: u8,
) -> f64 {
    {
        // Index layout: [is_0f:1][opcode:8][is_mem:1][fixed_g:3]
        let index = (is_0f as usize) << 12
            | (opcode as usize) << 4
            | (is_mem as usize) << 3
            | fixed_g as usize;
        (if compiled {
            opstats::opstats_compiled_buffer[index]
        }
        else if jit_exit {
            opstats::opstats_jit_exit_buffer[index]
        }
        else if unguarded_register {
            opstats::opstats_unguarded_register_buffer[index]
        }
        else if wasm_size {
            opstats::opstats_wasm_size[index]
        }
        else {
            opstats::opstats_buffer[index]
        }) as f64
    }
}
4195
+
4196
#[no_mangle]
#[cfg(not(feature = "profiler"))]
// Stub keeping the export present when the profiler feature is disabled.
pub unsafe fn get_opstats_buffer() -> f64 { 0.0 }
4199
+
4200
/// Drop the compiled-code entry cached for TLB `page`, freeing its heap
/// allocation, and clear the slot.
pub fn clear_tlb_code(page: i32) {
    unsafe {
        // SAFETY: tlb_code entries are created via Box::into_raw and owned
        // solely by this table, so reconstructing the Box here is sound.
        if let Some(c) = tlb_code[page as usize] {
            drop(Box::from_raw(c.as_ptr()));
        }
        tlb_code[page as usize] = None;
    }
}
4208
+
4209
/// Invalidate the TLB entry covering virtual address `addr` (INVLPG).
pub unsafe fn invlpg(addr: i32) {
    let page = (addr as u32 >> 12) as i32;
    // Note: Doesn't remove this page from valid_tlb_entries: This isn't
    // necessary, because when valid_tlb_entries grows too large, it will be
    // emptied by calling clear_tlb, which removes this entry as it isn't global.
    // This however means that valid_tlb_entries can contain some invalid entries
    clear_tlb_code(page);
    tlb_data[page as usize] = 0;
    // Invalidate the cached eip translation too; it may point into this page.
    *last_virt_eip = -1;
}
4219
+
4220
#[no_mangle]
/// Install `new_flags` into EFLAGS, preserving the bits the current privilege
/// context is not allowed to change (RF/VM always; IOPL and IF depending on
/// CPL vs IOPL; VIP/VIF handling differs in vm86 mode).
pub unsafe fn update_eflags(new_flags: i32) {
    let mut dont_update = FLAG_RF | FLAG_VM | FLAG_VIP | FLAG_VIF;
    let mut clear = !FLAG_VIP & !FLAG_VIF & FLAGS_MASK;
    if 0 != *flags & FLAG_VM {
        // other case needs to be handled in popf or iret
        dbg_assert!(getiopl() == 3);
        dont_update |= FLAG_IOPL;
        // don't clear vip or vif
        clear |= FLAG_VIP | FLAG_VIF
    }
    else {
        if !*protected_mode {
            dbg_assert!(*cpl == 0);
        }
        if 0 != *cpl {
            // cpl > 0
            // cannot update iopl
            dont_update |= FLAG_IOPL;
            if *cpl as i32 > getiopl() {
                // cpl > iopl
                // cannot update interrupt flag
                dont_update |= FLAG_INTERRUPT
            }
        }
    }
    // Keep protected bits from the old value, mask reserved bits, force defaults.
    *flags = (new_flags ^ (*flags ^ new_flags) & dont_update) & clear | FLAGS_DEFAULT;
    *flags_changed = 0;

    if *flags & FLAG_TRAP != 0 {
        dbg_log!("Not supported: trap flag");
    }
    *flags &= !FLAG_TRAP;
}
4254
+
4255
+ #[no_mangle]
4256
+ pub unsafe fn get_valid_tlb_entries_count() -> i32 {
4257
+ if !cfg!(feature = "profiler") {
4258
+ return 0;
4259
+ }
4260
+ let mut result = 0;
4261
+ for i in 0..valid_tlb_entries_count {
4262
+ let page = valid_tlb_entries[i as usize];
4263
+ let entry = tlb_data[page as usize];
4264
+ if 0 != entry {
4265
+ result += 1
4266
+ }
4267
+ }
4268
+ return result;
4269
+ }
4270
+
4271
+ #[no_mangle]
4272
+ pub unsafe fn get_valid_global_tlb_entries_count() -> i32 {
4273
+ if !cfg!(feature = "profiler") {
4274
+ return 0;
4275
+ }
4276
+ let mut result = 0;
4277
+ for i in 0..valid_tlb_entries_count {
4278
+ let page = valid_tlb_entries[i as usize];
4279
+ let entry = tlb_data[page as usize];
4280
+ if 0 != entry & TLB_GLOBAL {
4281
+ result += 1
4282
+ }
4283
+ }
4284
+ return result;
4285
+ }
4286
+
4287
#[inline(never)]
/// Raise #NP (segment not present) with error `code`, rewinding eip to the
/// start of the faulting instruction first. A debug hook may swallow it.
pub unsafe fn trigger_np(code: i32) {
    dbg_log!("#np");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if js::cpu_exception_hook(CPU_EXCEPTION_NP) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_NP, false, Some(code));
}
4298
+
4299
#[inline(never)]
/// Raise #SS (stack fault) with error `code`, rewinding eip to the start of
/// the faulting instruction first. A debug hook may swallow it.
pub unsafe fn trigger_ss(code: i32) {
    dbg_log!("#ss");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if js::cpu_exception_hook(CPU_EXCEPTION_SS) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_SS, false, Some(code));
}
4310
+
4311
#[no_mangle]
// Snapshot the TSC into the shared `current_tsc` slot (read from JS).
pub unsafe fn store_current_tsc() { *current_tsc = read_tsc(); }
4313
+
4314
#[no_mangle]
/// Deliver a pending hardware interrupt if IF is set: the legacy PIC is
/// polled first, then (with ACPI enabled) the APIC.
pub unsafe fn handle_irqs() {
    if *flags & FLAG_INTERRUPT != 0 {
        if let Some(irq) = pic::pic_acknowledge_irq() {
            pic_call_irq(irq)
        }
        else if *acpi_enabled {
            if let Some(irq) = apic::acknowledge_irq() {
                pic_call_irq(irq)
            }
        }
    }
}
4327
+
4328
/// Dispatch hardware interrupt `interrupt_nr` through the IVT/IDT,
/// waking the CPU out of HLT first if necessary.
unsafe fn pic_call_irq(interrupt_nr: u8) {
    *previous_ip = *instruction_pointer; // XXX: What if called after instruction (port IO)
    if *in_hlt {
        js::stop_idling();
        *in_hlt = false;
    }
    call_interrupt_vector(interrupt_nr as i32, false, None);
}
4336
+
4337
#[no_mangle]
/// Device-facing: assert IRQ line `i` on the PIC (and IOAPIC when ACPI is
/// enabled) and immediately try to deliver pending interrupts.
unsafe fn device_raise_irq(i: u8) {
    pic::set_irq(i);
    if *acpi_enabled {
        ioapic::set_irq(i);
    }
    handle_irqs()
}
4345
+
4346
#[no_mangle]
/// Device-facing: deassert IRQ line `i` on the PIC (and IOAPIC when ACPI is
/// enabled) and re-evaluate pending interrupts.
unsafe fn device_lower_irq(i: u8) {
    pic::clear_irq(i);
    if *acpi_enabled {
        ioapic::clear_irq(i);
    }
    handle_irqs()
}
4354
+
4355
/// 8-bit port input: PIC ports are handled natively in Rust, everything else
/// is forwarded to the JavaScript device layer.
pub fn io_port_read8(port: i32) -> i32 {
    unsafe {
        match port {
            0x20 => pic::port20_read() as i32,
            0x21 => pic::port21_read() as i32,
            0xA0 => pic::portA0_read() as i32,
            0xA1 => pic::portA1_read() as i32,
            0x4D0 => pic::port4D0_read() as i32,
            0x4D1 => pic::port4D1_read() as i32,
            _ => js::io_port_read8(port),
        }
    }
}
4368
// 16-bit port input: always forwarded to the JavaScript device layer.
pub fn io_port_read16(port: i32) -> i32 { unsafe { js::io_port_read16(port) } }
4369
// 32-bit port input: always forwarded to the JavaScript device layer.
pub fn io_port_read32(port: i32) -> i32 { unsafe { js::io_port_read32(port) } }
4370
+
4371
/// 8-bit port output: PIC ports are handled natively (followed by an
/// interrupt re-check, since masks may have changed); everything else is
/// forwarded to the JavaScript device layer.
pub fn io_port_write8(port: i32, value: i32) {
    unsafe {
        match port {
            0x20 | 0x21 | 0xA0 | 0xA1 | 0x4D0 | 0x4D1 => {
                match port {
                    0x20 => pic::port20_write(value as u8),
                    0x21 => pic::port21_write(value as u8),
                    0xA0 => pic::portA0_write(value as u8),
                    0xA1 => pic::portA1_write(value as u8),
                    0x4D0 => pic::port4D0_write(value as u8),
                    0x4D1 => pic::port4D1_write(value as u8),
                    _ => dbg_assert!(false),
                };
                handle_irqs()
            },
            _ => js::io_port_write8(port, value),
        }
    }
}
4390
// 16-bit port output: always forwarded to the JavaScript device layer.
pub fn io_port_write16(port: i32, value: i32) { unsafe { js::io_port_write16(port, value) } }
4391
// 32-bit port output: always forwarded to the JavaScript device layer.
pub fn io_port_write32(port: i32, value: i32) { unsafe { js::io_port_write32(port, value) } }
4392
+
4393
#[no_mangle]
#[cfg(debug_assertions)]
/// Debug check for JIT page transitions: verify that the physical address of
/// the current eip matches the block the JIT is about to jump into.
pub unsafe fn check_page_switch(block_addr: u32, next_block_addr: u32) {
    let x = translate_address_read_jit(*instruction_pointer);
    if x != Ok(next_block_addr) {
        dbg_log!(
            "page switch from={:x} to={:x} prev_eip={:x} eip={:x} phys_eip={:x}",
            block_addr,
            next_block_addr,
            *previous_ip,
            *instruction_pointer,
            x.unwrap_or(0),
        );
    }
    dbg_assert!(next_block_addr & 0xFFF == *instruction_pointer as u32 & 0xFFF);
    dbg_assert!(x.is_ok());
    dbg_assert!(x == Ok(next_block_addr));
}
4411
+
4412
#[no_mangle]
/// Reset the whole CPU to its power-on state: segments, GPRs, FPU/MMX/SSE
/// state, control/debug registers, TLB, flags, TSC and the JIT cache.
/// Initial values follow http://www.sandpile.org/x86/initial.htm.
pub unsafe fn reset_cpu() {
    for i in 0..8 {
        *segment_is_null.offset(i) = false;
        *segment_limits.offset(i) = 0;
        *segment_offsets.offset(i) = 0;
        *segment_access_bytes.offset(i) = 0x80 | (0 << 5) | 0x10 | 0x02; // P dpl0 S RW

        *reg32.offset(i) = 0;

        *sreg.offset(i) = 0;
        *dreg.offset(i) = 0;

        write_xmm128_2(i as i32, 0, 0);

        *fpu_st.offset(i) = softfloat::F80::ZERO;
    }
    // CS additionally gets the executable bit.
    *segment_access_bytes.offset(CS as isize) = 0x80 | (0 << 5) | 0x10 | 0x08 | 0x02; // P dpl0 S E RW

    for i in 0..4 {
        *reg_pdpte.offset(i) = 0
    }

    *fpu_stack_empty = 0xFF;
    *fpu_stack_ptr = 0;
    *fpu_control_word = 0x37F;
    *fpu_status_word = 0;
    *fpu_ip = 0;
    *fpu_ip_selector = 0;
    *fpu_opcode = 0;
    *fpu_dp = 0;
    *fpu_dp_selector = 0;

    *mxcsr = 0x1F80;

    full_clear_tlb();

    *protected_mode = false;

    // http://www.sandpile.org/x86/initial.htm
    *idtr_size = 0;
    *idtr_offset = 0;

    *gdtr_size = 0;
    *gdtr_offset = 0;

    *page_fault = false;
    // CR0 reset value: CD | NW | ET.
    *cr = 1 << 30 | 1 << 29 | 1 << 4;
    *cr.offset(2) = 0;
    *cr.offset(3) = 0;
    *cr.offset(4) = 0;
    *dreg.offset(6) = 0xFFFF0FF0u32 as i32;
    *dreg.offset(7) = 0x400;
    *cpl = 0;

    *is_32 = false;
    *stack_size_32 = false;
    *prefixes = 0;

    *last_virt_eip = -1;

    *instruction_counter = 0;
    *previous_ip = 0;
    *in_hlt = false;

    *sysenter_cs = 0;
    *sysenter_esp = 0;
    *sysenter_eip = 0;

    *flags = FLAGS_DEFAULT;
    *flags_changed = 0;
    *last_result = 0;
    *last_op1 = 0;
    *last_op_size = 0;

    set_tsc(0, 0);

    // Real-mode reset vector F000:FFF0.
    *instruction_pointer = 0xFFFF0;
    switch_cs_real_mode(0xF000);

    switch_seg(SS, 0x30);
    write_reg32(ESP, 0x100);

    update_state_flags();

    jit::jit_clear_cache_js();
}
4499
+
4500
#[no_mangle]
// Set the maximum basic CPUID leaf reported to the guest.
pub unsafe fn set_cpuid_level(level: u32) { cpuid_level = level }