VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 37676

Last change on this file since 37676 was 37676, checked in by vboxsync, 14 years ago

clean up...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.0 KB
Line 
1/* $Id: VBoxRecompiler.c 37676 2011-06-29 07:10:23Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Both pointers are reinterpreted as X86FPUMMX, so the struct assignment
 * copies the full 80-bit register in one go.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, QEMU's own single-step
 * machinery is used for the logged-disassembly stepping mode. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
83
84
85/*******************************************************************************
86* Internal Functions *
87*******************************************************************************/
88static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
91static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
108static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
109static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
110
111/*******************************************************************************
112* Global Variables *
113*******************************************************************************/
114
115/** @todo Move stats to REM::s some rainy day we have nothing do to. */
116#ifdef VBOX_WITH_STATISTICS
117static STAMPROFILEADV gStatExecuteSingleInstr;
118static STAMPROFILEADV gStatCompilationQEmu;
119static STAMPROFILEADV gStatRunCodeQEmu;
120static STAMPROFILEADV gStatTotalTimeQEmu;
121static STAMPROFILEADV gStatTimers;
122static STAMPROFILEADV gStatTBLookup;
123static STAMPROFILEADV gStatIRQ;
124static STAMPROFILEADV gStatRawCheck;
125static STAMPROFILEADV gStatMemRead;
126static STAMPROFILEADV gStatMemWrite;
127static STAMPROFILE gStatGCPhys2HCVirt;
128static STAMPROFILE gStatHCVirt2GCPhys;
129static STAMCOUNTER gStatCpuGetTSC;
130static STAMCOUNTER gStatRefuseTFInhibit;
131static STAMCOUNTER gStatRefuseVM86;
132static STAMCOUNTER gStatRefusePaging;
133static STAMCOUNTER gStatRefusePAE;
134static STAMCOUNTER gStatRefuseIOPLNot0;
135static STAMCOUNTER gStatRefuseIF0;
136static STAMCOUNTER gStatRefuseCode16;
137static STAMCOUNTER gStatRefuseWP0;
138static STAMCOUNTER gStatRefuseRing1or2;
139static STAMCOUNTER gStatRefuseCanExecute;
140static STAMCOUNTER gStatREMGDTChange;
141static STAMCOUNTER gStatREMIDTChange;
142static STAMCOUNTER gStatREMLDTRChange;
143static STAMCOUNTER gStatREMTRChange;
144static STAMCOUNTER gStatSelOutOfSync[6];
145static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
146static STAMCOUNTER gStatFlushTBs;
147#endif
148/* in exec.c */
149extern uint32_t tlb_flush_count;
150extern uint32_t tb_flush_count;
151extern uint32_t tb_phys_invalidate_count;
152
153/*
154 * Global stuff.
155 */
156
/** MMIO read callbacks, indexed by access size (0=byte, 1=word, 2=dword).
 * Registered with QEMU via cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages with PGM access handlers),
 * indexed by access size (0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for pages with PGM access handlers),
 * indexed by access size (0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
188
189
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments.
 * Single optional boolean argument selecting the new stepping state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once with DBGCRegisterCommands() from REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
220
221/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
222uint8_t *code_gen_prologue;
223
224
225/*******************************************************************************
226* Internal Functions *
227*******************************************************************************/
228void remAbort(int rc, const char *pszTip);
229extern int testmath(void);
230
231/* Put them here to avoid unused variable warning. */
232AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
233#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
234//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
235/* Why did this have to be identical?? */
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#else
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#endif
240
241
242/**
243 * Initializes the REM.
244 *
245 * @returns VBox status code.
246 * @param pVM The VM to operate on.
247 */
248REMR3DECL(int) REMR3Init(PVM pVM)
249{
250 PREMHANDLERNOTIFICATION pCur;
251 uint32_t u32Dummy;
252 int rc;
253 unsigned i;
254
255#ifdef VBOX_ENABLE_VBOXREM64
256 LogRel(("Using 64-bit aware REM\n"));
257#endif
258
259 /*
260 * Assert sanity.
261 */
262 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
263 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
264 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
265#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
266 Assert(!testmath());
267#endif
268
269 /*
270 * Init some internal data members.
271 */
272 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
273 pVM->rem.s.Env.pVM = pVM;
274#ifdef CPU_RAW_MODE_INIT
275 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
276#endif
277
278 /*
279 * Initialize the REM critical section.
280 *
281 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
282 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
283 * deadlocks. (mostly pgm vs rem locking)
284 */
285 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
286 AssertRCReturn(rc, rc);
287
288 /* ctx. */
289 pVM->rem.s.pCtx = NULL; /* set when executing code. */
290 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
291
292 /* ignore all notifications */
293 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
294
295 code_gen_prologue = RTMemExecAlloc(_1K);
296 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
297
298 cpu_exec_init_all(0);
299
300 /*
301 * Init the recompiler.
302 */
303 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
304 {
305 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
306 return VERR_GENERAL_FAILURE;
307 }
308 PVMCPU pVCpu = VMMGetCpu(pVM);
309 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
310 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
311
312 cpu_reset(&pVM->rem.s.Env);
313
314 /* allocate code buffer for single instruction emulation. */
315 pVM->rem.s.Env.cbCodeBuffer = 4096;
316 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
317 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
318
319 /* Finally, set the cpu_single_env global. */
320 cpu_single_env = &pVM->rem.s.Env;
321
322 /* Nothing is pending by default */
323 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
324
325 /*
326 * Register ram types.
327 */
328 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
329 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
330 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
331 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
332 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
333
334 /* stop ignoring. */
335 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
336
337 /*
338 * Register the saved state data unit.
339 */
340 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
341 NULL, NULL, NULL,
342 NULL, remR3Save, NULL,
343 NULL, remR3Load, NULL);
344 if (RT_FAILURE(rc))
345 return rc;
346
347#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
348 /*
349 * Debugger commands.
350 */
351 static bool fRegisteredCmds = false;
352 if (!fRegisteredCmds)
353 {
354 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
355 if (RT_SUCCESS(rc))
356 fRegisteredCmds = true;
357 }
358#endif
359
360#ifdef VBOX_WITH_STATISTICS
361 /*
362 * Statistics.
363 */
364 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
365 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
366 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
367 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
368 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
370 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
371 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
372 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
373 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
374 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
375 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
376
377 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
378
379 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
380 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
381 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
382 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
383 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
384 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
385 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
386 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
387 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
388 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
389 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
390
391 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
392 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
393 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
394 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
395
396 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
402
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
407 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
409
410 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
411#endif /* VBOX_WITH_STATISTICS */
412
413 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
414 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
415 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
416
417
418#ifdef DEBUG_ALL_LOGGING
419 loglevel = ~0;
420#endif
421
422 /*
423 * Init the handler notification lists.
424 */
425 pVM->rem.s.idxPendingList = UINT32_MAX;
426 pVM->rem.s.idxFreeList = 0;
427
428 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
429 {
430 pCur = &pVM->rem.s.aHandlerNotifications[i];
431 pCur->idxNext = i + 1;
432 pCur->idxSelf = i;
433 }
434 pCur->idxNext = UINT32_MAX; /* the last record. */
435
436 return rc;
437}
438
439
440/**
441 * Finalizes the REM initialization.
442 *
443 * This is called after all components, devices and drivers has
444 * been initialized. Its main purpose it to finish the RAM related
445 * initialization.
446 *
447 * @returns VBox status code.
448 *
449 * @param pVM The VM handle.
450 */
451REMR3DECL(int) REMR3InitFinalize(PVM pVM)
452{
453 int rc;
454
455 /*
456 * Ram size & dirty bit map.
457 */
458 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
459 pVM->rem.s.fGCPhysLastRamFixed = true;
460#ifdef RT_STRICT
461 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
462#else
463 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
464#endif
465 return rc;
466}
467
468
469/**
470 * Initializes phys_ram_dirty and phys_ram_dirty_size.
471 *
472 * @returns VBox status code.
473 * @param pVM The VM handle.
474 * @param fGuarded Whether to guard the map.
475 */
/**
 * Initializes phys_ram_dirty and phys_ram_dirty_size.
 *
 * Sizes the dirty page bitmap (one byte per page up to and including
 * GCPhysLastRam) and allocates it, optionally placing an inaccessible
 * guard region right after it to catch out-of-bounds writes.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* cb = last RAM address + 1; the +1 overflowing to 0 means out of range. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    /* One dirty byte per page. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Round the map size up to cover a whole number of 4GB RAM chunks and
         * leave at least _64KB of guard space after it; the extra tail pages
         * are protected PROT_NONE below.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything beyond the aligned bitmap inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the used part of the map ends right at the guard. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages start dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
522
523
524/**
525 * Terminates the REM.
526 *
527 * Termination means cleaning up and freeing all resources,
528 * the VM it self is at this point powered off or suspended.
529 *
530 * @returns VBox status code.
531 * @param pVM The VM to operate on.
532 */
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Deregisters every statistics sample registered by REMR3Init(),
 * mirroring that function's registration list.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
594
595
596/**
597 * The VM is being reset.
598 *
599 * For the REM component this means to call the cpu_reset() and
600 * reinitialize some state variables.
601 *
602 * @param pVM VM handle.
603 */
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Notifications are suppressed (cIgnoreAll) around the reset so side
     * effects of cpu_reset() don't get propagated.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
622
623
624/**
625 * Execute state save operation.
626 *
627 * @returns VBox status code.
628 * @param pVM VM Handle.
629 * @param pSSM SSM operation handle.
630 */
631static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
632{
633 PREM pRem = &pVM->rem.s;
634
635 /*
636 * Save the required CPU Env bits.
637 * (Not much because we're never in REM when doing the save.)
638 */
639 LogFlow(("remR3Save:\n"));
640 Assert(!pRem->fInREM);
641 SSMR3PutU32(pSSM, pRem->Env.hflags);
642 SSMR3PutU32(pSSM, ~0); /* separator */
643
644 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
645 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
646 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
647
648 return SSMR3PutU32(pSSM, ~0); /* terminator */
649}
650
651
652/**
653 * Execute state load operation.
654 *
655 * @returns VBox status code.
656 * @param pVM VM Handle.
657 * @param pSSM SSM operation handle.
658 * @param uVersion Data layout version.
659 * @param uPass The data pass.
660 */
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save() and additionally knows how to skip the redundant
 * CPU state that version 1.6 saved states carry.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    /* Separator marker written by remR3Save() after hflags. */
    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Sanity: never read past the fixed-size array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
782
783
784
785#undef LOG_GROUP
786#define LOG_GROUP LOG_GROUP_REM_RUN
787
788/**
789 * Single steps an instruction in recompiled mode.
790 *
791 * Before calling this function the REM state needs to be in sync with
792 * the VM. Call REMR3State() to perform the sync. It's only necessary
793 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
794 * and after calling REMR3StateBack().
795 *
796 * @returns VBox status code.
797 *
798 * @param pVM VM Handle.
799 * @param pVCpu VMCPU Handle.
800 */
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * interrupt_request is saved here and restored before returning.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * cpu_breakpoint_remove() returns 0 when a breakpoint was actually removed,
     * so fBp records whether one must be re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step completed: nudge the clock (resume+suspend) and report the step. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the QEMU exit code onto a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
872
873
874/**
875 * Set a breakpoint using the REM facilities.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 * @param Address The breakpoint address.
880 * @thread The emulation thread.
881 */
882REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
883{
884 VM_ASSERT_EMT(pVM);
885 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
886 {
887 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
888 return VINF_SUCCESS;
889 }
890 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
891 return VERR_REM_NO_MORE_BP_SLOTS;
892}
893
894
895/**
896 * Clears a breakpoint set by REMR3BreakpointSet().
897 *
898 * @returns VBox status code.
899 * @param pVM The VM handle.
900 * @param Address The breakpoint address.
901 * @thread The emulation thread.
902 */
903REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
904{
905 VM_ASSERT_EMT(pVM);
906 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
907 {
908 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
909 return VINF_SUCCESS;
910 }
911 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
912 return VERR_REM_BP_NOT_FOUND;
913}
914
915
916/**
917 * Emulate an instruction.
918 *
919 * This function executes one instruction without letting anyone
920 * interrupt it. This is intended for being called while being in
921 * raw mode and thus will take care of all the state syncing between
922 * REM and the rest.
923 *
924 * @returns VBox status code.
925 * @param pVM VM handle.
926 * @param pVCpu VMCPU Handle.
927 */
928REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
929{
930 bool fFlushTBs;
931
932 int rc, rc2;
933 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
934
935 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
936 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
937 */
938 if (HWACCMIsEnabled(pVM))
939 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
940
941 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
942 fFlushTBs = pVM->rem.s.fFlushTBs;
943 pVM->rem.s.fFlushTBs = false;
944
945 /*
946 * Sync the state and enable single instruction / single stepping.
947 */
948 rc = REMR3State(pVM, pVCpu);
949 pVM->rem.s.fFlushTBs = fFlushTBs;
950 if (RT_SUCCESS(rc))
951 {
952 int interrupt_request = pVM->rem.s.Env.interrupt_request;
953 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
954#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
955 cpu_single_step(&pVM->rem.s.Env, 0);
956#endif
957 Assert(!pVM->rem.s.Env.singlestep_enabled);
958
959 /*
960 * Now we set the execute single instruction flag and enter the cpu_exec loop.
961 */
962 TMNotifyStartOfExecution(pVCpu);
963 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
964 rc = cpu_exec(&pVM->rem.s.Env);
965 TMNotifyEndOfExecution(pVCpu);
966 switch (rc)
967 {
968 /*
969 * Executed without anything out of the way happening.
970 */
971 case EXCP_SINGLE_INSTR:
972 rc = VINF_EM_RESCHEDULE;
973 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
974 break;
975
976 /*
977 * If we take a trap or start servicing a pending interrupt, we might end up here.
978 * (Timer thread or some other thread wishing EMT's attention.)
979 */
980 case EXCP_INTERRUPT:
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
982 rc = VINF_EM_RESCHEDULE;
983 break;
984
985 /*
986 * Single step, we assume!
987 * If there was a breakpoint there we're fucked now.
988 */
989 case EXCP_DEBUG:
990 if (pVM->rem.s.Env.watchpoint_hit)
991 {
992 /** @todo deal with watchpoints */
993 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
994 rc = VINF_EM_DBG_BREAKPOINT;
995 }
996 else
997 {
998 CPUBreakpoint *pBP;
999 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1000 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1001 if (pBP->pc == GCPtrPC)
1002 break;
1003 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1004 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1005 }
1006 break;
1007
1008 /*
1009 * hlt instruction.
1010 */
1011 case EXCP_HLT:
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1013 rc = VINF_EM_HALT;
1014 break;
1015
1016 /*
1017 * The VM has halted.
1018 */
1019 case EXCP_HALTED:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1021 rc = VINF_EM_HALT;
1022 break;
1023
1024 /*
1025 * Switch to RAW-mode.
1026 */
1027 case EXCP_EXECUTE_RAW:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1029 rc = VINF_EM_RESCHEDULE_RAW;
1030 break;
1031
1032 /*
1033 * Switch to hardware accelerated RAW-mode.
1034 */
1035 case EXCP_EXECUTE_HWACC:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1037 rc = VINF_EM_RESCHEDULE_HWACC;
1038 break;
1039
1040 /*
1041 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1042 */
1043 case EXCP_RC:
1044 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1045 rc = pVM->rem.s.rc;
1046 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1047 break;
1048
1049 /*
1050 * Figure out the rest when they arrive....
1051 */
1052 default:
1053 AssertMsgFailed(("rc=%d\n", rc));
1054 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1055 rc = VINF_EM_RESCHEDULE;
1056 break;
1057 }
1058
1059 /*
1060 * Switch back the state.
1061 */
1062 pVM->rem.s.Env.interrupt_request = interrupt_request;
1063 rc2 = REMR3StateBack(pVM, pVCpu);
1064 AssertRC(rc2);
1065 }
1066
1067 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1068 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1069 return rc;
1070}
1071
1072
1073/**
1074 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1075 *
1076 * @returns VBox status code.
1077 *
1078 * @param pVM The VM handle.
1079 * @param pVCpu The Virtual CPU handle.
1080 */
1081static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1082{
1083 int rc;
1084
1085 Assert(pVM->rem.s.fInREM);
1086#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1087 cpu_single_step(&pVM->rem.s.Env, 1);
1088#else
1089 Assert(!pVM->rem.s.Env.singlestep_enabled);
1090#endif
1091
1092 /*
1093 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1094 */
1095 for (;;)
1096 {
1097 char szBuf[256];
1098
1099 /*
1100 * Log the current registers state and instruction.
1101 */
1102 remR3StateUpdate(pVM, pVCpu);
1103 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1104 szBuf[0] = '\0';
1105 rc = DBGFR3DisasInstrEx(pVM,
1106 pVCpu->idCpu,
1107 0, /* Sel */
1108 0, /* GCPtr */
1109 DBGF_DISAS_FLAGS_CURRENT_GUEST
1110 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1111 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1112 szBuf,
1113 sizeof(szBuf),
1114 NULL);
1115 if (RT_FAILURE(rc))
1116 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1117 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1118
1119 /*
1120 * Execute the instruction.
1121 */
1122 TMNotifyStartOfExecution(pVCpu);
1123
1124 if ( pVM->rem.s.Env.exception_index < 0
1125 || pVM->rem.s.Env.exception_index > 256)
1126 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1127
1128#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1129 pVM->rem.s.Env.interrupt_request = 0;
1130#else
1131 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1132#endif
1133 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1134 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1135 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1136 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1137 pVM->rem.s.Env.interrupt_request,
1138 pVM->rem.s.Env.halted,
1139 pVM->rem.s.Env.exception_index
1140 );
1141
1142 rc = cpu_exec(&pVM->rem.s.Env);
1143
1144 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );
1149
1150 TMNotifyEndOfExecution(pVCpu);
1151
1152 switch (rc)
1153 {
1154#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1155 /*
1156 * The normal exit.
1157 */
1158 case EXCP_SINGLE_INSTR:
1159 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1160 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1161 continue;
1162 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1163 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1164 rc = VINF_SUCCESS;
1165 break;
1166
1167#else
1168 /*
1169 * The normal exit, check for breakpoints at PC just to be sure.
1170 */
1171#endif
1172 case EXCP_DEBUG:
1173 if (pVM->rem.s.Env.watchpoint_hit)
1174 {
1175 /** @todo deal with watchpoints */
1176 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1177 rc = VINF_EM_DBG_BREAKPOINT;
1178 }
1179 else
1180 {
1181 CPUBreakpoint *pBP;
1182 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1183 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1184 if (pBP->pc == GCPtrPC)
1185 break;
1186 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1187 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1188 }
1189#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1190 if (rc == VINF_EM_DBG_STEPPED)
1191 {
1192 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1193 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1194 continue;
1195
1196 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1197 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1198 rc = VINF_SUCCESS;
1199 }
1200#endif
1201 break;
1202
1203 /*
1204 * If we take a trap or start servicing a pending interrupt, we might end up here.
1205 * (Timer thread or some other thread wishing EMT's attention.)
1206 */
1207 case EXCP_INTERRUPT:
1208 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1209 rc = VINF_SUCCESS;
1210 break;
1211
1212 /*
1213 * hlt instruction.
1214 */
1215 case EXCP_HLT:
1216 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1217 rc = VINF_EM_HALT;
1218 break;
1219
1220 /*
1221 * The VM has halted.
1222 */
1223 case EXCP_HALTED:
1224 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1225 rc = VINF_EM_HALT;
1226 break;
1227
1228 /*
1229 * Switch to RAW-mode.
1230 */
1231 case EXCP_EXECUTE_RAW:
1232 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1233 rc = VINF_EM_RESCHEDULE_RAW;
1234 break;
1235
1236 /*
1237 * Switch to hardware accelerated RAW-mode.
1238 */
1239 case EXCP_EXECUTE_HWACC:
1240 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1241 rc = VINF_EM_RESCHEDULE_HWACC;
1242 break;
1243
1244 /*
1245 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1246 */
1247 case EXCP_RC:
1248 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1249 rc = pVM->rem.s.rc;
1250 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1251 break;
1252
1253 /*
1254 * Figure out the rest when they arrive....
1255 */
1256 default:
1257 AssertMsgFailed(("rc=%d\n", rc));
1258 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1259 rc = VINF_EM_RESCHEDULE;
1260 break;
1261 }
1262 break;
1263 }
1264
1265#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1266// cpu_single_step(&pVM->rem.s.Env, 0);
1267#else
1268 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1269#endif
1270 return rc;
1271}
1272
1273
1274/**
1275 * Runs code in recompiled mode.
1276 *
1277 * Before calling this function the REM state needs to be in sync with
1278 * the VM. Call REMR3State() to perform the sync. It's only necessary
1279 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1280 * and after calling REMR3StateBack().
1281 *
1282 * @returns VBox status code.
1283 *
1284 * @param pVM VM Handle.
1285 * @param pVCpu VMCPU Handle.
1286 */
1287REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1288{
1289 int rc;
1290
1291 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1292 return remR3RunLoggingStep(pVM, pVCpu);
1293
1294 Assert(pVM->rem.s.fInREM);
1295 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1296
1297 TMNotifyStartOfExecution(pVCpu);
1298 rc = cpu_exec(&pVM->rem.s.Env);
1299 TMNotifyEndOfExecution(pVCpu);
1300 switch (rc)
1301 {
1302 /*
1303 * This happens when the execution was interrupted
1304 * by an external event, like pending timers.
1305 */
1306 case EXCP_INTERRUPT:
1307 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1308 rc = VINF_SUCCESS;
1309 break;
1310
1311 /*
1312 * hlt instruction.
1313 */
1314 case EXCP_HLT:
1315 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1316 rc = VINF_EM_HALT;
1317 break;
1318
1319 /*
1320 * The VM has halted.
1321 */
1322 case EXCP_HALTED:
1323 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1324 rc = VINF_EM_HALT;
1325 break;
1326
1327 /*
1328 * Breakpoint/single step.
1329 */
1330 case EXCP_DEBUG:
1331 if (pVM->rem.s.Env.watchpoint_hit)
1332 {
1333 /** @todo deal with watchpoints */
1334 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1335 rc = VINF_EM_DBG_BREAKPOINT;
1336 }
1337 else
1338 {
1339 CPUBreakpoint *pBP;
1340 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1341 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1342 if (pBP->pc == GCPtrPC)
1343 break;
1344 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1345 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1346 }
1347 break;
1348
1349 /*
1350 * Switch to RAW-mode.
1351 */
1352 case EXCP_EXECUTE_RAW:
1353 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1354 rc = VINF_EM_RESCHEDULE_RAW;
1355 break;
1356
1357 /*
1358 * Switch to hardware accelerated RAW-mode.
1359 */
1360 case EXCP_EXECUTE_HWACC:
1361 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1362 rc = VINF_EM_RESCHEDULE_HWACC;
1363 break;
1364
1365 /*
1366 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1367 */
1368 case EXCP_RC:
1369 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1370 rc = pVM->rem.s.rc;
1371 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1372 break;
1373
1374 /*
1375 * Figure out the rest when they arrive....
1376 */
1377 default:
1378 AssertMsgFailed(("rc=%d\n", rc));
1379 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1380 rc = VINF_SUCCESS;
1381 break;
1382 }
1383
1384 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1385 return rc;
1386}
1387
1388
1389/**
1390 * Check if the cpu state is suitable for Raw execution.
1391 *
1392 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1393 *
1394 * @param env The CPU env struct.
1395 * @param eip The EIP to check this for (might differ from env->eip).
1396 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1397 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1398 *
1399 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1400 */
1401bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1402{
1403 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1404 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1405 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1406 uint32_t u32CR0;
1407
1408#ifdef IEM_VERIFICATION_MODE
1409 return false;
1410#endif
1411
1412 /* Update counter. */
1413 env->pVM->rem.s.cCanExecuteRaw++;
1414
1415 /* Never when single stepping+logging guest code. */
1416 if (env->state & CPU_EMULATE_SINGLE_STEP)
1417 return false;
1418
1419 if (HWACCMIsEnabled(env->pVM))
1420 {
1421 CPUMCTX Ctx;
1422
1423 env->state |= CPU_RAW_HWACC;
1424
1425 /*
1426 * Create partial context for HWACCMR3CanExecuteGuest
1427 */
1428 Ctx.cr0 = env->cr[0];
1429 Ctx.cr3 = env->cr[3];
1430 Ctx.cr4 = env->cr[4];
1431
1432 Ctx.tr = env->tr.selector;
1433 Ctx.trHid.u64Base = env->tr.base;
1434 Ctx.trHid.u32Limit = env->tr.limit;
1435 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1436
1437 Ctx.ldtr = env->ldt.selector;
1438 Ctx.ldtrHid.u64Base = env->ldt.base;
1439 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1440 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1441
1442 Ctx.idtr.cbIdt = env->idt.limit;
1443 Ctx.idtr.pIdt = env->idt.base;
1444
1445 Ctx.gdtr.cbGdt = env->gdt.limit;
1446 Ctx.gdtr.pGdt = env->gdt.base;
1447
1448 Ctx.rsp = env->regs[R_ESP];
1449 Ctx.rip = env->eip;
1450
1451 Ctx.eflags.u32 = env->eflags;
1452
1453 Ctx.cs = env->segs[R_CS].selector;
1454 Ctx.csHid.u64Base = env->segs[R_CS].base;
1455 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1456 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1457
1458 Ctx.ds = env->segs[R_DS].selector;
1459 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1460 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1461 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1462
1463 Ctx.es = env->segs[R_ES].selector;
1464 Ctx.esHid.u64Base = env->segs[R_ES].base;
1465 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1466 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1467
1468 Ctx.fs = env->segs[R_FS].selector;
1469 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1470 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1471 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1472
1473 Ctx.gs = env->segs[R_GS].selector;
1474 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1475 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1476 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1477
1478 Ctx.ss = env->segs[R_SS].selector;
1479 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1480 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1481 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1482
1483 Ctx.msrEFER = env->efer;
1484
1485 /* Hardware accelerated raw-mode:
1486 *
1487 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1488 */
1489 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1490 {
1491 *piException = EXCP_EXECUTE_HWACC;
1492 return true;
1493 }
1494 return false;
1495 }
1496
1497 /*
1498 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1499 * or 32 bits protected mode ring 0 code
1500 *
1501 * The tests are ordered by the likelihood of being true during normal execution.
1502 */
1503 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1504 {
1505 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1506 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1507 return false;
1508 }
1509
1510#ifndef VBOX_RAW_V86
1511 if (fFlags & VM_MASK) {
1512 STAM_COUNTER_INC(&gStatRefuseVM86);
1513 Log2(("raw mode refused: VM_MASK\n"));
1514 return false;
1515 }
1516#endif
1517
1518 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1519 {
1520#ifndef DEBUG_bird
1521 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1522#endif
1523 return false;
1524 }
1525
1526 if (env->singlestep_enabled)
1527 {
1528 //Log2(("raw mode refused: Single step\n"));
1529 return false;
1530 }
1531
1532 if (!QTAILQ_EMPTY(&env->breakpoints))
1533 {
1534 //Log2(("raw mode refused: Breakpoints\n"));
1535 return false;
1536 }
1537
1538 if (!QTAILQ_EMPTY(&env->watchpoints))
1539 {
1540 //Log2(("raw mode refused: Watchpoints\n"));
1541 return false;
1542 }
1543
1544 u32CR0 = env->cr[0];
1545 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1546 {
1547 STAM_COUNTER_INC(&gStatRefusePaging);
1548 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1549 return false;
1550 }
1551
1552 if (env->cr[4] & CR4_PAE_MASK)
1553 {
1554 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1555 {
1556 STAM_COUNTER_INC(&gStatRefusePAE);
1557 return false;
1558 }
1559 }
1560
1561 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1562 {
1563 if (!EMIsRawRing3Enabled(env->pVM))
1564 return false;
1565
1566 if (!(env->eflags & IF_MASK))
1567 {
1568 STAM_COUNTER_INC(&gStatRefuseIF0);
1569 Log2(("raw mode refused: IF (RawR3)\n"));
1570 return false;
1571 }
1572
1573 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1574 {
1575 STAM_COUNTER_INC(&gStatRefuseWP0);
1576 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1577 return false;
1578 }
1579 }
1580 else
1581 {
1582 if (!EMIsRawRing0Enabled(env->pVM))
1583 return false;
1584
1585 // Let's start with pure 32 bits ring 0 code first
1586 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1587 {
1588 STAM_COUNTER_INC(&gStatRefuseCode16);
1589 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1590 return false;
1591 }
1592
1593 // Only R0
1594 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1595 {
1596 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1597 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1598 return false;
1599 }
1600
1601 if (!(u32CR0 & CR0_WP_MASK))
1602 {
1603 STAM_COUNTER_INC(&gStatRefuseWP0);
1604 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1605 return false;
1606 }
1607
1608 if (PATMIsPatchGCAddr(env->pVM, eip))
1609 {
1610 Log2(("raw r0 mode forced: patch code\n"));
1611 *piException = EXCP_EXECUTE_RAW;
1612 return true;
1613 }
1614
1615#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1616 if (!(env->eflags & IF_MASK))
1617 {
1618 STAM_COUNTER_INC(&gStatRefuseIF0);
1619 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1620 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1621 return false;
1622 }
1623#endif
1624
1625 env->state |= CPU_RAW_RING0;
1626 }
1627
1628 /*
1629 * Don't reschedule the first time we're called, because there might be
1630 * special reasons why we're here that is not covered by the above checks.
1631 */
1632 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1633 {
1634 Log2(("raw mode refused: first scheduling\n"));
1635 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1636 return false;
1637 }
1638
1639 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1640 *piException = EXCP_EXECUTE_RAW;
1641 return true;
1642}
1643
1644
1645/**
1646 * Fetches a code byte.
1647 *
1648 * @returns Success indicator (bool) for ease of use.
1649 * @param env The CPU environment structure.
1650 * @param GCPtrInstr Where to fetch code.
1651 * @param pu8Byte Where to store the byte on success
1652 */
1653bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1654{
1655 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1656 if (RT_SUCCESS(rc))
1657 return true;
1658 return false;
1659}
1660
1661
1662/**
1663 * Flush (or invalidate if you like) page table/dir entry.
1664 *
1665 * (invlpg instruction; tlb_flush_page)
1666 *
1667 * @param env Pointer to cpu environment.
1668 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1669 */
1670void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1671{
1672 PVM pVM = env->pVM;
1673 PCPUMCTX pCtx;
1674 int rc;
1675
1676 /*
1677 * When we're replaying invlpg instructions or restoring a saved
1678 * state we disable this path.
1679 */
1680 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1681 return;
1682 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1683 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1684
1685 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1686
1687 /*
1688 * Update the control registers before calling PGMFlushPage.
1689 */
1690 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1691 Assert(pCtx);
1692 pCtx->cr0 = env->cr[0];
1693 pCtx->cr3 = env->cr[3];
1694 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1695 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1696 pCtx->cr4 = env->cr[4];
1697
1698 /*
1699 * Let PGM do the rest.
1700 */
1701 Assert(env->pVCpu);
1702 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1703 if (RT_FAILURE(rc))
1704 {
1705 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1706 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1707 }
1708 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1709}
1710
1711
1712#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the QEMU TLB,
 * encoding access-handler information in the low bits of the result:
 * (void *)1 means "no direct access, go through handlers", and bit 1 set
 * means "reads OK, writes must be caught".
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int   rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is ignored here and 'true' is
       passed instead, relying on VINF_PGM_PHYS_TLB_CATCH_WRITE to flag
       write-monitored pages — confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;       /* all access must go through handlers */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);   /* tag: writes must be caught */
    return pv;
}
1733#endif /* REM_PHYS_ADDR_IN_TLB */
1734
1735
1736/**
1737 * Called from tlb_protect_code in order to write monitor a code page.
1738 *
1739 * @param env Pointer to the CPU environment.
1740 * @param GCPtr Code page to monitor
1741 */
1742void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1743{
1744#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1745 Assert(env->pVM->rem.s.fInREM);
1746 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1747 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1748 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1749 && !(env->eflags & VM_MASK) /* no V86 mode */
1750 && !HWACCMIsEnabled(env->pVM))
1751 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1752#endif
1753}
1754
1755
1756/**
1757 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1758 *
1759 * @param env Pointer to the CPU environment.
1760 * @param GCPtr Code page to monitor
1761 */
1762void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1763{
1764 Assert(env->pVM->rem.s.fInREM);
1765#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1766 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1767 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1768 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1769 && !(env->eflags & VM_MASK) /* no V86 mode */
1770 && !HWACCMIsEnabled(env->pVM))
1771 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1772#endif
1773}
1774
1775
1776/**
1777 * Called when the CPU is initialized, any of the CRx registers are changed or
1778 * when the A20 line is modified.
1779 *
1780 * @param env Pointer to the CPU environment.
1781 * @param fGlobal Set if the flush is global.
1782 */
1783void remR3FlushTLB(CPUState *env, bool fGlobal)
1784{
1785 PVM pVM = env->pVM;
1786 PCPUMCTX pCtx;
1787
1788 /*
1789 * When we're replaying invlpg instructions or restoring a saved
1790 * state we disable this path.
1791 */
1792 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1793 return;
1794 Assert(pVM->rem.s.fInREM);
1795
1796 /*
1797 * The caller doesn't check cr4, so we have to do that for ourselves.
1798 */
1799 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1800 fGlobal = true;
1801 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1802
1803 /*
1804 * Update the control registers before calling PGMR3FlushTLB.
1805 */
1806 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1807 Assert(pCtx);
1808 pCtx->cr0 = env->cr[0];
1809 pCtx->cr3 = env->cr[3];
1810 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1811 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1812 pCtx->cr4 = env->cr[4];
1813
1814 /*
1815 * Let PGM do the rest.
1816 */
1817 Assert(env->pVCpu);
1818 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1819}
1820
1821
1822/**
1823 * Called when any of the cr0, cr4 or efer registers is updated.
1824 *
1825 * @param env Pointer to the CPU environment.
1826 */
1827void remR3ChangeCpuMode(CPUState *env)
1828{
1829 PVM pVM = env->pVM;
1830 uint64_t efer;
1831 PCPUMCTX pCtx;
1832 int rc;
1833
1834 /*
1835 * When we're replaying loads or restoring a saved
1836 * state this path is disabled.
1837 */
1838 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1839 return;
1840 Assert(pVM->rem.s.fInREM);
1841
1842 /*
1843 * Update the control registers before calling PGMChangeMode()
1844 * as it may need to map whatever cr3 is pointing to.
1845 */
1846 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1847 Assert(pCtx);
1848 pCtx->cr0 = env->cr[0];
1849 pCtx->cr3 = env->cr[3];
1850 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1851 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1852 pCtx->cr4 = env->cr[4];
1853#ifdef TARGET_X86_64
1854 efer = env->efer;
1855 pCtx->msrEFER = efer;
1856#else
1857 efer = 0;
1858#endif
1859 Assert(env->pVCpu);
1860 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1861 if (rc != VINF_SUCCESS)
1862 {
1863 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1864 {
1865 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1866 remR3RaiseRC(env->pVM, rc);
1867 }
1868 else
1869 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1870 }
1871}
1872
1873
1874/**
1875 * Called from compiled code to run dma.
1876 *
1877 * @param env Pointer to the CPU environment.
1878 */
1879void remR3DmaRun(CPUState *env)
1880{
1881 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1882 PDMR3DmaRun(env->pVM);
1883 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1884}
1885
1886
1887/**
1888 * Called from compiled code to schedule pending timers in VMM
1889 *
1890 * @param env Pointer to the CPU environment.
1891 */
1892void remR3TimersRun(CPUState *env)
1893{
1894 LogFlow(("remR3TimersRun:\n"));
1895 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1896 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1897 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1898 TMR3TimerQueuesDo(env->pVM);
1899 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1900 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1901}
1902
1903
1904/**
1905 * Record trap occurrence
1906 *
1907 * @returns VBox status code
1908 * @param env Pointer to the CPU environment.
1909 * @param uTrap Trap nr
1910 * @param uErrorCode Error code
1911 * @param pvNextEIP Next EIP
1912 */
1913int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1914{
1915 PVM pVM = env->pVM;
1916#ifdef VBOX_WITH_STATISTICS
1917 static STAMCOUNTER s_aStatTrap[255];
1918 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1919#endif
1920
1921#ifdef VBOX_WITH_STATISTICS
1922 if (uTrap < 255)
1923 {
1924 if (!s_aRegisters[uTrap])
1925 {
1926 char szStatName[64];
1927 s_aRegisters[uTrap] = true;
1928 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1929 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1930 }
1931 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1932 }
1933#endif
1934 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1935 if( uTrap < 0x20
1936 && (env->cr[0] & X86_CR0_PE)
1937 && !(env->eflags & X86_EFL_VM))
1938 {
1939#ifdef DEBUG
1940 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1941#endif
1942 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1943 {
1944 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1945 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1946 return VERR_REM_TOO_MANY_TRAPS;
1947 }
1948 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1949 pVM->rem.s.cPendingExceptions = 1;
1950 pVM->rem.s.uPendingException = uTrap;
1951 pVM->rem.s.uPendingExcptEIP = env->eip;
1952 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1953 }
1954 else
1955 {
1956 pVM->rem.s.cPendingExceptions = 0;
1957 pVM->rem.s.uPendingException = uTrap;
1958 pVM->rem.s.uPendingExcptEIP = env->eip;
1959 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1960 }
1961 return VINF_SUCCESS;
1962}
1963
1964
1965/*
1966 * Clear current active trap
1967 *
1968 * @param pVM VM Handle.
1969 */
1970void remR3TrapClear(PVM pVM)
1971{
1972 pVM->rem.s.cPendingExceptions = 0;
1973 pVM->rem.s.uPendingException = 0;
1974 pVM->rem.s.uPendingExcptEIP = 0;
1975 pVM->rem.s.uPendingExcptCR2 = 0;
1976}
1977
1978
1979/*
1980 * Record previous call instruction addresses
1981 *
1982 * @param env Pointer to the CPU environment.
1983 */
1984void remR3RecordCall(CPUState *env)
1985{
1986 CSAMR3RecordCallAddress(env->pVM, env->eip);
1987}
1988
1989
1990/**
1991 * Syncs the internal REM state with the VM.
1992 *
1993 * This must be called before REMR3Run() is invoked whenever when the REM
1994 * state is not up to date. Calling it several times in a row is not
1995 * permitted.
1996 *
1997 * @returns VBox status code.
1998 *
1999 * @param pVM VM Handle.
2000 * @param pVCpu VMCPU Handle.
2001 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
2005 */
REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
{
    register const CPUMCTX *pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid;
    unsigned i;
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    uint32_t uCpl;
    int rc;

    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State:\n"));

    pVM->rem.s.Env.pVCpu = pVCpu;
    pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * If we have to flush TBs, do that immediately.
     */
    if (pVM->rem.s.fFlushTBs)
    {
        STAM_COUNTER_INC(&gStatFlushTBs);
        tb_flush(&pVM->rem.s.Env);
        pVM->rem.s.fFlushTBs = false;
    }

    /*
     * Copy the registers which require no special handling.
     */
#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
    pVM->rem.s.Env.regs[8]  = pCtx->r8;
    pVM->rem.s.Env.regs[9]  = pCtx->r9;
    pVM->rem.s.Env.regs[10] = pCtx->r10;
    pVM->rem.s.Env.regs[11] = pCtx->r11;
    pVM->rem.s.Env.regs[12] = pCtx->r12;
    pVM->rem.s.Env.regs[13] = pCtx->r13;
    pVM->rem.s.Env.regs[14] = pCtx->r14;
    pVM->rem.s.Env.regs[15] = pCtx->r15;

    pVM->rem.s.Env.eip = pCtx->rip;

    pVM->rem.s.Env.eflags = pCtx->rflags.u64;
#else
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;
#endif

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    for (i=0;i<8;i++)
        pVM->rem.s.Env.dr[i] = pCtx->dr[i];

#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
#endif

    /*
     * Replay invlpg?  Flush each page the guest invalidated since the last
     * sync from the recompiler's TLB, suppressing the normal notification.
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        RTUINT i;

        pVM->rem.s.fIgnoreInvlPg = true;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /* Replay notification changes. */
    REMR3ReplayHandlerNotifications(pVM);

    /* Update MSRs; before CRx registers! */
    pVM->rem.s.Env.efer         = pCtx->msrEFER;
    pVM->rem.s.Env.star         = pCtx->msrSTAR;
    pVM->rem.s.Env.pat          = pCtx->msrPAT;
#ifdef TARGET_X86_64
    pVM->rem.s.Env.lstar        = pCtx->msrLSTAR;
    pVM->rem.s.Env.cstar        = pCtx->msrCSTAR;
    pVM->rem.s.Env.fmask        = pCtx->msrSFMASK;
    pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;

    /* Update the internal long mode activate flag according to the new EFER value. */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->rem.s.Env.hflags |= HF_LMA_MASK;
    else
        pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
#endif

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
    if (fFlags & (  CPUM_CHANGED_GLOBAL_TLB_FLUSH
                  | CPUM_CHANGED_CR4
                  | CPUM_CHANGED_CR0
                  | CPUM_CHANGED_CR3
                  | CPUM_CHANGED_GDTR
                  | CPUM_CHANGED_IDTR
                  | CPUM_CHANGED_SYSENTER_MSR
                  | CPUM_CHANGED_LDTR
                  | CPUM_CHANGED_CPUID
                  | CPUM_CHANGED_FPU_REM
                 )
        )
    {
        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* CR4 before CR0! */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base     = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit    = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base     = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit    = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs  = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base     = pCtx->ldtrHid.u64Base;
                pVM->rem.s.Env.ldt.limit    = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags    = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_CPUID)
        {
            uint32_t u32Dummy;

            /*
             * Get the CPUID features.
             */
            CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
        }

        /* Sync FPU state after CR4, CPUID and EFER (!). */
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
    }

    /*
     * Sync TR unconditionally to make life simpler.
     */
    pVM->rem.s.Env.tr.selector = pCtx->tr;
    pVM->rem.s.Env.tr.base     = pCtx->trHid.u64Base;
    pVM->rem.s.Env.tr.limit    = pCtx->trHid.u32Limit;
    pVM->rem.s.Env.tr.flags    = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
    /* Note! do_interrupt will fault if the busy flag is still set... */
    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note! Check this one first as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.  A trap pending in TRPM is handed over to the
     * recompiler for injection (and reset in TRPM afterwards).
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
    if (RT_SUCCESS(rc))
    {
#ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVCpu);
            remR3DumpOBsdSyscall(pVCpu);
        }
#endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int     = 0;
            pVM->rem.s.Env.exception_next_eip   = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int     = 1;
            pVM->rem.s.Env.exception_next_eip   = pCtx->rip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        if (enmType == TRPM_TRAP)
        {
            switch (u8TrapNo)
            {
                case 0x0e:
                    pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
                    /* fallthru */
                case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                    pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
                    break;

                case 0x11: case 0x08:
                default:
                    pVM->rem.s.Env.error_code = 0;
                    break;
            }
        }
        else
            pVM->rem.s.Env.error_code = 0;

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
              (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2464
2465
2466/**
 * Syncs back changes in the REM state to the VM state.
2468 *
2469 * This must be called after invoking REMR3Run().
2470 * Calling it several times in a row is not permitted.
2471 *
2472 * @returns VBox status code.
2473 *
2474 * @param pVM VM Handle.
2475 * @param pVCpu VMCPU Handle.
2476 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDT/IDT base changes must be signalled to SELM/TRPM via force flags. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.  A pending exception in the recompiler environment
     * is handed back to TRPM for dispatch by the VMM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    CPUMR3RemLeave(pVCpu,
                      HWACCMIsEnabled(pVM)
                   || (   pVM->rem.s.Env.segs[R_SS].newselector
                        | pVM->rem.s.Env.segs[R_GS].newselector
                        | pVM->rem.s.Env.segs[R_FS].newselector
                        | pVM->rem.s.Env.segs[R_ES].newselector
                        | pVM->rem.s.Env.segs[R_DS].newselector
                        | pVM->rem.s.Env.segs[R_CS].newselector) == 0
                   );
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2725
2726
2727/**
2728 * This is called by the disassembler when it wants to update the cpu state
2729 * before for instance doing a register dump.
2730 */
2731static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2732{
2733 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2734 unsigned i;
2735
2736 Assert(pVM->rem.s.fInREM);
2737
2738 /*
2739 * Copy back the registers.
2740 * This is done in the order they are declared in the CPUMCTX structure.
2741 */
2742
2743 /** @todo FOP */
2744 /** @todo FPUIP */
2745 /** @todo CS */
2746 /** @todo FPUDP */
2747 /** @todo DS */
2748 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2749 pCtx->fpu.MXCSR = 0;
2750 pCtx->fpu.MXCSR_MASK = 0;
2751
2752 /** @todo check if FPU/XMM was actually used in the recompiler */
2753 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2754//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2755
2756#ifdef TARGET_X86_64
2757 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2758 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2759 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2760 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2761 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2762 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2763 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2764 pCtx->r8 = pVM->rem.s.Env.regs[8];
2765 pCtx->r9 = pVM->rem.s.Env.regs[9];
2766 pCtx->r10 = pVM->rem.s.Env.regs[10];
2767 pCtx->r11 = pVM->rem.s.Env.regs[11];
2768 pCtx->r12 = pVM->rem.s.Env.regs[12];
2769 pCtx->r13 = pVM->rem.s.Env.regs[13];
2770 pCtx->r14 = pVM->rem.s.Env.regs[14];
2771 pCtx->r15 = pVM->rem.s.Env.regs[15];
2772
2773 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2774#else
2775 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2776 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2777 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2778 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2779 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2780 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2781 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2782
2783 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2784#endif
2785
2786 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2787
2788 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2789 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2790 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2791 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2792 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2793
2794#ifdef TARGET_X86_64
2795 pCtx->rip = pVM->rem.s.Env.eip;
2796 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2797#else
2798 pCtx->eip = pVM->rem.s.Env.eip;
2799 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2800#endif
2801
2802 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2803 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2804 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2805 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2806 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2807 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2808
2809 for (i = 0; i < 8; i++)
2810 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2811
2812 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2813 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2814 {
2815 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2816 STAM_COUNTER_INC(&gStatREMGDTChange);
2817 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2818 }
2819
2820 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2821 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2822 {
2823 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2824 STAM_COUNTER_INC(&gStatREMIDTChange);
2825 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2826 }
2827
2828 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2829 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2830 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2831 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2832 {
2833 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2834 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2835 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2836 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2837 STAM_COUNTER_INC(&gStatREMLDTRChange);
2838 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2839 }
2840
2841 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2842 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2843 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2844 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2845 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2846 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2847 : 0) )
2848 {
2849 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2850 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2851 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2852 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2853 pCtx->tr = pVM->rem.s.Env.tr.selector;
2854 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2855 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2856 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2857 if (pCtx->trHid.Attr.u)
2858 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2859 STAM_COUNTER_INC(&gStatREMTRChange);
2860 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2861 }
2862
2863 /** @todo These values could still be out of sync! */
2864 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2865 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2866 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2867 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2868
2869 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2870 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2871 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2872
2873 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2874 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2875 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2876
2877 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2878 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2879 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2880
2881 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2882 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2883 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2884
2885 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2886 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2887 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2888
2889 /* Sysenter MSR */
2890 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2891 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2892 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2893
2894 /* System MSRs. */
2895 pCtx->msrEFER = pVM->rem.s.Env.efer;
2896 pCtx->msrSTAR = pVM->rem.s.Env.star;
2897 pCtx->msrPAT = pVM->rem.s.Env.pat;
2898#ifdef TARGET_X86_64
2899 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2900 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2901 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2902 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2903#endif
2904
2905}
2906
2907
2908/**
2909 * Update the VMM state information if we're currently in REM.
2910 *
2911 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2912 * we're currently executing in REM and the VMM state is invalid. This method will of
2913 * course check that we're executing in REM before syncing any data over to the VMM.
2914 *
2915 * @param pVM The VM handle.
2916 * @param pVCpu The VMCPU handle.
2917 */
2918REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2919{
2920 if (pVM->rem.s.fInREM)
2921 remR3StateUpdate(pVM, pVCpu);
2922}
2923
2924
2925#undef LOG_GROUP
2926#define LOG_GROUP LOG_GROUP_REM
2927
2928
2929/**
2930 * Notify the recompiler about Address Gate 20 state change.
2931 *
2932 * This notification is required since A20 gate changes are
2933 * initialized from a device driver and the VM might just as
2934 * well be in REM mode as in RAW mode.
2935 *
2936 * @param pVM VM handle.
2937 * @param pVCpu VMCPU handle.
2938 * @param fEnable True if the gate should be enabled.
2939 * False if the gate should be disabled.
2940 */
2941REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2942{
2943 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2944 VM_ASSERT_EMT(pVM);
2945
2946 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2947 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2948 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2949}
2950
2951
2952/**
2953 * Replays the handler notification changes
2954 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2955 *
2956 * @param pVM VM handle.
2957 */
2958REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2959{
2960 /*
2961 * Replay the flushes.
2962 */
2963 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2964 VM_ASSERT_EMT(pVM);
2965
2966 /** @todo this isn't ensuring correct replay order. */
2967 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2968 {
2969 uint32_t idxNext;
2970 uint32_t idxRevHead;
2971 uint32_t idxHead;
2972#ifdef VBOX_STRICT
2973 int32_t c = 0;
2974#endif
2975
2976 /* Lockless purging of pending notifications. */
2977 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2978 if (idxHead == UINT32_MAX)
2979 return;
2980 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2981
2982 /*
2983 * Reverse the list to process it in FIFO order.
2984 */
2985 idxRevHead = UINT32_MAX;
2986 do
2987 {
2988 /* Save the index of the next rec. */
2989 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2990 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2991 /* Push the record onto the reversed list. */
2992 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2993 idxRevHead = idxHead;
2994 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2995 /* Advance. */
2996 idxHead = idxNext;
2997 } while (idxHead != UINT32_MAX);
2998
2999 /*
3000 * Loop thru the list, reinserting the record into the free list as they are
3001 * processed to avoid having other EMTs running out of entries while we're flushing.
3002 */
3003 idxHead = idxRevHead;
3004 do
3005 {
3006 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3007 uint32_t idxCur;
3008 Assert(--c >= 0);
3009
3010 switch (pCur->enmKind)
3011 {
3012 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3013 remR3NotifyHandlerPhysicalRegister(pVM,
3014 pCur->u.PhysicalRegister.enmType,
3015 pCur->u.PhysicalRegister.GCPhys,
3016 pCur->u.PhysicalRegister.cb,
3017 pCur->u.PhysicalRegister.fHasHCHandler);
3018 break;
3019
3020 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3021 remR3NotifyHandlerPhysicalDeregister(pVM,
3022 pCur->u.PhysicalDeregister.enmType,
3023 pCur->u.PhysicalDeregister.GCPhys,
3024 pCur->u.PhysicalDeregister.cb,
3025 pCur->u.PhysicalDeregister.fHasHCHandler,
3026 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3027 break;
3028
3029 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3030 remR3NotifyHandlerPhysicalModify(pVM,
3031 pCur->u.PhysicalModify.enmType,
3032 pCur->u.PhysicalModify.GCPhysOld,
3033 pCur->u.PhysicalModify.GCPhysNew,
3034 pCur->u.PhysicalModify.cb,
3035 pCur->u.PhysicalModify.fHasHCHandler,
3036 pCur->u.PhysicalModify.fRestoreAsRAM);
3037 break;
3038
3039 default:
3040 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3041 break;
3042 }
3043
3044 /*
3045 * Advance idxHead.
3046 */
3047 idxCur = idxHead;
3048 idxHead = pCur->idxNext;
3049 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3050
3051 /*
3052 * Put the record back into the free list.
3053 */
3054 do
3055 {
3056 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3057 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3058 ASMCompilerBarrier();
3059 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3060 } while (idxHead != UINT32_MAX);
3061
3062#ifdef VBOX_STRICT
3063 if (pVM->cCpus == 1)
3064 {
3065 unsigned c;
3066 /* Check that all records are now on the free list. */
3067 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3068 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3069 c++;
3070 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3071 }
3072#endif
3073 }
3074}
3075
3076
3077/**
3078 * Notify REM about changed code page.
3079 *
3080 * @returns VBox status code.
3081 * @param pVM VM handle.
3082 * @param pVCpu VMCPU handle.
3083 * @param pvCodePage Code page address
3084 */
3085REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3086{
3087#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3088 int rc;
3089 RTGCPHYS PhysGC;
3090 uint64_t flags;
3091
3092 VM_ASSERT_EMT(pVM);
3093
3094 /*
3095 * Get the physical page address.
3096 */
3097 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3098 if (rc == VINF_SUCCESS)
3099 {
3100 /*
3101 * Sync the required registers and flush the whole page.
3102 * (Easier to do the whole page than notifying it about each physical
3103 * byte that was changed.
3104 */
3105 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3106 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3107 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3108 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3109
3110 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3111 }
3112#endif
3113 return VINF_SUCCESS;
3114}
3115
3116
3117/**
3118 * Notification about a successful MMR3PhysRegister() call.
3119 *
3120 * @param pVM VM handle.
3121 * @param GCPhys The physical address the RAM.
3122 * @param cb Size of the memory.
3123 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3124 */
3125REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3126{
3127 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3128 VM_ASSERT_EMT(pVM);
3129
3130 /*
3131 * Validate input - we trust the caller.
3132 */
3133 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3134 Assert(cb);
3135 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3136 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3137
3138 /*
3139 * Base ram? Update GCPhysLastRam.
3140 */
3141 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3142 {
3143 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3144 {
3145 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3146 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3147 }
3148 }
3149
3150 /*
3151 * Register the ram.
3152 */
3153 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3154
3155 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3156 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3157 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3158
3159 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3160}
3161
3162
3163/**
3164 * Notification about a successful MMR3PhysRomRegister() call.
3165 *
3166 * @param pVM VM handle.
3167 * @param GCPhys The physical address of the ROM.
3168 * @param cb The size of the ROM.
3169 * @param pvCopy Pointer to the ROM copy.
3170 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3171 * This function will be called when ever the protection of the
3172 * shadow ROM changes (at reset and end of POST).
3173 */
3174REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3175{
3176 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3177 VM_ASSERT_EMT(pVM);
3178
3179 /*
3180 * Validate input - we trust the caller.
3181 */
3182 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3183 Assert(cb);
3184 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3185
3186 /*
3187 * Register the rom.
3188 */
3189 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3190
3191 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3192 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3193 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3194
3195 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3196}
3197
3198
3199/**
3200 * Notification about a successful memory deregistration or reservation.
3201 *
3202 * @param pVM VM Handle.
3203 * @param GCPhys Start physical address.
3204 * @param cb The size of the range.
3205 */
3206REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3207{
3208 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3209 VM_ASSERT_EMT(pVM);
3210
3211 /*
3212 * Validate input - we trust the caller.
3213 */
3214 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3215 Assert(cb);
3216 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3217
3218 /*
3219 * Unassigning the memory.
3220 */
3221 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3222
3223 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3224 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3225 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3226
3227 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3228}
3229
3230
3231/**
3232 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3233 *
3234 * @param pVM VM Handle.
3235 * @param enmType Handler type.
3236 * @param GCPhys Handler range address.
3237 * @param cb Size of the handler range.
3238 * @param fHasHCHandler Set if the handler has a HC callback function.
3239 *
3240 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3241 * Handler memory type to memory which has no HC handler.
3242 */
3243static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3244{
3245 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3246 enmType, GCPhys, cb, fHasHCHandler));
3247
3248 VM_ASSERT_EMT(pVM);
3249 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3250 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3251
3252
3253 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3254
3255 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3256 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3257 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3258 else if (fHasHCHandler)
3259 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3260 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3261
3262 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3263}
3264
3265/**
3266 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3267 *
3268 * @param pVM VM Handle.
3269 * @param enmType Handler type.
3270 * @param GCPhys Handler range address.
3271 * @param cb Size of the handler range.
3272 * @param fHasHCHandler Set if the handler has a HC callback function.
3273 *
3274 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3275 * Handler memory type to memory which has no HC handler.
3276 */
3277REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3278{
3279 REMR3ReplayHandlerNotifications(pVM);
3280
3281 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3282}
3283
3284/**
3285 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3286 *
3287 * @param pVM VM Handle.
3288 * @param enmType Handler type.
3289 * @param GCPhys Handler range address.
3290 * @param cb Size of the handler range.
3291 * @param fHasHCHandler Set if the handler has a HC callback function.
3292 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3293 */
3294static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3295{
3296 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3297 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3298 VM_ASSERT_EMT(pVM);
3299
3300
3301 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3302
3303 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3304 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3305 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3306 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3307 else if (fHasHCHandler)
3308 {
3309 if (!fRestoreAsRAM)
3310 {
3311 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3312 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3313 }
3314 else
3315 {
3316 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3317 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3318 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3319 }
3320 }
3321 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3322
3323 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3324}
3325
3326/**
3327 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3328 *
3329 * @param pVM VM Handle.
3330 * @param enmType Handler type.
3331 * @param GCPhys Handler range address.
3332 * @param cb Size of the handler range.
3333 * @param fHasHCHandler Set if the handler has a HC callback function.
3334 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3335 */
3336REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3337{
3338 REMR3ReplayHandlerNotifications(pVM);
3339 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3340}
3341
3342
3343/**
3344 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3345 *
3346 * @param pVM VM Handle.
3347 * @param enmType Handler type.
3348 * @param GCPhysOld Old handler range address.
3349 * @param GCPhysNew New handler range address.
3350 * @param cb Size of the handler range.
3351 * @param fHasHCHandler Set if the handler has a HC callback function.
3352 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3353 */
3354static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3355{
3356 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3357 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3358 VM_ASSERT_EMT(pVM);
3359 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3360
3361 if (fHasHCHandler)
3362 {
3363 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3364
3365 /*
3366 * Reset the old page.
3367 */
3368 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3369 if (!fRestoreAsRAM)
3370 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3371 else
3372 {
3373 /* This is not perfect, but it'll do for PD monitoring... */
3374 Assert(cb == PAGE_SIZE);
3375 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3376 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3377 }
3378
3379 /*
3380 * Update the new page.
3381 */
3382 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3383 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3384 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3385 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3386
3387 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3388 }
3389}
3390
3391/**
3392 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3393 *
3394 * @param pVM VM Handle.
3395 * @param enmType Handler type.
3396 * @param GCPhysOld Old handler range address.
3397 * @param GCPhysNew New handler range address.
3398 * @param cb Size of the handler range.
3399 * @param fHasHCHandler Set if the handler has a HC callback function.
3400 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3401 */
3402REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3403{
3404 REMR3ReplayHandlerNotifications(pVM);
3405
3406 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3407}
3408
3409/**
3410 * Checks if we're handling access to this page or not.
3411 *
3412 * @returns true if we're trapping access.
3413 * @returns false if we aren't.
3414 * @param pVM The VM handle.
3415 * @param GCPhys The physical address.
3416 *
3417 * @remark This function will only work correctly in VBOX_STRICT builds!
3418 */
3419REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3420{
3421#ifdef VBOX_STRICT
3422 unsigned long off;
3423 REMR3ReplayHandlerNotifications(pVM);
3424
3425 off = get_phys_page_offset(GCPhys);
3426 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3427 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3428 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3429#else
3430 return false;
3431#endif
3432}
3433
3434
3435/**
3436 * Deals with a rare case in get_phys_addr_code where the code
3437 * is being monitored.
3438 *
3439 * It could also be an MMIO page, in which case we will raise a fatal error.
3440 *
3441 * @returns The physical address corresponding to addr.
3442 * @param env The cpu environment.
3443 * @param addr The virtual address.
3444 * @param pTLBEntry The TLB entry.
3445 */
3446target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3447 target_ulong addr,
3448 CPUTLBEntry* pTLBEntry,
3449 target_phys_addr_t ioTLBEntry)
3450{
3451 PVM pVM = env->pVM;
3452
3453 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3454 {
3455 /* If code memory is being monitored, appropriate IOTLB entry will have
3456 handler IO type, and addend will provide real physical address, no
3457 matter if we store VA in TLB or not, as handlers are always passed PA */
3458 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3459 return ret;
3460 }
3461 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3462 "*** handlers\n",
3463 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3464 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3465 LogRel(("*** mmio\n"));
3466 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3467 LogRel(("*** phys\n"));
3468 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3469 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3470 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3471 AssertFatalFailed();
3472}
3473
3474/**
3475 * Read guest RAM and ROM.
3476 *
3477 * @param SrcGCPhys The source address (guest physical).
3478 * @param pvDst The destination address.
3479 * @param cb Number of bytes
3480 */
3481void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3482{
3483 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3484 VBOX_CHECK_ADDR(SrcGCPhys);
3485 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3486#ifdef VBOX_DEBUG_PHYS
3487 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3488#endif
3489 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3490}
3491
3492
3493/**
3494 * Read guest RAM and ROM, unsigned 8-bit.
3495 *
3496 * @param SrcGCPhys The source address (guest physical).
3497 */
3498RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3499{
3500 uint8_t val;
3501 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3502 VBOX_CHECK_ADDR(SrcGCPhys);
3503 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3504 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3505#ifdef VBOX_DEBUG_PHYS
3506 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3507#endif
3508 return val;
3509}
3510
3511
3512/**
3513 * Read guest RAM and ROM, signed 8-bit.
3514 *
3515 * @param SrcGCPhys The source address (guest physical).
3516 */
3517RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3518{
3519 int8_t val;
3520 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3521 VBOX_CHECK_ADDR(SrcGCPhys);
3522 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3523 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3524#ifdef VBOX_DEBUG_PHYS
3525 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3526#endif
3527 return val;
3528}
3529
3530
3531/**
3532 * Read guest RAM and ROM, unsigned 16-bit.
3533 *
3534 * @param SrcGCPhys The source address (guest physical).
3535 */
3536RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3537{
3538 uint16_t val;
3539 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3540 VBOX_CHECK_ADDR(SrcGCPhys);
3541 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3542 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3543#ifdef VBOX_DEBUG_PHYS
3544 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3545#endif
3546 return val;
3547}
3548
3549
3550/**
3551 * Read guest RAM and ROM, signed 16-bit.
3552 *
3553 * @param SrcGCPhys The source address (guest physical).
3554 */
3555RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3556{
3557 int16_t val;
3558 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3559 VBOX_CHECK_ADDR(SrcGCPhys);
3560 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3561 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3562#ifdef VBOX_DEBUG_PHYS
3563 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3564#endif
3565 return val;
3566}
3567
3568
3569/**
3570 * Read guest RAM and ROM, unsigned 32-bit.
3571 *
3572 * @param SrcGCPhys The source address (guest physical).
3573 */
3574RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3575{
3576 uint32_t val;
3577 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3578 VBOX_CHECK_ADDR(SrcGCPhys);
3579 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3580 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3581#ifdef VBOX_DEBUG_PHYS
3582 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3583#endif
3584 return val;
3585}
3586
3587
3588/**
3589 * Read guest RAM and ROM, signed 32-bit.
3590 *
3591 * @param SrcGCPhys The source address (guest physical).
3592 */
3593RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3594{
3595 int32_t val;
3596 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3597 VBOX_CHECK_ADDR(SrcGCPhys);
3598 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3599 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3600#ifdef VBOX_DEBUG_PHYS
3601 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3602#endif
3603 return val;
3604}
3605
3606
3607/**
3608 * Read guest RAM and ROM, unsigned 64-bit.
3609 *
3610 * @param SrcGCPhys The source address (guest physical).
3611 */
3612uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3613{
3614 uint64_t val;
3615 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3616 VBOX_CHECK_ADDR(SrcGCPhys);
3617 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3618 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3619#ifdef VBOX_DEBUG_PHYS
3620 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3621#endif
3622 return val;
3623}
3624
3625
3626/**
3627 * Read guest RAM and ROM, signed 64-bit.
3628 *
3629 * @param SrcGCPhys The source address (guest physical).
3630 */
3631int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3632{
3633 int64_t val;
3634 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3635 VBOX_CHECK_ADDR(SrcGCPhys);
3636 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3637 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3638#ifdef VBOX_DEBUG_PHYS
3639 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3640#endif
3641 return val;
3642}
3643
3644
3645/**
3646 * Write guest RAM.
3647 *
3648 * @param DstGCPhys The destination address (guest physical).
3649 * @param pvSrc The source address.
3650 * @param cb Number of bytes to write
3651 */
3652void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3653{
3654 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3655 VBOX_CHECK_ADDR(DstGCPhys);
3656 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3657 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3658#ifdef VBOX_DEBUG_PHYS
3659 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3660#endif
3661}
3662
3663
3664/**
3665 * Write guest RAM, unsigned 8-bit.
3666 *
3667 * @param DstGCPhys The destination address (guest physical).
3668 * @param val Value
3669 */
3670void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3671{
3672 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3673 VBOX_CHECK_ADDR(DstGCPhys);
3674 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3675 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3676#ifdef VBOX_DEBUG_PHYS
3677 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3678#endif
3679}
3680
3681
3682/**
3683 * Write guest RAM, unsigned 8-bit.
3684 *
3685 * @param DstGCPhys The destination address (guest physical).
3686 * @param val Value
3687 */
3688void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3689{
3690 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3691 VBOX_CHECK_ADDR(DstGCPhys);
3692 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3693 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3694#ifdef VBOX_DEBUG_PHYS
3695 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3696#endif
3697}
3698
3699
3700/**
3701 * Write guest RAM, unsigned 32-bit.
3702 *
3703 * @param DstGCPhys The destination address (guest physical).
3704 * @param val Value
3705 */
3706void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3707{
3708 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3709 VBOX_CHECK_ADDR(DstGCPhys);
3710 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3711 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3712#ifdef VBOX_DEBUG_PHYS
3713 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3714#endif
3715}
3716
3717
3718/**
3719 * Write guest RAM, unsigned 64-bit.
3720 *
3721 * @param DstGCPhys The destination address (guest physical).
3722 * @param val Value
3723 */
3724void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3725{
3726 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3727 VBOX_CHECK_ADDR(DstGCPhys);
3728 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3729 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3730#ifdef VBOX_DEBUG_PHYS
3731 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3732#endif
3733}
3734
3735#undef LOG_GROUP
3736#define LOG_GROUP LOG_GROUP_REM_MMIO
3737
3738/** Read MMIO memory. */
3739static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3740{
3741 uint32_t u32 = 0;
3742 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3743 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3744 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3745 return u32;
3746}
3747
3748/** Read MMIO memory. */
3749static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3750{
3751 uint32_t u32 = 0;
3752 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3753 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3754 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3755 return u32;
3756}
3757
3758/** Read MMIO memory. */
3759static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3760{
3761 uint32_t u32 = 0;
3762 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3763 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3764 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3765 return u32;
3766}
3767
3768/** Write to MMIO memory. */
3769static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3770{
3771 int rc;
3772 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3773 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3774 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3775}
3776
3777/** Write to MMIO memory. */
3778static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3779{
3780 int rc;
3781 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3782 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3783 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3784}
3785
3786/** Write to MMIO memory. */
3787static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3788{
3789 int rc;
3790 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3791 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3792 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3793}
3794
3795
3796#undef LOG_GROUP
3797#define LOG_GROUP LOG_GROUP_REM_HANDLER
3798
3799/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3800
3801static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3802{
3803 uint8_t u8;
3804 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3805 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3806 return u8;
3807}
3808
3809static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3810{
3811 uint16_t u16;
3812 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3813 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3814 return u16;
3815}
3816
3817static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3818{
3819 uint32_t u32;
3820 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3821 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3822 return u32;
3823}
3824
/* Handler-covered memory write, byte sized.  Passing &u32 with a one-byte
   size writes the least significant byte only - relies on the host being
   little-endian (see the LFB-access warning above). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3830
/* Handler-covered memory write, word sized.  Passing &u32 with a two-byte
   size writes the low word only - relies on a little-endian host. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3836
/* Handler-covered memory write, dword sized (writes the full u32). */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3842
3843/* -+- disassembly -+- */
3844
3845#undef LOG_GROUP
3846#define LOG_GROUP LOG_GROUP_REM_DISAS
3847
3848
3849/**
3850 * Enables or disables singled stepped disassembly.
3851 *
3852 * @returns VBox status code.
3853 * @param pVM VM handle.
3854 * @param fEnable To enable set this flag, to disable clear it.
3855 */
3856static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3857{
3858 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3859 VM_ASSERT_EMT(pVM);
3860
3861 if (fEnable)
3862 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3863 else
3864 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3865#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3866 cpu_single_step(&pVM->rem.s.Env, fEnable);
3867#endif
3868 return VINF_SUCCESS;
3869}
3870
3871
3872/**
3873 * Enables or disables singled stepped disassembly.
3874 *
3875 * @returns VBox status code.
3876 * @param pVM VM handle.
3877 * @param fEnable To enable set this flag, to disable clear it.
3878 */
3879REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3880{
3881 int rc;
3882
3883 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3884 if (VM_IS_EMT(pVM))
3885 return remR3DisasEnableStepping(pVM, fEnable);
3886
3887 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3888 AssertRC(rc);
3889 return rc;
3890}
3891
3892
3893#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints whether single-stepped disassembly is currently
 * enabled; with one argument, converts it to a boolean and switches the mode.
 *
 * @returns VBox status code from the helper that ran last.
 * @param   pCmd        The command descriptor (used for failure reporting).
 * @param   pCmdHlp     Debugger command helpers (output/conversion).
 * @param   pVM         VM handle.
 * @param   paArgs      Argument array; paArgs[0] is the optional on/off value.
 * @param   cArgs       Number of arguments (0 or 1).
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    int rc;

    if (cArgs == 0)
        /*
         * Print the current status.
         */
        rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
                              pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
    else
    {
        /*
         * Convert the argument and change the mode.
         */
        bool fEnable;
        rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
        if (RT_SUCCESS(rc))
        {
            rc = REMR3DisasEnableStepping(pVM, fEnable);
            if (RT_SUCCESS(rc))
                rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
            else
                rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
        }
        else
            rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
    }
    return rc;
}
3927#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3928
3929
3930/**
3931 * Disassembles one instruction and prints it to the log.
3932 *
3933 * @returns Success indicator.
3934 * @param env Pointer to the recompiler CPU structure.
3935 * @param f32BitCode Indicates that whether or not the code should
3936 * be disassembled as 16 or 32 bit. If -1 the CS
3937 * selector will be inspected.
3938 * @param pszPrefix
3939 */
3940bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3941{
3942 PVM pVM = env->pVM;
3943 const bool fLog = LogIsEnabled();
3944 const bool fLog2 = LogIs2Enabled();
3945 int rc = VINF_SUCCESS;
3946
3947 /*
3948 * Don't bother if there ain't any log output to do.
3949 */
3950 if (!fLog && !fLog2)
3951 return true;
3952
3953 /*
3954 * Update the state so DBGF reads the correct register values.
3955 */
3956 remR3StateUpdate(pVM, env->pVCpu);
3957
3958 /*
3959 * Log registers if requested.
3960 */
3961 if (fLog2)
3962 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3963
3964 /*
3965 * Disassemble to log.
3966 */
3967 if (fLog)
3968 {
3969 PVMCPU pVCpu = VMMGetCpu(pVM);
3970 char szBuf[256];
3971 szBuf[0] = '\0';
3972 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3973 pVCpu->idCpu,
3974 0, /* Sel */
3975 0, /* GCPtr */
3976 DBGF_DISAS_FLAGS_CURRENT_GUEST
3977 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3978 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3979 szBuf,
3980 sizeof(szBuf),
3981 NULL);
3982 if (RT_FAILURE(rc))
3983 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3984 if (pszPrefix && *pszPrefix)
3985 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3986 else
3987 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3988 }
3989
3990 return RT_SUCCESS(rc);
3991}
3992
3993
/**
 * Disassemble recompiled code.
 *
 * Only does anything when level-2 logging is enabled; output goes through
 * RTLogPrintf, not the supplied file handle.
 *
 * @param   phFile      Ignored, logfile usually.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
    if (LogIs2Enabled())
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
        /* Disassemble in the host's native mode - this is host-generated code. */
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                RTLogPrintf("%s", szOutput);
            else
            {
                RTLogPrintf("disas error\n");
                cbInstr = 1; /* skip a single byte and resync (x86 hosts only) */
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }
}
4034
4035
4036/**
4037 * Disassemble guest code.
4038 *
4039 * @param phFileIgnored Ignored, logfile usually.
4040 * @param uCode The guest address of the code to disassemble. (flat?)
4041 * @param cb Number of bytes to disassemble.
4042 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4043 */
4044void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4045{
4046 if (LogIs2Enabled())
4047 {
4048 PVM pVM = cpu_single_env->pVM;
4049 PVMCPU pVCpu = cpu_single_env->pVCpu;
4050 RTSEL cs;
4051 RTGCUINTPTR eip;
4052
4053 Assert(pVCpu);
4054
4055 /*
4056 * Update the state so DBGF reads the correct register values (flags).
4057 */
4058 remR3StateUpdate(pVM, pVCpu);
4059
4060 /*
4061 * Do the disassembling.
4062 */
4063 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4064 cs = cpu_single_env->segs[R_CS].selector;
4065 eip = uCode - cpu_single_env->segs[R_CS].base;
4066 for (;;)
4067 {
4068 char szBuf[256];
4069 uint32_t cbInstr;
4070 int rc = DBGFR3DisasInstrEx(pVM,
4071 pVCpu->idCpu,
4072 cs,
4073 eip,
4074 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4075 szBuf, sizeof(szBuf),
4076 &cbInstr);
4077 if (RT_SUCCESS(rc))
4078 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4079 else
4080 {
4081 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4082 cbInstr = 1;
4083 }
4084
4085 /* next */
4086 if (cb <= cbInstr)
4087 break;
4088 cb -= cbInstr;
4089 uCode += cbInstr;
4090 eip += cbInstr;
4091 }
4092 }
4093}
4094
4095
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer.
 * @param   orig_addr   The address in question.
 *
 * @note    The returned buffer is static: not thread safe, and only valid
 *          until the next call.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    PVM pVM = cpu_single_env->pVM;
    RTGCINTPTR off = 0;
    RTDBGSYMBOL Sym;
    DBGFADDRESS Addr;

    int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* NOTE(review): 'off' is RTGCINTPTR but is formatted with %x; if
           RTGCINTPTR is 64-bit this mismatches the vararg width - confirm. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
4123
4124
4125#undef LOG_GROUP
4126#define LOG_GROUP LOG_GROUP_REM
4127
4128
4129/* -+- FF notifications -+- */
4130
4131
/**
 * Notification about a pending interrupt.
 *
 * Records the vector for cpu_get_pic_interrupt() to pick up later.  Only one
 * interrupt may be queued at a time (asserted below).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     Interrupt
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
4145
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    /* Non-destructive query; the value is consumed by cpu_get_pic_interrupt(). */
    return pVM->rem.s.u32PendingInterrupt;
}
4158
/**
 * Notification about the interrupt FF being set.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Poke the running recompiler so it re-evaluates hardware interrupts. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
#endif
}
4178
4179
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
4193
4194
/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpuDst        The target cpu for this notification.
 *                          TM will not broadcast pending timer events, but use
 *                          a dedicated EMT for them. So, only interrupt REM
 *                          execution if the given CPU is executing in REM.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef IEM_VERIFICATION_MODE
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            /* Interrupt the recompiler loop so the timers get serviced. */
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
#endif
}
4226
4227
/**
 * Notification about pending DMA transfers.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Interrupt the recompiler loop so the DMA work gets serviced. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
#endif
}
4245
4246
/**
 * Notification about pending queue items.
 *
 * @param   pVM             VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Force the recompiler loop to exit so the queues get processed. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}
4264
4265
/**
 * Notification about pending FF set by an external thread.
 *
 * @param   pVM             VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
#ifndef IEM_VERIFICATION_MODE
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Force the recompiler loop to exit so the FF gets serviced. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
#endif
}
4283
4284
4285#ifdef VBOX_WITH_STATISTICS
4286void remR3ProfileStart(int statcode)
4287{
4288 STAMPROFILEADV *pStat;
4289 switch(statcode)
4290 {
4291 case STATS_EMULATE_SINGLE_INSTR:
4292 pStat = &gStatExecuteSingleInstr;
4293 break;
4294 case STATS_QEMU_COMPILATION:
4295 pStat = &gStatCompilationQEmu;
4296 break;
4297 case STATS_QEMU_RUN_EMULATED_CODE:
4298 pStat = &gStatRunCodeQEmu;
4299 break;
4300 case STATS_QEMU_TOTAL:
4301 pStat = &gStatTotalTimeQEmu;
4302 break;
4303 case STATS_QEMU_RUN_TIMERS:
4304 pStat = &gStatTimers;
4305 break;
4306 case STATS_TLB_LOOKUP:
4307 pStat= &gStatTBLookup;
4308 break;
4309 case STATS_IRQ_HANDLING:
4310 pStat= &gStatIRQ;
4311 break;
4312 case STATS_RAW_CHECK:
4313 pStat = &gStatRawCheck;
4314 break;
4315
4316 default:
4317 AssertMsgFailed(("unknown stat %d\n", statcode));
4318 return;
4319 }
4320 STAM_PROFILE_ADV_START(pStat, a);
4321}
4322
4323
4324void remR3ProfileStop(int statcode)
4325{
4326 STAMPROFILEADV *pStat;
4327 switch(statcode)
4328 {
4329 case STATS_EMULATE_SINGLE_INSTR:
4330 pStat = &gStatExecuteSingleInstr;
4331 break;
4332 case STATS_QEMU_COMPILATION:
4333 pStat = &gStatCompilationQEmu;
4334 break;
4335 case STATS_QEMU_RUN_EMULATED_CODE:
4336 pStat = &gStatRunCodeQEmu;
4337 break;
4338 case STATS_QEMU_TOTAL:
4339 pStat = &gStatTotalTimeQEmu;
4340 break;
4341 case STATS_QEMU_RUN_TIMERS:
4342 pStat = &gStatTimers;
4343 break;
4344 case STATS_TLB_LOOKUP:
4345 pStat= &gStatTBLookup;
4346 break;
4347 case STATS_IRQ_HANDLING:
4348 pStat= &gStatIRQ;
4349 break;
4350 case STATS_RAW_CHECK:
4351 pStat = &gStatRawCheck;
4352 break;
4353 default:
4354 AssertMsgFailed(("unknown stat %d\n", statcode));
4355 return;
4356 }
4357 STAM_PROFILE_ADV_STOP(pStat, a);
4358}
4359#endif
4360
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code and interrupts the recompiler so the execution loop
 * exits and the status can be propagated back to the caller.
 *
 * @param   pVM         VM handle.
 * @param   rc          The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4375
4376
4377/* -+- timers -+- */
4378
/** Returns the guest TSC via TM, bumping the gStatCpuGetTSC counter. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4384
4385
4386/* -+- interrupts -+- */
4387
/** Asserts ISA IRQ 13 to signal a pending FPU error (legacy FERR# path). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4393
/**
 * Gets the next pending interrupt vector for the recompiler.
 *
 * @returns The interrupt vector on success, -1 when none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector queued by REMR3NotifyPendingInterrupt(). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard interrupt request up if more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4427
4428
4429/* -+- local apic -+- */
4430
4431#if 0 /* CPUMSetGuestMsr does this now. */
4432void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4433{
4434 int rc = PDMApicSetBase(env->pVM, val);
4435 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4436}
4437#endif
4438
4439uint64_t cpu_get_apic_base(CPUX86State *env)
4440{
4441 uint64_t u64;
4442 int rc = PDMApicGetBase(env->pVM, &u64);
4443 if (RT_SUCCESS(rc))
4444 {
4445 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4446 return u64;
4447 }
4448 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4449 return 0;
4450}
4451
/** Writes the task priority (CR8 view) to the virtual APIC. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4457
4458uint8_t cpu_get_apic_tpr(CPUX86State *env)
4459{
4460 uint8_t u8;
4461 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4462 if (RT_SUCCESS(rc))
4463 {
4464 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4465 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4466 }
4467 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4468 return 0;
4469}
4470
/**
 * Read an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to read.
 * @param   puValue     Where to return the value.
 */
int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
{
    Assert(env->pVCpu);
    /* Any status other than VINF_SUCCESS maps to -1, i.e. #GP(0) in the recompiler. */
    return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
}
4485
/**
 * Write to an MSR.
 *
 * @retval  0 success.
 * @retval  -1 failure, raise \#GP(0).
 * @param   env         The cpu state.
 * @param   idMsr       The MSR to write.
 * @param   uValue      The value to write.
 */
int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
{
    Assert(env->pVCpu);
    /* Any status other than VINF_SUCCESS maps to -1, i.e. #GP(0) in the recompiler. */
    return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
}
4500
4501/* -+- I/O Ports -+- */
4502
4503#undef LOG_GROUP
4504#define LOG_GROUP LOG_GROUP_REM_IOPORT
4505
4506void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
4507{
4508 int rc;
4509
4510 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4511 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4512
4513 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4514 if (RT_LIKELY(rc == VINF_SUCCESS))
4515 return;
4516 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4517 {
4518 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4519 remR3RaiseRC(env->pVM, rc);
4520 return;
4521 }
4522 remAbort(rc, __FUNCTION__);
4523}
4524
4525void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
4526{
4527 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4528 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4529 if (RT_LIKELY(rc == VINF_SUCCESS))
4530 return;
4531 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4532 {
4533 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4534 remR3RaiseRC(env->pVM, rc);
4535 return;
4536 }
4537 remAbort(rc, __FUNCTION__);
4538}
4539
4540void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
4541{
4542 int rc;
4543 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4544 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4545 if (RT_LIKELY(rc == VINF_SUCCESS))
4546 return;
4547 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4548 {
4549 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4550 remR3RaiseRC(env->pVM, rc);
4551 return;
4552 }
4553 remAbort(rc, __FUNCTION__);
4554}
4555
4556uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
4557{
4558 uint32_t u32 = 0;
4559 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4560 if (RT_LIKELY(rc == VINF_SUCCESS))
4561 {
4562 if (/*addr != 0x61 && */addr != 0x71)
4563 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4564 return (uint8_t)u32;
4565 }
4566 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4567 {
4568 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4569 remR3RaiseRC(env->pVM, rc);
4570 return (uint8_t)u32;
4571 }
4572 remAbort(rc, __FUNCTION__);
4573 return UINT8_C(0xff);
4574}
4575
4576uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
4577{
4578 uint32_t u32 = 0;
4579 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4580 if (RT_LIKELY(rc == VINF_SUCCESS))
4581 {
4582 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4583 return (uint16_t)u32;
4584 }
4585 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4586 {
4587 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4588 remR3RaiseRC(env->pVM, rc);
4589 return (uint16_t)u32;
4590 }
4591 remAbort(rc, __FUNCTION__);
4592 return UINT16_C(0xffff);
4593}
4594
4595uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
4596{
4597 uint32_t u32 = 0;
4598 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4599 if (RT_LIKELY(rc == VINF_SUCCESS))
4600 {
4601//if (addr==0x01f0 && u32 == 0x6b6d)
4602// loglevel = ~0;
4603 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4604 return u32;
4605 }
4606 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4607 {
4608 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4609 remR3RaiseRC(env->pVM, rc);
4610 return u32;
4611 }
4612 remAbort(rc, __FUNCTION__);
4613 return UINT32_C(0xffffffff);
4614}
4615
4616#undef LOG_GROUP
4617#define LOG_GROUP LOG_GROUP_REM
4618
4619
4620/* -+- helpers and misc other interfaces -+- */
4621
/**
 * Perform the CPUID instruction.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   idx         The CPUID leaf (eax).
 * @param   idxSub      The CPUID sub-leaf (ecx) where applicable.
 *                      (Ignored here - CPUM only takes the leaf.)
 * @param   pEAX        Where to store eax.
 * @param   pEBX        Where to store ebx.
 * @param   pECX        Where to store ecx.
 * @param   pEDX        Where to store edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
                   uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
{
    NOREF(idxSub);
    CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
}
4639
4640
4641#if 0 /* not used */
4642/**
4643 * Interface for qemu hardware to report back fatal errors.
4644 */
4645void hw_error(const char *pszFormat, ...)
4646{
4647 /*
4648 * Bitch about it.
4649 */
4650 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4651 * this in my Odin32 tree at home! */
4652 va_list args;
4653 va_start(args, pszFormat);
4654 RTLogPrintf("fatal error in virtual hardware:");
4655 RTLogPrintfV(pszFormat, args);
4656 va_end(args);
4657 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4658
4659 /*
4660 * If we're in REM context we'll sync back the state before 'jumping' to
4661 * the EMs failure handling.
4662 */
4663 PVM pVM = cpu_single_env->pVM;
4664 if (pVM->rem.s.fInREM)
4665 REMR3StateBack(pVM);
4666 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4667 AssertMsgFailed(("EMR3FatalError returned!\n"));
4668}
4669#endif
4670
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, forces the log out, syncs the recompiler state back
 * to VBox when inside REM, and hands the fatal error to EM.  Not expected to
 * return (asserted at the bottom).
 *
 * @param   env         The recompiler CPU state (unused; cpu_single_env is used).
 * @param   pszFormat   printf-style format string for the error message.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4731
4732
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the recompiler state back to VBox when inside REM,
 * and hands the fatal error to EM.  Not expected to return (asserted below).
 *
 * @param   rc          VBox error code.
 * @param   pszTip      Hint about why/when this happened.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4763
4764
4765/**
4766 * Dumps a linux system call.
4767 * @param pVCpu VMCPU handle.
4768 */
4769void remR3DumpLnxSyscall(PVMCPU pVCpu)
4770{
4771 static const char *apsz[] =
4772 {
4773 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4774 "sys_exit",
4775 "sys_fork",
4776 "sys_read",
4777 "sys_write",
4778 "sys_open", /* 5 */
4779 "sys_close",
4780 "sys_waitpid",
4781 "sys_creat",
4782 "sys_link",
4783 "sys_unlink", /* 10 */
4784 "sys_execve",
4785 "sys_chdir",
4786 "sys_time",
4787 "sys_mknod",
4788 "sys_chmod", /* 15 */
4789 "sys_lchown16",
4790 "sys_ni_syscall", /* old break syscall holder */
4791 "sys_stat",
4792 "sys_lseek",
4793 "sys_getpid", /* 20 */
4794 "sys_mount",
4795 "sys_oldumount",
4796 "sys_setuid16",
4797 "sys_getuid16",
4798 "sys_stime", /* 25 */
4799 "sys_ptrace",
4800 "sys_alarm",
4801 "sys_fstat",
4802 "sys_pause",
4803 "sys_utime", /* 30 */
4804 "sys_ni_syscall", /* old stty syscall holder */
4805 "sys_ni_syscall", /* old gtty syscall holder */
4806 "sys_access",
4807 "sys_nice",
4808 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4809 "sys_sync",
4810 "sys_kill",
4811 "sys_rename",
4812 "sys_mkdir",
4813 "sys_rmdir", /* 40 */
4814 "sys_dup",
4815 "sys_pipe",
4816 "sys_times",
4817 "sys_ni_syscall", /* old prof syscall holder */
4818 "sys_brk", /* 45 */
4819 "sys_setgid16",
4820 "sys_getgid16",
4821 "sys_signal",
4822 "sys_geteuid16",
4823 "sys_getegid16", /* 50 */
4824 "sys_acct",
4825 "sys_umount", /* recycled never used phys() */
4826 "sys_ni_syscall", /* old lock syscall holder */
4827 "sys_ioctl",
4828 "sys_fcntl", /* 55 */
4829 "sys_ni_syscall", /* old mpx syscall holder */
4830 "sys_setpgid",
4831 "sys_ni_syscall", /* old ulimit syscall holder */
4832 "sys_olduname",
4833 "sys_umask", /* 60 */
4834 "sys_chroot",
4835 "sys_ustat",
4836 "sys_dup2",
4837 "sys_getppid",
4838 "sys_getpgrp", /* 65 */
4839 "sys_setsid",
4840 "sys_sigaction",
4841 "sys_sgetmask",
4842 "sys_ssetmask",
4843 "sys_setreuid16", /* 70 */
4844 "sys_setregid16",
4845 "sys_sigsuspend",
4846 "sys_sigpending",
4847 "sys_sethostname",
4848 "sys_setrlimit", /* 75 */
4849 "sys_old_getrlimit",
4850 "sys_getrusage",
4851 "sys_gettimeofday",
4852 "sys_settimeofday",
4853 "sys_getgroups16", /* 80 */
4854 "sys_setgroups16",
4855 "old_select",
4856 "sys_symlink",
4857 "sys_lstat",
4858 "sys_readlink", /* 85 */
4859 "sys_uselib",
4860 "sys_swapon",
4861 "sys_reboot",
4862 "old_readdir",
4863 "old_mmap", /* 90 */
4864 "sys_munmap",
4865 "sys_truncate",
4866 "sys_ftruncate",
4867 "sys_fchmod",
4868 "sys_fchown16", /* 95 */
4869 "sys_getpriority",
4870 "sys_setpriority",
4871 "sys_ni_syscall", /* old profil syscall holder */
4872 "sys_statfs",
4873 "sys_fstatfs", /* 100 */
4874 "sys_ioperm",
4875 "sys_socketcall",
4876 "sys_syslog",
4877 "sys_setitimer",
4878 "sys_getitimer", /* 105 */
4879 "sys_newstat",
4880 "sys_newlstat",
4881 "sys_newfstat",
4882 "sys_uname",
4883 "sys_iopl", /* 110 */
4884 "sys_vhangup",
4885 "sys_ni_syscall", /* old "idle" system call */
4886 "sys_vm86old",
4887 "sys_wait4",
4888 "sys_swapoff", /* 115 */
4889 "sys_sysinfo",
4890 "sys_ipc",
4891 "sys_fsync",
4892 "sys_sigreturn",
4893 "sys_clone", /* 120 */
4894 "sys_setdomainname",
4895 "sys_newuname",
4896 "sys_modify_ldt",
4897 "sys_adjtimex",
4898 "sys_mprotect", /* 125 */
4899 "sys_sigprocmask",
4900 "sys_ni_syscall", /* old "create_module" */
4901 "sys_init_module",
4902 "sys_delete_module",
4903 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4904 "sys_quotactl",
4905 "sys_getpgid",
4906 "sys_fchdir",
4907 "sys_bdflush",
4908 "sys_sysfs", /* 135 */
4909 "sys_personality",
4910 "sys_ni_syscall", /* reserved for afs_syscall */
4911 "sys_setfsuid16",
4912 "sys_setfsgid16",
4913 "sys_llseek", /* 140 */
4914 "sys_getdents",
4915 "sys_select",
4916 "sys_flock",
4917 "sys_msync",
4918 "sys_readv", /* 145 */
4919 "sys_writev",
4920 "sys_getsid",
4921 "sys_fdatasync",
4922 "sys_sysctl",
4923 "sys_mlock", /* 150 */
4924 "sys_munlock",
4925 "sys_mlockall",
4926 "sys_munlockall",
4927 "sys_sched_setparam",
4928 "sys_sched_getparam", /* 155 */
4929 "sys_sched_setscheduler",
4930 "sys_sched_getscheduler",
4931 "sys_sched_yield",
4932 "sys_sched_get_priority_max",
4933 "sys_sched_get_priority_min", /* 160 */
4934 "sys_sched_rr_get_interval",
4935 "sys_nanosleep",
4936 "sys_mremap",
4937 "sys_setresuid16",
4938 "sys_getresuid16", /* 165 */
4939 "sys_vm86",
4940 "sys_ni_syscall", /* Old sys_query_module */
4941 "sys_poll",
4942 "sys_nfsservctl",
4943 "sys_setresgid16", /* 170 */
4944 "sys_getresgid16",
4945 "sys_prctl",
4946 "sys_rt_sigreturn",
4947 "sys_rt_sigaction",
4948 "sys_rt_sigprocmask", /* 175 */
4949 "sys_rt_sigpending",
4950 "sys_rt_sigtimedwait",
4951 "sys_rt_sigqueueinfo",
4952 "sys_rt_sigsuspend",
4953 "sys_pread64", /* 180 */
4954 "sys_pwrite64",
4955 "sys_chown16",
4956 "sys_getcwd",
4957 "sys_capget",
4958 "sys_capset", /* 185 */
4959 "sys_sigaltstack",
4960 "sys_sendfile",
4961 "sys_ni_syscall", /* reserved for streams1 */
4962 "sys_ni_syscall", /* reserved for streams2 */
4963 "sys_vfork", /* 190 */
4964 "sys_getrlimit",
4965 "sys_mmap2",
4966 "sys_truncate64",
4967 "sys_ftruncate64",
4968 "sys_stat64", /* 195 */
4969 "sys_lstat64",
4970 "sys_fstat64",
4971 "sys_lchown",
4972 "sys_getuid",
4973 "sys_getgid", /* 200 */
4974 "sys_geteuid",
4975 "sys_getegid",
4976 "sys_setreuid",
4977 "sys_setregid",
4978 "sys_getgroups", /* 205 */
4979 "sys_setgroups",
4980 "sys_fchown",
4981 "sys_setresuid",
4982 "sys_getresuid",
4983 "sys_setresgid", /* 210 */
4984 "sys_getresgid",
4985 "sys_chown",
4986 "sys_setuid",
4987 "sys_setgid",
4988 "sys_setfsuid", /* 215 */
4989 "sys_setfsgid",
4990 "sys_pivot_root",
4991 "sys_mincore",
4992 "sys_madvise",
4993 "sys_getdents64", /* 220 */
4994 "sys_fcntl64",
4995 "sys_ni_syscall", /* reserved for TUX */
4996 "sys_ni_syscall",
4997 "sys_gettid",
4998 "sys_readahead", /* 225 */
4999 "sys_setxattr",
5000 "sys_lsetxattr",
5001 "sys_fsetxattr",
5002 "sys_getxattr",
5003 "sys_lgetxattr", /* 230 */
5004 "sys_fgetxattr",
5005 "sys_listxattr",
5006 "sys_llistxattr",
5007 "sys_flistxattr",
5008 "sys_removexattr", /* 235 */
5009 "sys_lremovexattr",
5010 "sys_fremovexattr",
5011 "sys_tkill",
5012 "sys_sendfile64",
5013 "sys_futex", /* 240 */
5014 "sys_sched_setaffinity",
5015 "sys_sched_getaffinity",
5016 "sys_set_thread_area",
5017 "sys_get_thread_area",
5018 "sys_io_setup", /* 245 */
5019 "sys_io_destroy",
5020 "sys_io_getevents",
5021 "sys_io_submit",
5022 "sys_io_cancel",
5023 "sys_fadvise64", /* 250 */
5024 "sys_ni_syscall",
5025 "sys_exit_group",
5026 "sys_lookup_dcookie",
5027 "sys_epoll_create",
5028 "sys_epoll_ctl", /* 255 */
5029 "sys_epoll_wait",
5030 "sys_remap_file_pages",
5031 "sys_set_tid_address",
5032 "sys_timer_create",
5033 "sys_timer_settime", /* 260 */
5034 "sys_timer_gettime",
5035 "sys_timer_getoverrun",
5036 "sys_timer_delete",
5037 "sys_clock_settime",
5038 "sys_clock_gettime", /* 265 */
5039 "sys_clock_getres",
5040 "sys_clock_nanosleep",
5041 "sys_statfs64",
5042 "sys_fstatfs64",
5043 "sys_tgkill", /* 270 */
5044 "sys_utimes",
5045 "sys_fadvise64_64",
5046 "sys_ni_syscall" /* sys_vserver */
5047 };
5048
5049 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5050 switch (uEAX)
5051 {
5052 default:
5053 if (uEAX < RT_ELEMENTS(apsz))
5054 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5055 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5056 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5057 else
5058 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5059 break;
5060
5061 }
5062}
5063
5064
5065/**
5066 * Dumps an OpenBSD system call.
5067 * @param pVCpu VMCPU handle.
5068 */
5069void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5070{
5071 static const char *apsz[] =
5072 {
5073 "SYS_syscall", //0
5074 "SYS_exit", //1
5075 "SYS_fork", //2
5076 "SYS_read", //3
5077 "SYS_write", //4
5078 "SYS_open", //5
5079 "SYS_close", //6
5080 "SYS_wait4", //7
5081 "SYS_8",
5082 "SYS_link", //9
5083 "SYS_unlink", //10
5084 "SYS_11",
5085 "SYS_chdir", //12
5086 "SYS_fchdir", //13
5087 "SYS_mknod", //14
5088 "SYS_chmod", //15
5089 "SYS_chown", //16
5090 "SYS_break", //17
5091 "SYS_18",
5092 "SYS_19",
5093 "SYS_getpid", //20
5094 "SYS_mount", //21
5095 "SYS_unmount", //22
5096 "SYS_setuid", //23
5097 "SYS_getuid", //24
5098 "SYS_geteuid", //25
5099 "SYS_ptrace", //26
5100 "SYS_recvmsg", //27
5101 "SYS_sendmsg", //28
5102 "SYS_recvfrom", //29
5103 "SYS_accept", //30
5104 "SYS_getpeername", //31
5105 "SYS_getsockname", //32
5106 "SYS_access", //33
5107 "SYS_chflags", //34
5108 "SYS_fchflags", //35
5109 "SYS_sync", //36
5110 "SYS_kill", //37
5111 "SYS_38",
5112 "SYS_getppid", //39
5113 "SYS_40",
5114 "SYS_dup", //41
5115 "SYS_opipe", //42
5116 "SYS_getegid", //43
5117 "SYS_profil", //44
5118 "SYS_ktrace", //45
5119 "SYS_sigaction", //46
5120 "SYS_getgid", //47
5121 "SYS_sigprocmask", //48
5122 "SYS_getlogin", //49
5123 "SYS_setlogin", //50
5124 "SYS_acct", //51
5125 "SYS_sigpending", //52
5126 "SYS_osigaltstack", //53
5127 "SYS_ioctl", //54
5128 "SYS_reboot", //55
5129 "SYS_revoke", //56
5130 "SYS_symlink", //57
5131 "SYS_readlink", //58
5132 "SYS_execve", //59
5133 "SYS_umask", //60
5134 "SYS_chroot", //61
5135 "SYS_62",
5136 "SYS_63",
5137 "SYS_64",
5138 "SYS_65",
5139 "SYS_vfork", //66
5140 "SYS_67",
5141 "SYS_68",
5142 "SYS_sbrk", //69
5143 "SYS_sstk", //70
5144 "SYS_61",
5145 "SYS_vadvise", //72
5146 "SYS_munmap", //73
5147 "SYS_mprotect", //74
5148 "SYS_madvise", //75
5149 "SYS_76",
5150 "SYS_77",
5151 "SYS_mincore", //78
5152 "SYS_getgroups", //79
5153 "SYS_setgroups", //80
5154 "SYS_getpgrp", //81
5155 "SYS_setpgid", //82
5156 "SYS_setitimer", //83
5157 "SYS_84",
5158 "SYS_85",
5159 "SYS_getitimer", //86
5160 "SYS_87",
5161 "SYS_88",
5162 "SYS_89",
5163 "SYS_dup2", //90
5164 "SYS_91",
5165 "SYS_fcntl", //92
5166 "SYS_select", //93
5167 "SYS_94",
5168 "SYS_fsync", //95
5169 "SYS_setpriority", //96
5170 "SYS_socket", //97
5171 "SYS_connect", //98
5172 "SYS_99",
5173 "SYS_getpriority", //100
5174 "SYS_101",
5175 "SYS_102",
5176 "SYS_sigreturn", //103
5177 "SYS_bind", //104
5178 "SYS_setsockopt", //105
5179 "SYS_listen", //106
5180 "SYS_107",
5181 "SYS_108",
5182 "SYS_109",
5183 "SYS_110",
5184 "SYS_sigsuspend", //111
5185 "SYS_112",
5186 "SYS_113",
5187 "SYS_114",
5188 "SYS_115",
5189 "SYS_gettimeofday", //116
5190 "SYS_getrusage", //117
5191 "SYS_getsockopt", //118
5192 "SYS_119",
5193 "SYS_readv", //120
5194 "SYS_writev", //121
5195 "SYS_settimeofday", //122
5196 "SYS_fchown", //123
5197 "SYS_fchmod", //124
5198 "SYS_125",
5199 "SYS_setreuid", //126
5200 "SYS_setregid", //127
5201 "SYS_rename", //128
5202 "SYS_129",
5203 "SYS_130",
5204 "SYS_flock", //131
5205 "SYS_mkfifo", //132
5206 "SYS_sendto", //133
5207 "SYS_shutdown", //134
5208 "SYS_socketpair", //135
5209 "SYS_mkdir", //136
5210 "SYS_rmdir", //137
5211 "SYS_utimes", //138
5212 "SYS_139",
5213 "SYS_adjtime", //140
5214 "SYS_141",
5215 "SYS_142",
5216 "SYS_143",
5217 "SYS_144",
5218 "SYS_145",
5219 "SYS_146",
5220 "SYS_setsid", //147
5221 "SYS_quotactl", //148
5222 "SYS_149",
5223 "SYS_150",
5224 "SYS_151",
5225 "SYS_152",
5226 "SYS_153",
5227 "SYS_154",
5228 "SYS_nfssvc", //155
5229 "SYS_156",
5230 "SYS_157",
5231 "SYS_158",
5232 "SYS_159",
5233 "SYS_160",
5234 "SYS_getfh", //161
5235 "SYS_162",
5236 "SYS_163",
5237 "SYS_164",
5238 "SYS_sysarch", //165
5239 "SYS_166",
5240 "SYS_167",
5241 "SYS_168",
5242 "SYS_169",
5243 "SYS_170",
5244 "SYS_171",
5245 "SYS_172",
5246 "SYS_pread", //173
5247 "SYS_pwrite", //174
5248 "SYS_175",
5249 "SYS_176",
5250 "SYS_177",
5251 "SYS_178",
5252 "SYS_179",
5253 "SYS_180",
5254 "SYS_setgid", //181
5255 "SYS_setegid", //182
5256 "SYS_seteuid", //183
5257 "SYS_lfs_bmapv", //184
5258 "SYS_lfs_markv", //185
5259 "SYS_lfs_segclean", //186
5260 "SYS_lfs_segwait", //187
5261 "SYS_188",
5262 "SYS_189",
5263 "SYS_190",
5264 "SYS_pathconf", //191
5265 "SYS_fpathconf", //192
5266 "SYS_swapctl", //193
5267 "SYS_getrlimit", //194
5268 "SYS_setrlimit", //195
5269 "SYS_getdirentries", //196
5270 "SYS_mmap", //197
5271 "SYS___syscall", //198
5272 "SYS_lseek", //199
5273 "SYS_truncate", //200
5274 "SYS_ftruncate", //201
5275 "SYS___sysctl", //202
5276 "SYS_mlock", //203
5277 "SYS_munlock", //204
5278 "SYS_205",
5279 "SYS_futimes", //206
5280 "SYS_getpgid", //207
5281 "SYS_xfspioctl", //208
5282 "SYS_209",
5283 "SYS_210",
5284 "SYS_211",
5285 "SYS_212",
5286 "SYS_213",
5287 "SYS_214",
5288 "SYS_215",
5289 "SYS_216",
5290 "SYS_217",
5291 "SYS_218",
5292 "SYS_219",
5293 "SYS_220",
5294 "SYS_semget", //221
5295 "SYS_222",
5296 "SYS_223",
5297 "SYS_224",
5298 "SYS_msgget", //225
5299 "SYS_msgsnd", //226
5300 "SYS_msgrcv", //227
5301 "SYS_shmat", //228
5302 "SYS_229",
5303 "SYS_shmdt", //230
5304 "SYS_231",
5305 "SYS_clock_gettime", //232
5306 "SYS_clock_settime", //233
5307 "SYS_clock_getres", //234
5308 "SYS_235",
5309 "SYS_236",
5310 "SYS_237",
5311 "SYS_238",
5312 "SYS_239",
5313 "SYS_nanosleep", //240
5314 "SYS_241",
5315 "SYS_242",
5316 "SYS_243",
5317 "SYS_244",
5318 "SYS_245",
5319 "SYS_246",
5320 "SYS_247",
5321 "SYS_248",
5322 "SYS_249",
5323 "SYS_minherit", //250
5324 "SYS_rfork", //251
5325 "SYS_poll", //252
5326 "SYS_issetugid", //253
5327 "SYS_lchown", //254
5328 "SYS_getsid", //255
5329 "SYS_msync", //256
5330 "SYS_257",
5331 "SYS_258",
5332 "SYS_259",
5333 "SYS_getfsstat", //260
5334 "SYS_statfs", //261
5335 "SYS_fstatfs", //262
5336 "SYS_pipe", //263
5337 "SYS_fhopen", //264
5338 "SYS_265",
5339 "SYS_fhstatfs", //266
5340 "SYS_preadv", //267
5341 "SYS_pwritev", //268
5342 "SYS_kqueue", //269
5343 "SYS_kevent", //270
5344 "SYS_mlockall", //271
5345 "SYS_munlockall", //272
5346 "SYS_getpeereid", //273
5347 "SYS_274",
5348 "SYS_275",
5349 "SYS_276",
5350 "SYS_277",
5351 "SYS_278",
5352 "SYS_279",
5353 "SYS_280",
5354 "SYS_getresuid", //281
5355 "SYS_setresuid", //282
5356 "SYS_getresgid", //283
5357 "SYS_setresgid", //284
5358 "SYS_285",
5359 "SYS_mquery", //286
5360 "SYS_closefrom", //287
5361 "SYS_sigaltstack", //288
5362 "SYS_shmget", //289
5363 "SYS_semop", //290
5364 "SYS_stat", //291
5365 "SYS_fstat", //292
5366 "SYS_lstat", //293
5367 "SYS_fhstat", //294
5368 "SYS___semctl", //295
5369 "SYS_shmctl", //296
5370 "SYS_msgctl", //297
5371 "SYS_MAXSYSCALL", //298
5372 //299
5373 //300
5374 };
5375 uint32_t uEAX;
5376 if (!LogIsEnabled())
5377 return;
5378 uEAX = CPUMGetGuestEAX(pVCpu);
5379 switch (uEAX)
5380 {
5381 default:
5382 if (uEAX < RT_ELEMENTS(apsz))
5383 {
5384 uint32_t au32Args[8] = {0};
5385 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5386 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5387 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5388 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5389 }
5390 else
5391 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5392 break;
5393 }
5394}
5395
5396
5397#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5398/**
5399 * The Dll main entry point (stub).
5400 */
5401bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5402{
5403 return true;
5404}
5405
/**
 * Minimal byte-wise memcpy replacement for the no-CRT build.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; must not overlap dst (standard memcpy
 *                  contract).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src;  /* const-correct: don't discard the qualifier on src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5413
5414#endif
5415
/**
 * SMM state-change callback expected by the recompiler core.
 *
 * Intentionally empty: this REM integration takes no action when the guest's
 * SMM state changes.
 *
 * @param   env     The recompiler CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette