VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 36054

Last change on this file since 36054 was 36054, checked in by vboxsync, 14 years ago

VMM/REM: Made .remstep work to some degree (might skip interrupts/traps).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.4 KB
Line 
1/* $Id: VBoxRecompiler.c 36054 2011-02-22 15:04:28Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/vmm/rem.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/iom.h>
36#include <VBox/vmm/mm.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/vmm/hwaccm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include "REMInternal.h"
45#include <VBox/vmm/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75/** Copy 80-bit fpu register at pSrc to pDst.
76 * This is probably faster than *calling* memcpy.
77 */
78#define REM_COPY_FPU_REG(pDst, pSrc) \
79 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
88static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
105static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling data for the various REM execution phases. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE   gStatGCPhys2HCVirt;
static STAMPROFILE   gStatHCVirt2GCPhys;
static STAMCOUNTER   gStatCpuGetTSC;
/* Counters for the individual reasons raw-mode execution was refused. */
static STAMCOUNTER   gStatRefuseTFInhibit;
static STAMCOUNTER   gStatRefuseVM86;
static STAMCOUNTER   gStatRefusePaging;
static STAMCOUNTER   gStatRefusePAE;
static STAMCOUNTER   gStatRefuseIOPLNot0;
static STAMCOUNTER   gStatRefuseIF0;
static STAMCOUNTER   gStatRefuseCode16;
static STAMCOUNTER   gStatRefuseWP0;
static STAMCOUNTER   gStatRefuseRing1or2;
static STAMCOUNTER   gStatRefuseCanExecute;
/* Counters for descriptor table / task register changes observed by REM. */
static STAMCOUNTER   gStatREMGDTChange;
static STAMCOUNTER   gStatREMIDTChange;
static STAMCOUNTER   gStatREMLDTRChange;
static STAMCOUNTER   gStatREMTRChange;
/* Indexed by segment register (ES, CS, SS, DS, FS, GS) - see the STAM_REG
   calls in REMR3Init for the exact mapping. */
static STAMCOUNTER   gStatSelOutOfSync[6];
static STAMCOUNTER   gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER   gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
149
/*
 * Global stuff.
 */

/** MMIO read callbacks (indexed by access size: 0=byte, 1=word, 2=dword),
 *  registered with cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks (indexed by access size: 0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (indexed by access size: 0=byte, 1=word, 2=dword). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (indexed by access size: 0=byte, 1=word, 2=dword). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
185
186
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 *  Registered once with DBGCRegisterCommands() from REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
uint8_t *code_gen_prologue;
220
221
222/*******************************************************************************
223* Internal Functions *
224*******************************************************************************/
225void remAbort(int rc, const char *pszTip);
226extern int testmath(void);
227
228/* Put them here to avoid unused variable warning. */
229AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
230#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
231//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
232/* Why did this have to be identical?? */
233AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
234#else
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#endif
237
238
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers the MMIO/handler memory
 * types, the saved state unit, the debugger command and (optionally) the
 * statistics.  Note that the ordering below matters: notifications are
 * ignored (cIgnoreAll) while the recompiler core is brought up, and the
 * CPUID features are copied only after cpu_x86_init() has succeeded.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while the recompiler core is brought up;
       re-enabled (ASMAtomicDecU32) further down once registration is done. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature leaves (std leaf 1 and ext leaf
       0x80000001) into the recompiler environment. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered only once per process; failure here is
     * non-fatal (fRegisteredCmds simply stays false).
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters mapping directly onto the exec.c globals. */
    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists: empty pending list, and a free
     * list chaining every aHandlerNotifications entry via idxNext.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
436
437
438/**
439 * Finalizes the REM initialization.
440 *
441 * This is called after all components, devices and drivers has
442 * been initialized. Its main purpose it to finish the RAM related
443 * initialization.
444 *
445 * @returns VBox status code.
446 *
447 * @param pVM The VM handle.
448 */
449REMR3DECL(int) REMR3InitFinalize(PVM pVM)
450{
451 int rc;
452
453 /*
454 * Ram size & dirty bit map.
455 */
456 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
457 pVM->rem.s.fGCPhysLastRamFixed = true;
458#ifdef RT_STRICT
459 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
460#else
461 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
462#endif
463 return rc;
464}
465
466
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per RAM page.  In guarded mode the map is page
 * allocated with an inaccessible tail so out-of-bounds writes fault, and the
 * base pointer is shifted so the in-use part ends exactly at the guard.
 *
 * @returns VBox status code.
 * @param   pVM      The VM handle.
 * @param   fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* RAM size = highest RAM address + 1; the +1 must not wrap around. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; assert cb was page aligned. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Simple heap allocation, no guard pages. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail (everything past the aligned bitmap) inaccessible so
           any overrun triggers a fault instead of silent corruption. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the start forward so the used bytes end exactly at the guard. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
521
522
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * Currently this only deregisters the statistics registered by REMR3Init
 * (mirroring the STAM_REG calls there); other resources are reclaimed when
 * the process goes away.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
593
594
595/**
596 * The VM is being reset.
597 *
598 * For the REM component this means to call the cpu_reset() and
599 * reinitialize some state variables.
600 *
601 * @param pVM VM handle.
602 */
603REMR3DECL(void) REMR3Reset(PVM pVM)
604{
605 /*
606 * Reset the REM cpu.
607 */
608 Assert(pVM->rem.s.cIgnoreAll == 0);
609 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
610 cpu_reset(&pVM->rem.s.Env);
611 pVM->rem.s.cInvalidatedPages = 0;
612 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614
615 /* Clear raw ring 0 init state */
616 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
617
618 /* Flush the TBs the next time we execute code here. */
619 pVM->rem.s.fFlushTBs = true;
620}
621
622
623/**
624 * Execute state save operation.
625 *
626 * @returns VBox status code.
627 * @param pVM VM Handle.
628 * @param pSSM SSM operation handle.
629 */
630static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
631{
632 PREM pRem = &pVM->rem.s;
633
634 /*
635 * Save the required CPU Env bits.
636 * (Not much because we're never in REM when doing the save.)
637 */
638 LogFlow(("remR3Save:\n"));
639 Assert(!pRem->fInREM);
640 SSMR3PutU32(pSSM, pRem->Env.hflags);
641 SSMR3PutU32(pSSM, ~0); /* separator */
642
643 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
644 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
645 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
646
647 return SSMR3PutU32(pSSM, ~0); /* terminator */
648}
649
650
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save, with extra handling for the legacy 1.6 layout which
 * additionally stored a redundant CPU state dump and the invalidated page
 * list.  The read order must match the save order exactly.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
781
782
783
784#undef LOG_GROUP
785#define LOG_GROUP LOG_GROUP_REM_RUN
786
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that has to be disabled before we
     * start stepping.  fBp records whether one was removed so it can be
     * re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: pulse the clock (resume+suspend) so virtual time moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit codes onto VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in pVM->rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
871
872
873/**
874 * Set a breakpoint using the REM facilities.
875 *
876 * @returns VBox status code.
877 * @param pVM The VM handle.
878 * @param Address The breakpoint address.
879 * @thread The emulation thread.
880 */
881REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
882{
883 VM_ASSERT_EMT(pVM);
884 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
885 {
886 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
887 return VINF_SUCCESS;
888 }
889 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
890 return VERR_REM_NO_MORE_BP_SLOTS;
891}
892
893
894/**
895 * Clears a breakpoint set by REMR3BreakpointSet().
896 *
897 * @returns VBox status code.
898 * @param pVM The VM handle.
899 * @param Address The breakpoint address.
900 * @thread The emulation thread.
901 */
902REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
903{
904 VM_ASSERT_EMT(pVM);
905 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
906 {
907 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
908 return VINF_SUCCESS;
909 }
910 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
911 return VERR_REM_BP_NOT_FOUND;
912}
913
914
915/**
916 * Emulate an instruction.
917 *
918 * This function executes one instruction without letting anyone
919 * interrupt it. This is intended for being called while being in
920 * raw mode and thus will take care of all the state syncing between
921 * REM and the rest.
922 *
923 * @returns VBox status code.
924 * @param pVM VM handle.
925 * @param pVCpu VMCPU Handle.
926 */
927REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
928{
929 bool fFlushTBs;
930
931 int rc, rc2;
932 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
933
934 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
935 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
936 */
937 if (HWACCMIsEnabled(pVM))
938 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
939
940 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
941 fFlushTBs = pVM->rem.s.fFlushTBs;
942 pVM->rem.s.fFlushTBs = false;
943
944 /*
945 * Sync the state and enable single instruction / single stepping.
946 */
947 rc = REMR3State(pVM, pVCpu);
948 pVM->rem.s.fFlushTBs = fFlushTBs;
949 if (RT_SUCCESS(rc))
950 {
951 int interrupt_request = pVM->rem.s.Env.interrupt_request;
952 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
953 Assert(!pVM->rem.s.Env.singlestep_enabled);
954 /*
955 * Now we set the execute single instruction flag and enter the cpu_exec loop.
956 */
957 TMNotifyStartOfExecution(pVCpu);
958 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
959 rc = cpu_exec(&pVM->rem.s.Env);
960 TMNotifyEndOfExecution(pVCpu);
961 switch (rc)
962 {
963 /*
964 * Executed without anything out of the way happening.
965 */
966 case EXCP_SINGLE_INSTR:
967 rc = VINF_EM_RESCHEDULE;
968 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
969 break;
970
971 /*
972 * If we take a trap or start servicing a pending interrupt, we might end up here.
973 * (Timer thread or some other thread wishing EMT's attention.)
974 */
975 case EXCP_INTERRUPT:
976 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
977 rc = VINF_EM_RESCHEDULE;
978 break;
979
980 /*
981 * Single step, we assume!
982 * If there was a breakpoint there we're fucked now.
983 */
984 case EXCP_DEBUG:
985 {
986 /* breakpoint or single step? */
987 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
988 int iBP;
989 rc = VINF_EM_DBG_STEPPED;
990 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
991 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
992 {
993 rc = VINF_EM_DBG_BREAKPOINT;
994 break;
995 }
996 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
997 break;
998 }
999
1000 /*
1001 * hlt instruction.
1002 */
1003 case EXCP_HLT:
1004 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1005 rc = VINF_EM_HALT;
1006 break;
1007
1008 /*
1009 * The VM has halted.
1010 */
1011 case EXCP_HALTED:
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1013 rc = VINF_EM_HALT;
1014 break;
1015
1016 /*
1017 * Switch to RAW-mode.
1018 */
1019 case EXCP_EXECUTE_RAW:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1021 rc = VINF_EM_RESCHEDULE_RAW;
1022 break;
1023
1024 /*
1025 * Switch to hardware accelerated RAW-mode.
1026 */
1027 case EXCP_EXECUTE_HWACC:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1029 rc = VINF_EM_RESCHEDULE_HWACC;
1030 break;
1031
1032 /*
1033 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1034 */
1035 case EXCP_RC:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1037 rc = pVM->rem.s.rc;
1038 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1039 break;
1040
1041 /*
1042 * Figure out the rest when they arrive....
1043 */
1044 default:
1045 AssertMsgFailed(("rc=%d\n", rc));
1046 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1047 rc = VINF_EM_RESCHEDULE;
1048 break;
1049 }
1050
1051 /*
1052 * Switch back the state.
1053 */
1054 pVM->rem.s.Env.interrupt_request = interrupt_request;
1055 rc2 = REMR3StateBack(pVM, pVCpu);
1056 AssertRC(rc2);
1057 }
1058
1059 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1060 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1061 return rc;
1062}
1063
1064
1065/**
1066 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1067 *
1068 * @returns VBox status code.
1069 *
1070 * @param pVM The VM handle.
1071 * @param pVCpu The Virtual CPU handle.
1072 */
1073static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1074{
1075 int rc;
1076 Assert(!pVM->rem.s.Env.singlestep_enabled);
1077 Assert(pVM->rem.s.fInREM);
1078/* #define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING - slow (tb flushing?) */
1079#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1080 cpu_single_step(&pVM->rem.s.Env, 1);
1081#endif
1082
1083 /*
1084 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1085 */
1086 for (;;)
1087 {
1088 char szBuf[256];
1089
1090 /*
1091 * Log the current registers state and instruction.
1092 */
1093 remR3StateUpdate(pVM, pVCpu);
1094 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1095 szBuf[0] = '\0';
1096 rc = DBGFR3DisasInstrEx(pVM,
1097 pVCpu->idCpu,
1098 0, /* Sel */
1099 0, /* GCPtr */
1100 DBGF_DISAS_FLAGS_CURRENT_GUEST
1101 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1102 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1103 szBuf,
1104 sizeof(szBuf),
1105 NULL);
1106 if (RT_FAILURE(rc))
1107 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1108 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1109
1110 /*
1111 * Execute the instruction.
1112 */
1113 TMNotifyStartOfExecution(pVCpu);
1114
1115 if ( pVM->rem.s.Env.exception_index < 0
1116 || pVM->rem.s.Env.exception_index > 256)
1117 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1118
1119#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1120 pVM->rem.s.Env.interrupt_request = 0;
1121#else
1122 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1123#endif
1124 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1125 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1126 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1127 pVM->rem.s.Env.interrupt_request,
1128 pVM->rem.s.Env.halted,
1129 pVM->rem.s.Env.exception_index
1130 );
1131
1132 rc = cpu_exec(&pVM->rem.s.Env);
1133
1134 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1135 pVM->rem.s.Env.interrupt_request,
1136 pVM->rem.s.Env.halted,
1137 pVM->rem.s.Env.exception_index
1138 );
1139
1140 TMNotifyEndOfExecution(pVCpu);
1141
1142 switch (rc)
1143 {
1144#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1145 /*
1146 * The normal exit.
1147 */
1148 case EXCP_SINGLE_INSTR:
1149 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1150 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1151 continue;
1152 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1153 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1154 rc = VINF_SUCCESS;
1155 break;
1156
1157#else
1158 /*
1159 * The normal exit, check for breakpoints at PC just to be sure.
1160 */
1161#endif
1162 case EXCP_DEBUG:
1163 rc = VINF_EM_DBG_STEPPED;
1164 if (pVM->rem.s.Env.nb_breakpoints > 0)
1165 {
1166 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1167 int iBP;
1168 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1169 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1170 {
1171 rc = VINF_EM_DBG_BREAKPOINT;
1172 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC);
1173 break;
1174 }
1175 }
1176#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1177 if (rc == VINF_EM_DBG_STEPPED)
1178 {
1179 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1180 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1181 continue;
1182
1183 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1184 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1185 rc = VINF_SUCCESS;
1186 }
1187#endif
1188 break;
1189
1190 /*
1191 * If we take a trap or start servicing a pending interrupt, we might end up here.
1192 * (Timer thread or some other thread wishing EMT's attention.)
1193 */
1194 case EXCP_INTERRUPT:
1195 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1196 rc = VINF_SUCCESS;
1197 break;
1198
1199 /*
1200 * hlt instruction.
1201 */
1202 case EXCP_HLT:
1203 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1204 rc = VINF_EM_HALT;
1205 break;
1206
1207 /*
1208 * The VM has halted.
1209 */
1210 case EXCP_HALTED:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1212 rc = VINF_EM_HALT;
1213 break;
1214
1215 /*
1216 * Switch to RAW-mode.
1217 */
1218 case EXCP_EXECUTE_RAW:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1220 rc = VINF_EM_RESCHEDULE_RAW;
1221 break;
1222
1223 /*
1224 * Switch to hardware accelerated RAW-mode.
1225 */
1226 case EXCP_EXECUTE_HWACC:
1227 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1228 rc = VINF_EM_RESCHEDULE_HWACC;
1229 break;
1230
1231 /*
1232 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1233 */
1234 case EXCP_RC:
1235 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1236 rc = pVM->rem.s.rc;
1237 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1238 break;
1239
1240 /*
1241 * Figure out the rest when they arrive....
1242 */
1243 default:
1244 AssertMsgFailed(("rc=%d\n", rc));
1245 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1246 rc = VINF_EM_RESCHEDULE;
1247 break;
1248 }
1249 break;
1250 }
1251
1252#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1253 cpu_single_step(&pVM->rem.s.Env, 0);
1254#else
1255 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1256#endif
1257 return rc;
1258}
1259
1260
1261/**
1262 * Runs code in recompiled mode.
1263 *
1264 * Before calling this function the REM state needs to be in sync with
1265 * the VM. Call REMR3State() to perform the sync. It's only necessary
1266 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1267 * and after calling REMR3StateBack().
1268 *
1269 * @returns VBox status code.
1270 *
1271 * @param pVM VM Handle.
1272 * @param pVCpu VMCPU Handle.
1273 */
1274REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1275{
1276 int rc;
1277
1278 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1279 return remR3RunLoggingStep(pVM, pVCpu);
1280
1281 Assert(pVM->rem.s.fInREM);
1282 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1283
1284 TMNotifyStartOfExecution(pVCpu);
1285 rc = cpu_exec(&pVM->rem.s.Env);
1286 TMNotifyEndOfExecution(pVCpu);
1287 switch (rc)
1288 {
1289 /*
1290 * This happens when the execution was interrupted
1291 * by an external event, like pending timers.
1292 */
1293 case EXCP_INTERRUPT:
1294 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1295 rc = VINF_SUCCESS;
1296 break;
1297
1298 /*
1299 * hlt instruction.
1300 */
1301 case EXCP_HLT:
1302 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1303 rc = VINF_EM_HALT;
1304 break;
1305
1306 /*
1307 * The VM has halted.
1308 */
1309 case EXCP_HALTED:
1310 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1311 rc = VINF_EM_HALT;
1312 break;
1313
1314 /*
1315 * Breakpoint/single step.
1316 */
1317 case EXCP_DEBUG:
1318 {
1319#if 0//def DEBUG_bird
1320 static int iBP = 0;
1321 printf("howdy, breakpoint! iBP=%d\n", iBP);
1322 switch (iBP)
1323 {
1324 case 0:
1325 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1326 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1327 //pVM->rem.s.Env.interrupt_request = 0;
1328 //pVM->rem.s.Env.exception_index = -1;
1329 //g_fInterruptDisabled = 1;
1330 rc = VINF_SUCCESS;
1331 asm("int3");
1332 break;
1333 default:
1334 asm("int3");
1335 break;
1336 }
1337 iBP++;
1338#else
1339 /* breakpoint or single step? */
1340 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1341 int iBP;
1342 rc = VINF_EM_DBG_STEPPED;
1343 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1344 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1345 {
1346 rc = VINF_EM_DBG_BREAKPOINT;
1347 break;
1348 }
1349 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1350#endif
1351 break;
1352 }
1353
1354 /*
1355 * Switch to RAW-mode.
1356 */
1357 case EXCP_EXECUTE_RAW:
1358 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1359 rc = VINF_EM_RESCHEDULE_RAW;
1360 break;
1361
1362 /*
1363 * Switch to hardware accelerated RAW-mode.
1364 */
1365 case EXCP_EXECUTE_HWACC:
1366 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1367 rc = VINF_EM_RESCHEDULE_HWACC;
1368 break;
1369
1370 /*
1371 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1372 */
1373 case EXCP_RC:
1374 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1375 rc = pVM->rem.s.rc;
1376 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1377 break;
1378
1379 /*
1380 * Figure out the rest when they arrive....
1381 */
1382 default:
1383 AssertMsgFailed(("rc=%d\n", rc));
1384 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1385 rc = VINF_SUCCESS;
1386 break;
1387 }
1388
1389 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1390 return rc;
1391}
1392
1393
1394/**
1395 * Check if the cpu state is suitable for Raw execution.
1396 *
1397 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1398 *
1399 * @param env The CPU env struct.
1400 * @param eip The EIP to check this for (might differ from env->eip).
1401 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1402 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1403 *
1404 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1405 */
1406bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1407{
1408 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1409 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1410 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1411 uint32_t u32CR0;
1412
1413 /* Update counter. */
1414 env->pVM->rem.s.cCanExecuteRaw++;
1415
1416 /* Never when single stepping+logging guest code. */
1417 if (env->state & CPU_EMULATE_SINGLE_STEP)
1418 return false;
1419
1420 if (HWACCMIsEnabled(env->pVM))
1421 {
1422 CPUMCTX Ctx;
1423
1424 env->state |= CPU_RAW_HWACC;
1425
1426 /*
1427 * Create partial context for HWACCMR3CanExecuteGuest
1428 */
1429 Ctx.cr0 = env->cr[0];
1430 Ctx.cr3 = env->cr[3];
1431 Ctx.cr4 = env->cr[4];
1432
1433 Ctx.tr = env->tr.selector;
1434 Ctx.trHid.u64Base = env->tr.base;
1435 Ctx.trHid.u32Limit = env->tr.limit;
1436 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1437
1438 Ctx.ldtr = env->ldt.selector;
1439 Ctx.ldtrHid.u64Base = env->ldt.base;
1440 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1441 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1442
1443 Ctx.idtr.cbIdt = env->idt.limit;
1444 Ctx.idtr.pIdt = env->idt.base;
1445
1446 Ctx.gdtr.cbGdt = env->gdt.limit;
1447 Ctx.gdtr.pGdt = env->gdt.base;
1448
1449 Ctx.rsp = env->regs[R_ESP];
1450 Ctx.rip = env->eip;
1451
1452 Ctx.eflags.u32 = env->eflags;
1453
1454 Ctx.cs = env->segs[R_CS].selector;
1455 Ctx.csHid.u64Base = env->segs[R_CS].base;
1456 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1457 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1458
1459 Ctx.ds = env->segs[R_DS].selector;
1460 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1461 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1462 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1463
1464 Ctx.es = env->segs[R_ES].selector;
1465 Ctx.esHid.u64Base = env->segs[R_ES].base;
1466 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1467 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1468
1469 Ctx.fs = env->segs[R_FS].selector;
1470 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1471 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1472 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1473
1474 Ctx.gs = env->segs[R_GS].selector;
1475 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1476 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1477 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1478
1479 Ctx.ss = env->segs[R_SS].selector;
1480 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1481 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1482 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1483
1484 Ctx.msrEFER = env->efer;
1485
1486 /* Hardware accelerated raw-mode:
1487 *
1488 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1489 */
1490 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1491 {
1492 *piException = EXCP_EXECUTE_HWACC;
1493 return true;
1494 }
1495 return false;
1496 }
1497
1498 /*
1499 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1500 * or 32 bits protected mode ring 0 code
1501 *
1502 * The tests are ordered by the likelihood of being true during normal execution.
1503 */
1504 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1505 {
1506 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1507 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1508 return false;
1509 }
1510
1511#ifndef VBOX_RAW_V86
1512 if (fFlags & VM_MASK) {
1513 STAM_COUNTER_INC(&gStatRefuseVM86);
1514 Log2(("raw mode refused: VM_MASK\n"));
1515 return false;
1516 }
1517#endif
1518
1519 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1520 {
1521#ifndef DEBUG_bird
1522 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1523#endif
1524 return false;
1525 }
1526
1527 if (env->singlestep_enabled)
1528 {
1529 //Log2(("raw mode refused: Single step\n"));
1530 return false;
1531 }
1532
1533 if (env->nb_breakpoints > 0)
1534 {
1535 //Log2(("raw mode refused: Breakpoints\n"));
1536 return false;
1537 }
1538
1539 u32CR0 = env->cr[0];
1540 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1541 {
1542 STAM_COUNTER_INC(&gStatRefusePaging);
1543 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1544 return false;
1545 }
1546
1547 if (env->cr[4] & CR4_PAE_MASK)
1548 {
1549 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1550 {
1551 STAM_COUNTER_INC(&gStatRefusePAE);
1552 return false;
1553 }
1554 }
1555
1556 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1557 {
1558 if (!EMIsRawRing3Enabled(env->pVM))
1559 return false;
1560
1561 if (!(env->eflags & IF_MASK))
1562 {
1563 STAM_COUNTER_INC(&gStatRefuseIF0);
1564 Log2(("raw mode refused: IF (RawR3)\n"));
1565 return false;
1566 }
1567
1568 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1569 {
1570 STAM_COUNTER_INC(&gStatRefuseWP0);
1571 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1572 return false;
1573 }
1574 }
1575 else
1576 {
1577 if (!EMIsRawRing0Enabled(env->pVM))
1578 return false;
1579
1580 // Let's start with pure 32 bits ring 0 code first
1581 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1582 {
1583 STAM_COUNTER_INC(&gStatRefuseCode16);
1584 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1585 return false;
1586 }
1587
1588 // Only R0
1589 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1590 {
1591 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1592 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1593 return false;
1594 }
1595
1596 if (!(u32CR0 & CR0_WP_MASK))
1597 {
1598 STAM_COUNTER_INC(&gStatRefuseWP0);
1599 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1600 return false;
1601 }
1602
1603 if (PATMIsPatchGCAddr(env->pVM, eip))
1604 {
1605 Log2(("raw r0 mode forced: patch code\n"));
1606 *piException = EXCP_EXECUTE_RAW;
1607 return true;
1608 }
1609
1610#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1611 if (!(env->eflags & IF_MASK))
1612 {
1613 STAM_COUNTER_INC(&gStatRefuseIF0);
1614 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1615 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1616 return false;
1617 }
1618#endif
1619
1620 env->state |= CPU_RAW_RING0;
1621 }
1622
1623 /*
1624 * Don't reschedule the first time we're called, because there might be
1625 * special reasons why we're here that is not covered by the above checks.
1626 */
1627 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1628 {
1629 Log2(("raw mode refused: first scheduling\n"));
1630 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1631 return false;
1632 }
1633
1634 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1635 *piException = EXCP_EXECUTE_RAW;
1636 return true;
1637}
1638
1639
1640/**
1641 * Fetches a code byte.
1642 *
1643 * @returns Success indicator (bool) for ease of use.
1644 * @param env The CPU environment structure.
1645 * @param GCPtrInstr Where to fetch code.
1646 * @param pu8Byte Where to store the byte on success
1647 */
1648bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1649{
1650 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1651 if (RT_SUCCESS(rc))
1652 return true;
1653 return false;
1654}
1655
1656
1657/**
1658 * Flush (or invalidate if you like) page table/dir entry.
1659 *
1660 * (invlpg instruction; tlb_flush_page)
1661 *
1662 * @param env Pointer to cpu environment.
1663 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1664 */
1665void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1666{
1667 PVM pVM = env->pVM;
1668 PCPUMCTX pCtx;
1669 int rc;
1670
1671 /*
1672 * When we're replaying invlpg instructions or restoring a saved
1673 * state we disable this path.
1674 */
1675 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1676 return;
1677 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1678 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1679
1680 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1681
1682 /*
1683 * Update the control registers before calling PGMFlushPage.
1684 */
1685 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1686 Assert(pCtx);
1687 pCtx->cr0 = env->cr[0];
1688 pCtx->cr3 = env->cr[3];
1689 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1690 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1691 pCtx->cr4 = env->cr[4];
1692
1693 /*
1694 * Let PGM do the rest.
1695 */
1696 Assert(env->pVCpu);
1697 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1698 if (RT_FAILURE(rc))
1699 {
1700 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1701 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1702 }
1703 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1704}
1705
1706
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest-physical address to an R3 pointer for the QEMU TLB,
 * encoding special conditions in the low bits of the returned pointer:
 * bit 0 set (value 1) = unassigned/catch-all MMIO, bit 1 set = write-monitored.
 *
 * NOTE(review): the @a fWritable parameter is ignored - the call below always
 * passes true. Presumably intentional (writability is signalled via the low
 * bits instead), but confirm against the callers in the QEMU TLB code.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int   rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                       /* low bit 0 -> handler/unassigned */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);     /* low bit 1 -> write monitored */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1729
1730
1731/**
1732 * Called from tlb_protect_code in order to write monitor a code page.
1733 *
1734 * @param env Pointer to the CPU environment.
1735 * @param GCPtr Code page to monitor
1736 */
1737void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1738{
1739#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1740 Assert(env->pVM->rem.s.fInREM);
1741 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1742 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1743 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1744 && !(env->eflags & VM_MASK) /* no V86 mode */
1745 && !HWACCMIsEnabled(env->pVM))
1746 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1747#endif
1748}
1749
1750
1751/**
1752 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1753 *
1754 * @param env Pointer to the CPU environment.
1755 * @param GCPtr Code page to monitor
1756 */
1757void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1758{
1759 Assert(env->pVM->rem.s.fInREM);
1760#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1761 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1762 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1763 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1764 && !(env->eflags & VM_MASK) /* no V86 mode */
1765 && !HWACCMIsEnabled(env->pVM))
1766 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1767#endif
1768}
1769
1770
1771/**
1772 * Called when the CPU is initialized, any of the CRx registers are changed or
1773 * when the A20 line is modified.
1774 *
1775 * @param env Pointer to the CPU environment.
1776 * @param fGlobal Set if the flush is global.
1777 */
1778void remR3FlushTLB(CPUState *env, bool fGlobal)
1779{
1780 PVM pVM = env->pVM;
1781 PCPUMCTX pCtx;
1782
1783 /*
1784 * When we're replaying invlpg instructions or restoring a saved
1785 * state we disable this path.
1786 */
1787 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1788 return;
1789 Assert(pVM->rem.s.fInREM);
1790
1791 /*
1792 * The caller doesn't check cr4, so we have to do that for ourselves.
1793 */
1794 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1795 fGlobal = true;
1796 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1797
1798 /*
1799 * Update the control registers before calling PGMR3FlushTLB.
1800 */
1801 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1802 Assert(pCtx);
1803 pCtx->cr0 = env->cr[0];
1804 pCtx->cr3 = env->cr[3];
1805 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1806 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1807 pCtx->cr4 = env->cr[4];
1808
1809 /*
1810 * Let PGM do the rest.
1811 */
1812 Assert(env->pVCpu);
1813 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1814}
1815
1816
1817/**
1818 * Called when any of the cr0, cr4 or efer registers is updated.
1819 *
1820 * @param env Pointer to the CPU environment.
1821 */
1822void remR3ChangeCpuMode(CPUState *env)
1823{
1824 PVM pVM = env->pVM;
1825 uint64_t efer;
1826 PCPUMCTX pCtx;
1827 int rc;
1828
1829 /*
1830 * When we're replaying loads or restoring a saved
1831 * state this path is disabled.
1832 */
1833 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1834 return;
1835 Assert(pVM->rem.s.fInREM);
1836
1837 /*
1838 * Update the control registers before calling PGMChangeMode()
1839 * as it may need to map whatever cr3 is pointing to.
1840 */
1841 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1842 Assert(pCtx);
1843 pCtx->cr0 = env->cr[0];
1844 pCtx->cr3 = env->cr[3];
1845 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1846 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1847 pCtx->cr4 = env->cr[4];
1848
1849#ifdef TARGET_X86_64
1850 efer = env->efer;
1851#else
1852 efer = 0;
1853#endif
1854 Assert(env->pVCpu);
1855 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1856 if (rc != VINF_SUCCESS)
1857 {
1858 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1859 {
1860 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1861 remR3RaiseRC(env->pVM, rc);
1862 }
1863 else
1864 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1865 }
1866}
1867
1868
1869/**
1870 * Called from compiled code to run dma.
1871 *
1872 * @param env Pointer to the CPU environment.
1873 */
1874void remR3DmaRun(CPUState *env)
1875{
1876 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1877 PDMR3DmaRun(env->pVM);
1878 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1879}
1880
1881
1882/**
1883 * Called from compiled code to schedule pending timers in VMM
1884 *
1885 * @param env Pointer to the CPU environment.
1886 */
1887void remR3TimersRun(CPUState *env)
1888{
1889 LogFlow(("remR3TimersRun:\n"));
1890 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1891 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1892 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1893 TMR3TimerQueuesDo(env->pVM);
1894 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1895 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1896}
1897
1898
1899/**
1900 * Record trap occurrence
1901 *
1902 * @returns VBox status code
1903 * @param env Pointer to the CPU environment.
1904 * @param uTrap Trap nr
1905 * @param uErrorCode Error code
1906 * @param pvNextEIP Next EIP
1907 */
1908int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1909{
1910 PVM pVM = env->pVM;
1911#ifdef VBOX_WITH_STATISTICS
1912 static STAMCOUNTER s_aStatTrap[255];
1913 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1914#endif
1915
1916#ifdef VBOX_WITH_STATISTICS
1917 if (uTrap < 255)
1918 {
1919 if (!s_aRegisters[uTrap])
1920 {
1921 char szStatName[64];
1922 s_aRegisters[uTrap] = true;
1923 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1924 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1925 }
1926 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1927 }
1928#endif
1929 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1930 if( uTrap < 0x20
1931 && (env->cr[0] & X86_CR0_PE)
1932 && !(env->eflags & X86_EFL_VM))
1933 {
1934#ifdef DEBUG
1935 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1936#endif
1937 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1938 {
1939 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1940 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1941 return VERR_REM_TOO_MANY_TRAPS;
1942 }
1943 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1944 pVM->rem.s.cPendingExceptions = 1;
1945 pVM->rem.s.uPendingException = uTrap;
1946 pVM->rem.s.uPendingExcptEIP = env->eip;
1947 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1948 }
1949 else
1950 {
1951 pVM->rem.s.cPendingExceptions = 0;
1952 pVM->rem.s.uPendingException = uTrap;
1953 pVM->rem.s.uPendingExcptEIP = env->eip;
1954 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1955 }
1956 return VINF_SUCCESS;
1957}
1958
1959
1960/*
1961 * Clear current active trap
1962 *
1963 * @param pVM VM Handle.
1964 */
1965void remR3TrapClear(PVM pVM)
1966{
1967 pVM->rem.s.cPendingExceptions = 0;
1968 pVM->rem.s.uPendingException = 0;
1969 pVM->rem.s.uPendingExcptEIP = 0;
1970 pVM->rem.s.uPendingExcptCR2 = 0;
1971}
1972
1973
1974/*
1975 * Record previous call instruction addresses
1976 *
1977 * @param env Pointer to the CPU environment.
1978 */
1979void remR3RecordCall(CPUState *env)
1980{
1981 CSAMR3RecordCallAddress(env->pVM, env->eip);
1982}
1983
1984
1985/**
1986 * Syncs the internal REM state with the VM.
1987 *
1988 * This must be called before REMR3Run() is invoked whenever when the REM
1989 * state is not up to date. Calling it several times in a row is not
1990 * permitted.
1991 *
1992 * @returns VBox status code.
1993 *
1994 * @param pVM VM Handle.
1995 * @param pVCpu VMCPU Handle.
1996 *
1997 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1998 * no do this since the majority of the callers don't want any unnecessary of events
1999 * pending that would immediately interrupt execution.
2000 */
2001REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2002{
2003 register const CPUMCTX *pCtx;
2004 register unsigned fFlags;
2005 bool fHiddenSelRegsValid;
2006 unsigned i;
2007 TRPMEVENT enmType;
2008 uint8_t u8TrapNo;
2009 uint32_t uCpl;
2010 int rc;
2011
2012 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2013 Log2(("REMR3State:\n"));
2014
2015 pVM->rem.s.Env.pVCpu = pVCpu;
2016 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2017 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2018
2019 Assert(!pVM->rem.s.fInREM);
2020 pVM->rem.s.fInStateSync = true;
2021
2022 /*
2023 * If we have to flush TBs, do that immediately.
2024 */
2025 if (pVM->rem.s.fFlushTBs)
2026 {
2027 STAM_COUNTER_INC(&gStatFlushTBs);
2028 tb_flush(&pVM->rem.s.Env);
2029 pVM->rem.s.fFlushTBs = false;
2030 }
2031
2032 /*
2033 * Copy the registers which require no special handling.
2034 */
2035#ifdef TARGET_X86_64
2036 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2037 Assert(R_EAX == 0);
2038 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2039 Assert(R_ECX == 1);
2040 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2041 Assert(R_EDX == 2);
2042 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2043 Assert(R_EBX == 3);
2044 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2045 Assert(R_ESP == 4);
2046 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2047 Assert(R_EBP == 5);
2048 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2049 Assert(R_ESI == 6);
2050 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2051 Assert(R_EDI == 7);
2052 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2053 pVM->rem.s.Env.regs[8] = pCtx->r8;
2054 pVM->rem.s.Env.regs[9] = pCtx->r9;
2055 pVM->rem.s.Env.regs[10] = pCtx->r10;
2056 pVM->rem.s.Env.regs[11] = pCtx->r11;
2057 pVM->rem.s.Env.regs[12] = pCtx->r12;
2058 pVM->rem.s.Env.regs[13] = pCtx->r13;
2059 pVM->rem.s.Env.regs[14] = pCtx->r14;
2060 pVM->rem.s.Env.regs[15] = pCtx->r15;
2061
2062 pVM->rem.s.Env.eip = pCtx->rip;
2063
2064 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2065#else
2066 Assert(R_EAX == 0);
2067 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2068 Assert(R_ECX == 1);
2069 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2070 Assert(R_EDX == 2);
2071 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2072 Assert(R_EBX == 3);
2073 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2074 Assert(R_ESP == 4);
2075 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2076 Assert(R_EBP == 5);
2077 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2078 Assert(R_ESI == 6);
2079 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2080 Assert(R_EDI == 7);
2081 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2082 pVM->rem.s.Env.eip = pCtx->eip;
2083
2084 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2085#endif
2086
2087 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2088
2089 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2090 for (i=0;i<8;i++)
2091 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2092
2093 /*
2094 * Clear the halted hidden flag (the interrupt waking up the CPU can
2095 * have been dispatched in raw mode).
2096 */
2097 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2098
2099 /*
2100 * Replay invlpg?
2101 */
2102 if (pVM->rem.s.cInvalidatedPages)
2103 {
2104 RTUINT i;
2105
2106 pVM->rem.s.fIgnoreInvlPg = true;
2107 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2108 {
2109 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2110 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2111 }
2112 pVM->rem.s.fIgnoreInvlPg = false;
2113 pVM->rem.s.cInvalidatedPages = 0;
2114 }
2115
2116 /* Replay notification changes. */
2117 REMR3ReplayHandlerNotifications(pVM);
2118
2119 /* Update MSRs; before CRx registers! */
2120 pVM->rem.s.Env.efer = pCtx->msrEFER;
2121 pVM->rem.s.Env.star = pCtx->msrSTAR;
2122 pVM->rem.s.Env.pat = pCtx->msrPAT;
2123#ifdef TARGET_X86_64
2124 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2125 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2126 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2127 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2128
2129 /* Update the internal long mode activate flag according to the new EFER value. */
2130 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2131 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2132 else
2133 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2134#endif
2135
2136 /*
2137 * Registers which are rarely changed and require special handling / order when changed.
2138 */
2139 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2140 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2141 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2142 | CPUM_CHANGED_CR4
2143 | CPUM_CHANGED_CR0
2144 | CPUM_CHANGED_CR3
2145 | CPUM_CHANGED_GDTR
2146 | CPUM_CHANGED_IDTR
2147 | CPUM_CHANGED_SYSENTER_MSR
2148 | CPUM_CHANGED_LDTR
2149 | CPUM_CHANGED_CPUID
2150 | CPUM_CHANGED_FPU_REM
2151 )
2152 )
2153 {
2154 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2155 {
2156 pVM->rem.s.fIgnoreCR3Load = true;
2157 tlb_flush(&pVM->rem.s.Env, true);
2158 pVM->rem.s.fIgnoreCR3Load = false;
2159 }
2160
2161 /* CR4 before CR0! */
2162 if (fFlags & CPUM_CHANGED_CR4)
2163 {
2164 pVM->rem.s.fIgnoreCR3Load = true;
2165 pVM->rem.s.fIgnoreCpuMode = true;
2166 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2167 pVM->rem.s.fIgnoreCpuMode = false;
2168 pVM->rem.s.fIgnoreCR3Load = false;
2169 }
2170
2171 if (fFlags & CPUM_CHANGED_CR0)
2172 {
2173 pVM->rem.s.fIgnoreCR3Load = true;
2174 pVM->rem.s.fIgnoreCpuMode = true;
2175 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2176 pVM->rem.s.fIgnoreCpuMode = false;
2177 pVM->rem.s.fIgnoreCR3Load = false;
2178 }
2179
2180 if (fFlags & CPUM_CHANGED_CR3)
2181 {
2182 pVM->rem.s.fIgnoreCR3Load = true;
2183 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2184 pVM->rem.s.fIgnoreCR3Load = false;
2185 }
2186
2187 if (fFlags & CPUM_CHANGED_GDTR)
2188 {
2189 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2190 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2191 }
2192
2193 if (fFlags & CPUM_CHANGED_IDTR)
2194 {
2195 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2196 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2197 }
2198
2199 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2200 {
2201 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2202 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2203 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2204 }
2205
2206 if (fFlags & CPUM_CHANGED_LDTR)
2207 {
2208 if (fHiddenSelRegsValid)
2209 {
2210 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2211 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2212 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2213 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2214 }
2215 else
2216 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2217 }
2218
2219 if (fFlags & CPUM_CHANGED_CPUID)
2220 {
2221 uint32_t u32Dummy;
2222
2223 /*
2224 * Get the CPUID features.
2225 */
2226 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2227 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2228 }
2229
2230 /* Sync FPU state after CR4, CPUID and EFER (!). */
2231 if (fFlags & CPUM_CHANGED_FPU_REM)
2232 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2233 }
2234
2235 /*
2236 * Sync TR unconditionally to make life simpler.
2237 */
2238 pVM->rem.s.Env.tr.selector = pCtx->tr;
2239 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2240 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2241 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2242 /* Note! do_interrupt will fault if the busy flag is still set... */
2243 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2244
2245 /*
2246 * Update selector registers.
2247 * This must be done *after* we've synced gdt, ldt and crX registers
2248 * since we're reading the GDT/LDT om sync_seg. This will happen with
2249 * saved state which takes a quick dip into rawmode for instance.
2250 */
2251 /*
2252 * Stack; Note first check this one as the CPL might have changed. The
2253 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2254 */
2255
2256 if (fHiddenSelRegsValid)
2257 {
2258 /* The hidden selector registers are valid in the CPU context. */
2259 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2260
2261 /* Set current CPL */
2262 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2263
2264 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2265 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2266 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2267 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2268 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2269 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2270 }
2271 else
2272 {
2273 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2274 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2275 {
2276 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2277
2278 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2279 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2280#ifdef VBOX_WITH_STATISTICS
2281 if (pVM->rem.s.Env.segs[R_SS].newselector)
2282 {
2283 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2284 }
2285#endif
2286 }
2287 else
2288 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2289
2290 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2291 {
2292 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2293 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2294#ifdef VBOX_WITH_STATISTICS
2295 if (pVM->rem.s.Env.segs[R_ES].newselector)
2296 {
2297 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2298 }
2299#endif
2300 }
2301 else
2302 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2303
2304 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2305 {
2306 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2307 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2308#ifdef VBOX_WITH_STATISTICS
2309 if (pVM->rem.s.Env.segs[R_CS].newselector)
2310 {
2311 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2312 }
2313#endif
2314 }
2315 else
2316 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2317
2318 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2319 {
2320 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2321 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2322#ifdef VBOX_WITH_STATISTICS
2323 if (pVM->rem.s.Env.segs[R_DS].newselector)
2324 {
2325 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2326 }
2327#endif
2328 }
2329 else
2330 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2331
2332 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2333 * be the same but not the base/limit. */
2334 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2335 {
2336 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2337 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2338#ifdef VBOX_WITH_STATISTICS
2339 if (pVM->rem.s.Env.segs[R_FS].newselector)
2340 {
2341 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2342 }
2343#endif
2344 }
2345 else
2346 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2347
2348 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2349 {
2350 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2351 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2352#ifdef VBOX_WITH_STATISTICS
2353 if (pVM->rem.s.Env.segs[R_GS].newselector)
2354 {
2355 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2356 }
2357#endif
2358 }
2359 else
2360 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2361 }
2362
2363 /*
2364 * Check for traps.
2365 */
2366 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2367 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2368 if (RT_SUCCESS(rc))
2369 {
2370#ifdef DEBUG
2371 if (u8TrapNo == 0x80)
2372 {
2373 remR3DumpLnxSyscall(pVCpu);
2374 remR3DumpOBsdSyscall(pVCpu);
2375 }
2376#endif
2377
2378 pVM->rem.s.Env.exception_index = u8TrapNo;
2379 if (enmType != TRPM_SOFTWARE_INT)
2380 {
2381 pVM->rem.s.Env.exception_is_int = 0;
2382 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2383 }
2384 else
2385 {
2386 /*
2387 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2388 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2389 * for int03 and into.
2390 */
2391 pVM->rem.s.Env.exception_is_int = 1;
2392 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2393 /* int 3 may be generated by one-byte 0xcc */
2394 if (u8TrapNo == 3)
2395 {
2396 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2397 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2398 }
2399 /* int 4 may be generated by one-byte 0xce */
2400 else if (u8TrapNo == 4)
2401 {
2402 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2403 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2404 }
2405 }
2406
2407 /* get error code and cr2 if needed. */
2408 switch (u8TrapNo)
2409 {
2410 case 0x0e:
2411 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2412 /* fallthru */
2413 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2414 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2415 break;
2416
2417 case 0x11: case 0x08:
2418 default:
2419 pVM->rem.s.Env.error_code = 0;
2420 break;
2421 }
2422
2423 /*
2424 * We can now reset the active trap since the recompiler is gonna have a go at it.
2425 */
2426 rc = TRPMResetTrap(pVCpu);
2427 AssertRC(rc);
2428 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2429 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2430 }
2431
2432 /*
2433 * Clear old interrupt request flags; Check for pending hardware interrupts.
2434 * (See @remark for why we don't check for other FFs.)
2435 */
2436 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2437 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2438 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2439 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2440
2441 /*
2442 * We're now in REM mode.
2443 */
2444 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2445 pVM->rem.s.fInREM = true;
2446 pVM->rem.s.fInStateSync = false;
2447 pVM->rem.s.cCanExecuteRaw = 0;
2448 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2449 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2450 return VINF_SUCCESS;
2451}
2452
2453
2454/**
2455 * Syncs back changes in the REM state to the the VM state.
2456 *
2457 * This must be called after invoking REMR3Run().
2458 * Calling it several times in a row is not permitted.
2459 *
2460 * @returns VBox status code.
2461 *
2462 * @param pVM VM Handle.
2463 * @param pVCpu VMCPU Handle.
2464 */
2465REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2466{
2467 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2468 Assert(pCtx);
2469 unsigned i;
2470
2471 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2472 Log2(("REMR3StateBack:\n"));
2473 Assert(pVM->rem.s.fInREM);
2474
2475 /*
2476 * Copy back the registers.
2477 * This is done in the order they are declared in the CPUMCTX structure.
2478 */
2479
2480 /** @todo FOP */
2481 /** @todo FPUIP */
2482 /** @todo CS */
2483 /** @todo FPUDP */
2484 /** @todo DS */
2485
2486 /** @todo check if FPU/XMM was actually used in the recompiler */
2487 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2488//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2489
2490#ifdef TARGET_X86_64
2491 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2492 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2493 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2494 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2495 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2496 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2497 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2498 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2499 pCtx->r8 = pVM->rem.s.Env.regs[8];
2500 pCtx->r9 = pVM->rem.s.Env.regs[9];
2501 pCtx->r10 = pVM->rem.s.Env.regs[10];
2502 pCtx->r11 = pVM->rem.s.Env.regs[11];
2503 pCtx->r12 = pVM->rem.s.Env.regs[12];
2504 pCtx->r13 = pVM->rem.s.Env.regs[13];
2505 pCtx->r14 = pVM->rem.s.Env.regs[14];
2506 pCtx->r15 = pVM->rem.s.Env.regs[15];
2507
2508 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2509
2510#else
2511 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2512 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2513 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2514 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2515 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2516 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2517 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2518
2519 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2520#endif
2521
2522 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2523
2524#ifdef VBOX_WITH_STATISTICS
2525 if (pVM->rem.s.Env.segs[R_SS].newselector)
2526 {
2527 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2528 }
2529 if (pVM->rem.s.Env.segs[R_GS].newselector)
2530 {
2531 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2532 }
2533 if (pVM->rem.s.Env.segs[R_FS].newselector)
2534 {
2535 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2536 }
2537 if (pVM->rem.s.Env.segs[R_ES].newselector)
2538 {
2539 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2540 }
2541 if (pVM->rem.s.Env.segs[R_DS].newselector)
2542 {
2543 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2544 }
2545 if (pVM->rem.s.Env.segs[R_CS].newselector)
2546 {
2547 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2548 }
2549#endif
2550 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2551 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2552 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2553 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2554 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2555
2556#ifdef TARGET_X86_64
2557 pCtx->rip = pVM->rem.s.Env.eip;
2558 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2559#else
2560 pCtx->eip = pVM->rem.s.Env.eip;
2561 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2562#endif
2563
2564 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2565 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2566 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2567 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2568 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2569 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2570
2571 for (i = 0; i < 8; i++)
2572 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2573
2574 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2575 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2576 {
2577 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2578 STAM_COUNTER_INC(&gStatREMGDTChange);
2579 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2580 }
2581
2582 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2583 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2584 {
2585 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2586 STAM_COUNTER_INC(&gStatREMIDTChange);
2587 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2588 }
2589
2590 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2591 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2592 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2593 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2594 {
2595 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2596 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2597 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2598 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2599 STAM_COUNTER_INC(&gStatREMLDTRChange);
2600 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2601 }
2602
2603 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2604 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2605 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2606 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2607 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2608 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2609 : 0) )
2610 {
2611 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2612 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2613 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2614 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2615 pCtx->tr = pVM->rem.s.Env.tr.selector;
2616 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2617 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2618 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2619 if (pCtx->trHid.Attr.u)
2620 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2621 STAM_COUNTER_INC(&gStatREMTRChange);
2622 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2623 }
2624
2625 /** @todo These values could still be out of sync! */
2626 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2627 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2628 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2629 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2630
2631 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2632 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2633 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2634
2635 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2636 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2637 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2638
2639 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2640 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2641 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2642
2643 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2644 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2645 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2646
2647 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2648 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2649 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2650
2651 /* Sysenter MSR */
2652 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2653 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2654 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2655
2656 /* System MSRs. */
2657 pCtx->msrEFER = pVM->rem.s.Env.efer;
2658 pCtx->msrSTAR = pVM->rem.s.Env.star;
2659 pCtx->msrPAT = pVM->rem.s.Env.pat;
2660#ifdef TARGET_X86_64
2661 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2662 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2663 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2664 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2665#endif
2666
2667 remR3TrapClear(pVM);
2668
2669 /*
2670 * Check for traps.
2671 */
2672 if ( pVM->rem.s.Env.exception_index >= 0
2673 && pVM->rem.s.Env.exception_index < 256)
2674 {
2675 int rc;
2676
2677 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2678 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2679 AssertRC(rc);
2680 switch (pVM->rem.s.Env.exception_index)
2681 {
2682 case 0x0e:
2683 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2684 /* fallthru */
2685 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2686 case 0x11: case 0x08: /* 0 */
2687 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2688 break;
2689 }
2690
2691 }
2692
2693 /*
2694 * We're not longer in REM mode.
2695 */
2696 CPUMR3RemLeave(pVCpu,
2697 HWACCMIsEnabled(pVM)
2698 || ( pVM->rem.s.Env.segs[R_SS].newselector
2699 | pVM->rem.s.Env.segs[R_GS].newselector
2700 | pVM->rem.s.Env.segs[R_FS].newselector
2701 | pVM->rem.s.Env.segs[R_ES].newselector
2702 | pVM->rem.s.Env.segs[R_DS].newselector
2703 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2704 );
2705 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2706 pVM->rem.s.fInREM = false;
2707 pVM->rem.s.pCtx = NULL;
2708 pVM->rem.s.Env.pVCpu = NULL;
2709 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2710 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2711 return VINF_SUCCESS;
2712}
2713
2714
2715/**
2716 * This is called by the disassembler when it wants to update the cpu state
2717 * before for instance doing a register dump.
2718 */
2719static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2720{
2721 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2722 unsigned i;
2723
2724 Assert(pVM->rem.s.fInREM);
2725
2726 /*
2727 * Copy back the registers.
2728 * This is done in the order they are declared in the CPUMCTX structure.
2729 */
2730
2731 /** @todo FOP */
2732 /** @todo FPUIP */
2733 /** @todo CS */
2734 /** @todo FPUDP */
2735 /** @todo DS */
2736 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2737 pCtx->fpu.MXCSR = 0;
2738 pCtx->fpu.MXCSR_MASK = 0;
2739
2740 /** @todo check if FPU/XMM was actually used in the recompiler */
2741 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2742//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2743
2744#ifdef TARGET_X86_64
2745 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2746 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2747 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2748 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2749 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2750 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2751 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2752 pCtx->r8 = pVM->rem.s.Env.regs[8];
2753 pCtx->r9 = pVM->rem.s.Env.regs[9];
2754 pCtx->r10 = pVM->rem.s.Env.regs[10];
2755 pCtx->r11 = pVM->rem.s.Env.regs[11];
2756 pCtx->r12 = pVM->rem.s.Env.regs[12];
2757 pCtx->r13 = pVM->rem.s.Env.regs[13];
2758 pCtx->r14 = pVM->rem.s.Env.regs[14];
2759 pCtx->r15 = pVM->rem.s.Env.regs[15];
2760
2761 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2762#else
2763 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2764 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2765 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2766 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2767 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2768 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2769 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2770
2771 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2772#endif
2773
2774 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2775
2776 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2777 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2778 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2779 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2780 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2781
2782#ifdef TARGET_X86_64
2783 pCtx->rip = pVM->rem.s.Env.eip;
2784 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2785#else
2786 pCtx->eip = pVM->rem.s.Env.eip;
2787 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2788#endif
2789
2790 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2791 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2792 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2793 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2794 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2795 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2796
2797 for (i = 0; i < 8; i++)
2798 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2799
2800 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2801 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2802 {
2803 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2804 STAM_COUNTER_INC(&gStatREMGDTChange);
2805 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2806 }
2807
2808 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2809 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2810 {
2811 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2812 STAM_COUNTER_INC(&gStatREMIDTChange);
2813 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2814 }
2815
2816 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2817 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2818 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2819 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2820 {
2821 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2822 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2823 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2824 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2825 STAM_COUNTER_INC(&gStatREMLDTRChange);
2826 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2827 }
2828
2829 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2830 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2831 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2832 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2833 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2834 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2835 : 0) )
2836 {
2837 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2838 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2839 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2840 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2841 pCtx->tr = pVM->rem.s.Env.tr.selector;
2842 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2843 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2844 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2845 if (pCtx->trHid.Attr.u)
2846 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2847 STAM_COUNTER_INC(&gStatREMTRChange);
2848 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2849 }
2850
2851 /** @todo These values could still be out of sync! */
2852 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2853 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2854 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2855 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2856
2857 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2858 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2859 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2860
2861 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2862 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2863 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2864
2865 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2866 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2867 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2868
2869 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2870 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2871 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2872
2873 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2874 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2875 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2876
2877 /* Sysenter MSR */
2878 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2879 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2880 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2881
2882 /* System MSRs. */
2883 pCtx->msrEFER = pVM->rem.s.Env.efer;
2884 pCtx->msrSTAR = pVM->rem.s.Env.star;
2885 pCtx->msrPAT = pVM->rem.s.Env.pat;
2886#ifdef TARGET_X86_64
2887 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2888 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2889 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2890 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2891#endif
2892
2893}
2894
2895
2896/**
2897 * Update the VMM state information if we're currently in REM.
2898 *
2899 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2900 * we're currently executing in REM and the VMM state is invalid. This method will of
2901 * course check that we're executing in REM before syncing any data over to the VMM.
2902 *
2903 * @param pVM The VM handle.
2904 * @param pVCpu The VMCPU handle.
2905 */
2906REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2907{
2908 if (pVM->rem.s.fInREM)
2909 remR3StateUpdate(pVM, pVCpu);
2910}
2911
2912
2913#undef LOG_GROUP
2914#define LOG_GROUP LOG_GROUP_REM
2915
2916
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bump cIgnoreAll so REM ignores the memory/TLB traffic this triggers. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2938
2939
/**
 * Replays the handler notification changes
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Pending notifications are queued lock-free on a LIFO list; this function
 * detaches the whole list atomically, reverses it to FIFO order, dispatches
 * each record to the matching remR3NotifyHandlerPhysical* worker and returns
 * the record to the free list as it goes.
 *
 * @param   pVM         VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* strict-build record counter; checked against array size below */
#endif

        /* Lockless purging of pending notifications. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;             /* someone else raced us to an empty list */
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch on the notification kind recorded when it was queued. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3063
3064
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code.
 * @param   pVM             VM handle.
 * @param   pVCpu           VMCPU handle.
 * @param   pvCodePage      Code page address
 *
 * @note    Always returns VINF_SUCCESS; a PGMGstGetPage failure is silently
 *          ignored (the page is simply not flushed).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int     rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
3103
3104
3105/**
3106 * Notification about a successful MMR3PhysRegister() call.
3107 *
3108 * @param pVM VM handle.
3109 * @param GCPhys The physical address the RAM.
3110 * @param cb Size of the memory.
3111 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3112 */
3113REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3114{
3115 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3116 VM_ASSERT_EMT(pVM);
3117
3118 /*
3119 * Validate input - we trust the caller.
3120 */
3121 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3122 Assert(cb);
3123 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3124 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3125
3126 /*
3127 * Base ram? Update GCPhysLastRam.
3128 */
3129 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3130 {
3131 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3132 {
3133 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3134 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3135 }
3136 }
3137
3138 /*
3139 * Register the ram.
3140 */
3141 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3142
3143 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3144 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3145 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3146
3147 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3148}
3149
3150
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.  (Unused by this implementation.)
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.  Writable shadow ROM is registered as plain RAM;
     * readonly ROM gets the IO_MEM_ROM type bit.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3185
3186
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3217
3218
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* MMIO gets the MMIO memory type; other handlers only need remapping when
       there is an HC callback to dispatch to. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3252
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * Public wrapper: first replays any queued handler notifications so ordering
 * is preserved, then forwards to the internal worker.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3271
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Above the RAM range: simply unassign. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            /* Map the range back as ordinary RAM. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3313
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Public wrapper: replays queued handler notifications first, then forwards
 * to the internal worker.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3329
3330
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* MMIO ranges are never moved this way. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Without an HC handler there is nothing registered in REM to move. */
    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3378
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Public wrapper: replays queued handler notifications first, then forwards
 * to the internal worker.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3396
3397/**
3398 * Checks if we're handling access to this page or not.
3399 *
3400 * @returns true if we're trapping access.
3401 * @returns false if we aren't.
3402 * @param pVM The VM handle.
3403 * @param GCPhys The physical address.
3404 *
3405 * @remark This function will only work correctly in VBOX_STRICT builds!
3406 */
3407REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3408{
3409#ifdef VBOX_STRICT
3410 unsigned long off;
3411 REMR3ReplayHandlerNotifications(pVM);
3412
3413 off = get_phys_page_offset(GCPhys);
3414 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3415 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3416 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3417#else
3418 return false;
3419#endif
3420}
3421
3422
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (type bits + addend).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState*          env,
                                             target_ulong       addr,
                                             CPUTLBEntry*       pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored page: dump diagnostic state and abort - executing from
       MMIO or unassigned memory is fatal. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3461
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3479
3480
3481/**
3482 * Read guest RAM and ROM, unsigned 8-bit.
3483 *
3484 * @param SrcGCPhys The source address (guest physical).
3485 */
3486RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3487{
3488 uint8_t val;
3489 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3490 VBOX_CHECK_ADDR(SrcGCPhys);
3491 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3492 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3493#ifdef VBOX_DEBUG_PHYS
3494 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3495#endif
3496 return val;
3497}
3498
3499
3500/**
3501 * Read guest RAM and ROM, signed 8-bit.
3502 *
3503 * @param SrcGCPhys The source address (guest physical).
3504 */
3505RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3506{
3507 int8_t val;
3508 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3509 VBOX_CHECK_ADDR(SrcGCPhys);
3510 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3511 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3512#ifdef VBOX_DEBUG_PHYS
3513 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3514#endif
3515 return val;
3516}
3517
3518
3519/**
3520 * Read guest RAM and ROM, unsigned 16-bit.
3521 *
3522 * @param SrcGCPhys The source address (guest physical).
3523 */
3524RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3525{
3526 uint16_t val;
3527 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3528 VBOX_CHECK_ADDR(SrcGCPhys);
3529 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3530 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3531#ifdef VBOX_DEBUG_PHYS
3532 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3533#endif
3534 return val;
3535}
3536
3537
3538/**
3539 * Read guest RAM and ROM, signed 16-bit.
3540 *
3541 * @param SrcGCPhys The source address (guest physical).
3542 */
3543RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3544{
3545 int16_t val;
3546 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3547 VBOX_CHECK_ADDR(SrcGCPhys);
3548 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3549 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3550#ifdef VBOX_DEBUG_PHYS
3551 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3552#endif
3553 return val;
3554}
3555
3556
3557/**
3558 * Read guest RAM and ROM, unsigned 32-bit.
3559 *
3560 * @param SrcGCPhys The source address (guest physical).
3561 */
3562RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3563{
3564 uint32_t val;
3565 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3566 VBOX_CHECK_ADDR(SrcGCPhys);
3567 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3568 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3569#ifdef VBOX_DEBUG_PHYS
3570 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3571#endif
3572 return val;
3573}
3574
3575
3576/**
3577 * Read guest RAM and ROM, signed 32-bit.
3578 *
3579 * @param SrcGCPhys The source address (guest physical).
3580 */
3581RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3582{
3583 int32_t val;
3584 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3585 VBOX_CHECK_ADDR(SrcGCPhys);
3586 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3587 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3588#ifdef VBOX_DEBUG_PHYS
3589 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3590#endif
3591 return val;
3592}
3593
3594
3595/**
3596 * Read guest RAM and ROM, unsigned 64-bit.
3597 *
3598 * @param SrcGCPhys The source address (guest physical).
3599 */
3600uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3601{
3602 uint64_t val;
3603 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3604 VBOX_CHECK_ADDR(SrcGCPhys);
3605 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3606 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3607#ifdef VBOX_DEBUG_PHYS
3608 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3609#endif
3610 return val;
3611}
3612
3613
3614/**
3615 * Read guest RAM and ROM, signed 64-bit.
3616 *
3617 * @param SrcGCPhys The source address (guest physical).
3618 */
3619int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3620{
3621 int64_t val;
3622 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3623 VBOX_CHECK_ADDR(SrcGCPhys);
3624 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3625 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3626#ifdef VBOX_DEBUG_PHYS
3627 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3628#endif
3629 return val;
3630}
3631
3632
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3650
3651
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3668
3669
/**
 * Write guest RAM, unsigned 16-bit.  (Header previously said 8-bit - copy/paste error.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3686
3687
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3704
3705
3706/**
3707 * Write guest RAM, unsigned 64-bit.
3708 *
3709 * @param DstGCPhys The destination address (guest physical).
3710 * @param val Value
3711 */
3712void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3713{
3714 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3715 VBOX_CHECK_ADDR(DstGCPhys);
3716 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3717 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3718#ifdef VBOX_DEBUG_PHYS
3719 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3720#endif
3721}
3722
3723#undef LOG_GROUP
3724#define LOG_GROUP LOG_GROUP_REM_MMIO
3725
3726/** Read MMIO memory. */
3727static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3728{
3729 uint32_t u32 = 0;
3730 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3731 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3732 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3733 return u32;
3734}
3735
3736/** Read MMIO memory. */
3737static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3738{
3739 uint32_t u32 = 0;
3740 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3741 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3742 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3743 return u32;
3744}
3745
3746/** Read MMIO memory. */
3747static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3748{
3749 uint32_t u32 = 0;
3750 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3751 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3752 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3753 return u32;
3754}
3755
3756/** Write to MMIO memory. */
3757static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3758{
3759 int rc;
3760 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3761 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3762 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3763}
3764
3765/** Write to MMIO memory. */
3766static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3767{
3768 int rc;
3769 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3770 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3771 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3772}
3773
3774/** Write to MMIO memory. */
3775static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3776{
3777 int rc;
3778 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3779 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3780 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3781}
3782
3783
3784#undef LOG_GROUP
3785#define LOG_GROUP LOG_GROUP_REM_HANDLER
3786
3787/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3788
3789static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3790{
3791 uint8_t u8;
3792 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3793 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3794 return u8;
3795}
3796
3797static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3798{
3799 uint16_t u16;
3800 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3801 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3802 return u16;
3803}
3804
3805static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3806{
3807 uint32_t u32;
3808 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3809 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3810 return u32;
3811}
3812
3813static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3814{
3815 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3816 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3817}
3818
3819static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3820{
3821 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3822 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3823}
3824
3825static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3826{
3827 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3828 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3829}
3830
3831/* -+- disassembly -+- */
3832
3833#undef LOG_GROUP
3834#define LOG_GROUP LOG_GROUP_REM_DISAS
3835
3836
3837/**
3838 * Enables or disables singled stepped disassembly.
3839 *
3840 * @returns VBox status code.
3841 * @param pVM VM handle.
3842 * @param fEnable To enable set this flag, to disable clear it.
3843 */
3844static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3845{
3846 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3847 VM_ASSERT_EMT(pVM);
3848
3849 if (fEnable)
3850 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3851 else
3852 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3853 return VINF_SUCCESS;
3854}
3855
3856
3857/**
3858 * Enables or disables singled stepped disassembly.
3859 *
3860 * @returns VBox status code.
3861 * @param pVM VM handle.
3862 * @param fEnable To enable set this flag, to disable clear it.
3863 */
3864REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3865{
3866 int rc;
3867
3868 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3869 if (VM_IS_EMT(pVM))
3870 return remR3DisasEnableStepping(pVM, fEnable);
3871
3872 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3873 AssertRC(rc);
3874 return rc;
3875}
3876
3877
3878#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3879/**
3880 * External Debugger Command: .remstep [on|off|1|0]
3881 */
3882static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3883{
3884 int rc;
3885
3886 if (cArgs == 0)
3887 /*
3888 * Print the current status.
3889 */
3890 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3891 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3892 else
3893 {
3894 /*
3895 * Convert the argument and change the mode.
3896 */
3897 bool fEnable;
3898 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3899 if (RT_SUCCESS(rc))
3900 {
3901 rc = REMR3DisasEnableStepping(pVM, fEnable);
3902 if (RT_SUCCESS(rc))
3903 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3904 else
3905 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3906 }
3907 else
3908 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3909 }
3910 return rc;
3911}
3912#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3913
3914
3915/**
3916 * Disassembles one instruction and prints it to the log.
3917 *
3918 * @returns Success indicator.
3919 * @param env Pointer to the recompiler CPU structure.
3920 * @param f32BitCode Indicates that whether or not the code should
3921 * be disassembled as 16 or 32 bit. If -1 the CS
3922 * selector will be inspected.
3923 * @param pszPrefix
3924 */
3925bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3926{
3927 PVM pVM = env->pVM;
3928 const bool fLog = LogIsEnabled();
3929 const bool fLog2 = LogIs2Enabled();
3930 int rc = VINF_SUCCESS;
3931
3932 /*
3933 * Don't bother if there ain't any log output to do.
3934 */
3935 if (!fLog && !fLog2)
3936 return true;
3937
3938 /*
3939 * Update the state so DBGF reads the correct register values.
3940 */
3941 remR3StateUpdate(pVM, env->pVCpu);
3942
3943 /*
3944 * Log registers if requested.
3945 */
3946 if (fLog2)
3947 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3948
3949 /*
3950 * Disassemble to log.
3951 */
3952 if (fLog)
3953 {
3954 PVMCPU pVCpu = VMMGetCpu(pVM);
3955 char szBuf[256];
3956 szBuf[0] = '\0';
3957 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3958 pVCpu->idCpu,
3959 0, /* Sel */
3960 0, /* GCPtr */
3961 DBGF_DISAS_FLAGS_CURRENT_GUEST
3962 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3963 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3964 szBuf,
3965 sizeof(szBuf),
3966 NULL);
3967 if (RT_FAILURE(rc))
3968 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3969 if (pszPrefix && *pszPrefix)
3970 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3971 else
3972 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3973 }
3974
3975 return RT_SUCCESS(rc);
3976}
3977
3978
3979/**
3980 * Disassemble recompiled code.
3981 *
3982 * @param phFileIgnored Ignored, logfile usually.
3983 * @param pvCode Pointer to the code block.
3984 * @param cb Size of the code block.
3985 */
3986void disas(FILE *phFile, void *pvCode, unsigned long cb)
3987{
3988#ifdef DEBUG_TMP_LOGGING
3989# define DISAS_PRINTF(x...) fprintf(phFile, x)
3990#else
3991# define DISAS_PRINTF(x...) RTLogPrintf(x)
3992 if (LogIs2Enabled())
3993#endif
3994 {
3995 unsigned off = 0;
3996 char szOutput[256];
3997 DISCPUSTATE Cpu;
3998
3999 memset(&Cpu, 0, sizeof(Cpu));
4000#ifdef RT_ARCH_X86
4001 Cpu.mode = CPUMODE_32BIT;
4002#else
4003 Cpu.mode = CPUMODE_64BIT;
4004#endif
4005
4006 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4007 while (off < cb)
4008 {
4009 uint32_t cbInstr;
4010 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4011 DISAS_PRINTF("%s", szOutput);
4012 else
4013 {
4014 DISAS_PRINTF("disas error\n");
4015 cbInstr = 1;
4016#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4017 break;
4018#endif
4019 }
4020 off += cbInstr;
4021 }
4022 }
4023
4024#undef DISAS_PRINTF
4025}
4026
4027
4028/**
4029 * Disassemble guest code.
4030 *
4031 * @param phFileIgnored Ignored, logfile usually.
4032 * @param uCode The guest address of the code to disassemble. (flat?)
4033 * @param cb Number of bytes to disassemble.
4034 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4035 */
4036void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4037{
4038#ifdef DEBUG_TMP_LOGGING
4039# define DISAS_PRINTF(x...) fprintf(phFile, x)
4040#else
4041# define DISAS_PRINTF(x...) RTLogPrintf(x)
4042 if (LogIs2Enabled())
4043#endif
4044 {
4045 PVM pVM = cpu_single_env->pVM;
4046 PVMCPU pVCpu = cpu_single_env->pVCpu;
4047 RTSEL cs;
4048 RTGCUINTPTR eip;
4049
4050 Assert(pVCpu);
4051
4052 /*
4053 * Update the state so DBGF reads the correct register values (flags).
4054 */
4055 remR3StateUpdate(pVM, pVCpu);
4056
4057 /*
4058 * Do the disassembling.
4059 */
4060 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4061 cs = cpu_single_env->segs[R_CS].selector;
4062 eip = uCode - cpu_single_env->segs[R_CS].base;
4063 for (;;)
4064 {
4065 char szBuf[256];
4066 uint32_t cbInstr;
4067 int rc = DBGFR3DisasInstrEx(pVM,
4068 pVCpu->idCpu,
4069 cs,
4070 eip,
4071 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4072 szBuf, sizeof(szBuf),
4073 &cbInstr);
4074 if (RT_SUCCESS(rc))
4075 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
4076 else
4077 {
4078 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4079 cbInstr = 1;
4080 }
4081
4082 /* next */
4083 if (cb <= cbInstr)
4084 break;
4085 cb -= cbInstr;
4086 uCode += cbInstr;
4087 eip += cbInstr;
4088 }
4089 }
4090#undef DISAS_PRINTF
4091}
4092
4093
4094/**
4095 * Looks up a guest symbol.
4096 *
4097 * @returns Pointer to symbol name. This is a static buffer.
4098 * @param orig_addr The address in question.
4099 */
4100const char *lookup_symbol(target_ulong orig_addr)
4101{
4102 PVM pVM = cpu_single_env->pVM;
4103 RTGCINTPTR off = 0;
4104 RTDBGSYMBOL Sym;
4105 DBGFADDRESS Addr;
4106
4107 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4108 if (RT_SUCCESS(rc))
4109 {
4110 static char szSym[sizeof(Sym.szName) + 48];
4111 if (!off)
4112 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4113 else if (off > 0)
4114 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4115 else
4116 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4117 return szSym;
4118 }
4119 return "<N/A>";
4120}
4121
4122
4123#undef LOG_GROUP
4124#define LOG_GROUP LOG_GROUP_REM
4125
4126
4127/* -+- FF notifications -+- */
4128
4129
4130/**
4131 * Notification about a pending interrupt.
4132 *
4133 * @param pVM VM Handle.
4134 * @param pVCpu VMCPU Handle.
4135 * @param u8Interrupt Interrupt
4136 * @thread The emulation thread.
4137 */
4138REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4139{
4140 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4141 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4142}
4143
4144/**
4145 * Notification about a pending interrupt.
4146 *
4147 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4148 * @param pVM VM Handle.
4149 * @param pVCpu VMCPU Handle.
4150 * @thread The emulation thread.
4151 */
4152REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4153{
4154 return pVM->rem.s.u32PendingInterrupt;
4155}
4156
4157/**
4158 * Notification about the interrupt FF being set.
4159 *
4160 * @param pVM VM Handle.
4161 * @param pVCpu VMCPU Handle.
4162 * @thread The emulation thread.
4163 */
4164REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4165{
4166 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4167 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4168 if (pVM->rem.s.fInREM)
4169 {
4170 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4171 CPU_INTERRUPT_EXTERNAL_HARD);
4172 }
4173}
4174
4175
4176/**
4177 * Notification about the interrupt FF being set.
4178 *
4179 * @param pVM VM Handle.
4180 * @param pVCpu VMCPU Handle.
4181 * @thread Any.
4182 */
4183REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4184{
4185 LogFlow(("REMR3NotifyInterruptClear:\n"));
4186 if (pVM->rem.s.fInREM)
4187 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4188}
4189
4190
4191/**
4192 * Notification about pending timer(s).
4193 *
4194 * @param pVM VM Handle.
4195 * @param pVCpuDst The target cpu for this notification.
4196 * TM will not broadcast pending timer events, but use
4197 * a dedicated EMT for them. So, only interrupt REM
4198 * execution if the given CPU is executing in REM.
4199 * @thread Any.
4200 */
4201REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4202{
4203#ifndef DEBUG_bird
4204 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4205#endif
4206 if (pVM->rem.s.fInREM)
4207 {
4208 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4209 {
4210 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4211 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4212 CPU_INTERRUPT_EXTERNAL_TIMER);
4213 }
4214 else
4215 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4216 }
4217 else
4218 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4219}
4220
4221
4222/**
4223 * Notification about pending DMA transfers.
4224 *
4225 * @param pVM VM Handle.
4226 * @thread Any.
4227 */
4228REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4229{
4230 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4231 if (pVM->rem.s.fInREM)
4232 {
4233 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4234 CPU_INTERRUPT_EXTERNAL_DMA);
4235 }
4236}
4237
4238
4239/**
4240 * Notification about pending timer(s).
4241 *
4242 * @param pVM VM Handle.
4243 * @thread Any.
4244 */
4245REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4246{
4247 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4248 if (pVM->rem.s.fInREM)
4249 {
4250 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4251 CPU_INTERRUPT_EXTERNAL_EXIT);
4252 }
4253}
4254
4255
4256/**
4257 * Notification about pending FF set by an external thread.
4258 *
4259 * @param pVM VM handle.
4260 * @thread Any.
4261 */
4262REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4263{
4264 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4265 if (pVM->rem.s.fInREM)
4266 {
4267 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4268 CPU_INTERRUPT_EXTERNAL_EXIT);
4269 }
4270}
4271
4272
#ifdef VBOX_WITH_STATISTICS

/**
 * Maps a STATS_* code to its profiling sample.
 *
 * Factored out of remR3ProfileStart/remR3ProfileStop, which previously
 * duplicated this eight-way switch.
 *
 * @returns Pointer to the sample, NULL (with assertion) if the code is unknown.
 * @param   statcode    The STATS_* code.
 */
static STAMPROFILEADV *remR3StatCodeToSample(int statcode)
{
    switch (statcode)
    {
        case STATS_EMULATE_SINGLE_INSTR:    return &gStatExecuteSingleInstr;
        case STATS_QEMU_COMPILATION:        return &gStatCompilationQEmu;
        case STATS_QEMU_RUN_EMULATED_CODE:  return &gStatRunCodeQEmu;
        case STATS_QEMU_TOTAL:              return &gStatTotalTimeQEmu;
        case STATS_QEMU_RUN_TIMERS:         return &gStatTimers;
        case STATS_TLB_LOOKUP:              return &gStatTBLookup;
        case STATS_IRQ_HANDLING:            return &gStatIRQ;
        case STATS_RAW_CHECK:               return &gStatRawCheck;
        default:
            AssertMsgFailed(("unknown stat %d\n", statcode));
            return NULL;
    }
}


/**
 * Starts the advanced profiling sample for the given statistics code.
 *
 * @param   statcode    The STATS_* code.
 */
void remR3ProfileStart(int statcode)
{
    STAMPROFILEADV *pStat = remR3StatCodeToSample(statcode);
    if (pStat)
        STAM_PROFILE_ADV_START(pStat, a);
}


/**
 * Stops the advanced profiling sample for the given statistics code.
 *
 * @param   statcode    The STATS_* code.
 */
void remR3ProfileStop(int statcode)
{
    STAMPROFILEADV *pStat = remR3StatCodeToSample(statcode);
    if (pStat)
        STAM_PROFILE_ADV_STOP(pStat, a);
}

#endif
4348
4349/**
4350 * Raise an RC, force rem exit.
4351 *
4352 * @param pVM VM handle.
4353 * @param rc The rc.
4354 */
4355void remR3RaiseRC(PVM pVM, int rc)
4356{
4357 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4358 Assert(pVM->rem.s.fInREM);
4359 VM_ASSERT_EMT(pVM);
4360 pVM->rem.s.rc = rc;
4361 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4362}
4363
4364
4365/* -+- timers -+- */
4366
4367uint64_t cpu_get_tsc(CPUX86State *env)
4368{
4369 STAM_COUNTER_INC(&gStatCpuGetTSC);
4370 return TMCpuTickGet(env->pVCpu);
4371}
4372
4373
4374/* -+- interrupts -+- */
4375
4376void cpu_set_ferr(CPUX86State *env)
4377{
4378 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4379 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4380}
4381
4382int cpu_get_pic_interrupt(CPUState *env)
4383{
4384 uint8_t u8Interrupt;
4385 int rc;
4386
4387 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4388 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4389 * with the (a)pic.
4390 */
4391 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4392 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4393 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4394 * remove this kludge. */
4395 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4396 {
4397 rc = VINF_SUCCESS;
4398 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4399 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4400 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4401 }
4402 else
4403 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4404
4405 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4406 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4407 if (RT_SUCCESS(rc))
4408 {
4409 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4410 env->interrupt_request |= CPU_INTERRUPT_HARD;
4411 return u8Interrupt;
4412 }
4413 return -1;
4414}
4415
4416
4417/* -+- local apic -+- */
4418
#if 0 /* CPUMSetGuestMsr does this now. */
/** Sets the guest APIC base MSR (disabled: superseded by CPUMSetGuestMsr). */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
#endif
4426
4427uint64_t cpu_get_apic_base(CPUX86State *env)
4428{
4429 uint64_t u64;
4430 int rc = PDMApicGetBase(env->pVM, &u64);
4431 if (RT_SUCCESS(rc))
4432 {
4433 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4434 return u64;
4435 }
4436 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4437 return 0;
4438}
4439
4440void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4441{
4442 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4443 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4444}
4445
4446uint8_t cpu_get_apic_tpr(CPUX86State *env)
4447{
4448 uint8_t u8;
4449 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4450 if (RT_SUCCESS(rc))
4451 {
4452 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4453 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4454 }
4455 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4456 return 0;
4457}
4458
4459/**
4460 * Read an MSR.
4461 *
4462 * @retval 0 success.
4463 * @retval -1 failure, raise \#GP(0).
4464 * @param env The cpu state.
4465 * @param idMsr The MSR to read.
4466 * @param puValue Where to return the value.
4467 */
4468int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4469{
4470 Assert(env->pVCpu);
4471 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4472}
4473
4474/**
4475 * Write to an MSR.
4476 *
4477 * @retval 0 success.
4478 * @retval -1 failure, raise \#GP(0).
4479 * @param env The cpu state.
4480 * @param idMsr The MSR to read.
4481 * @param puValue Where to return the value.
4482 */
4483int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4484{
4485 Assert(env->pVCpu);
4486 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4487}
4488
4489/* -+- I/O Ports -+- */
4490
4491#undef LOG_GROUP
4492#define LOG_GROUP LOG_GROUP_REM_IOPORT
4493
4494void cpu_outb(CPUState *env, int addr, int val)
4495{
4496 int rc;
4497
4498 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4499 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4500
4501 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4502 if (RT_LIKELY(rc == VINF_SUCCESS))
4503 return;
4504 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4505 {
4506 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4507 remR3RaiseRC(env->pVM, rc);
4508 return;
4509 }
4510 remAbort(rc, __FUNCTION__);
4511}
4512
4513void cpu_outw(CPUState *env, int addr, int val)
4514{
4515 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4516 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4517 if (RT_LIKELY(rc == VINF_SUCCESS))
4518 return;
4519 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4520 {
4521 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4522 remR3RaiseRC(env->pVM, rc);
4523 return;
4524 }
4525 remAbort(rc, __FUNCTION__);
4526}
4527
4528void cpu_outl(CPUState *env, int addr, int val)
4529{
4530 int rc;
4531 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4532 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4533 if (RT_LIKELY(rc == VINF_SUCCESS))
4534 return;
4535 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4536 {
4537 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4538 remR3RaiseRC(env->pVM, rc);
4539 return;
4540 }
4541 remAbort(rc, __FUNCTION__);
4542}
4543
4544int cpu_inb(CPUState *env, int addr)
4545{
4546 uint32_t u32 = 0;
4547 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4548 if (RT_LIKELY(rc == VINF_SUCCESS))
4549 {
4550 if (/*addr != 0x61 && */addr != 0x71)
4551 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4552 return (int)u32;
4553 }
4554 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4555 {
4556 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4557 remR3RaiseRC(env->pVM, rc);
4558 return (int)u32;
4559 }
4560 remAbort(rc, __FUNCTION__);
4561 return 0xff;
4562}
4563
4564int cpu_inw(CPUState *env, int addr)
4565{
4566 uint32_t u32 = 0;
4567 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4568 if (RT_LIKELY(rc == VINF_SUCCESS))
4569 {
4570 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4571 return (int)u32;
4572 }
4573 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4574 {
4575 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4576 remR3RaiseRC(env->pVM, rc);
4577 return (int)u32;
4578 }
4579 remAbort(rc, __FUNCTION__);
4580 return 0xffff;
4581}
4582
4583int cpu_inl(CPUState *env, int addr)
4584{
4585 uint32_t u32 = 0;
4586 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4587 if (RT_LIKELY(rc == VINF_SUCCESS))
4588 {
4589//if (addr==0x01f0 && u32 == 0x6b6d)
4590// loglevel = ~0;
4591 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4592 return (int)u32;
4593 }
4594 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4595 {
4596 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4597 remR3RaiseRC(env->pVM, rc);
4598 return (int)u32;
4599 }
4600 remAbort(rc, __FUNCTION__);
4601 return 0xffffffff;
4602}
4603
4604#undef LOG_GROUP
4605#define LOG_GROUP LOG_GROUP_REM
4606
4607
4608/* -+- helpers and misc other interfaces -+- */
4609
4610/**
4611 * Perform the CPUID instruction.
4612 *
4613 * ASMCpuId cannot be invoked from some source files where this is used because of global
4614 * register allocations.
4615 *
4616 * @param env Pointer to the recompiler CPU structure.
4617 * @param uOperator CPUID operation (eax).
4618 * @param pvEAX Where to store eax.
4619 * @param pvEBX Where to store ebx.
4620 * @param pvECX Where to store ecx.
4621 * @param pvEDX Where to store edx.
4622 */
4623void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4624{
4625 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4626}
4627
4628
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): disabled code - it calls REMR3StateBack with one argument and
 * EMR3FatalError with pVM, while the live code in this file (remAbort,
 * cpu_abort) passes (pVM, pVCpu) and pVCpu respectively; update before
 * re-enabling.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4658
4659/**
4660 * Interface for the qemu cpu to report unhandled situation
4661 * raising a fatal VM error.
4662 */
4663void cpu_abort(CPUState *env, const char *pszFormat, ...)
4664{
4665 va_list va;
4666 PVM pVM;
4667 PVMCPU pVCpu;
4668 char szMsg[256];
4669
4670 /*
4671 * Bitch about it.
4672 */
4673 RTLogFlags(NULL, "nodisabled nobuffered");
4674 RTLogFlush(NULL);
4675
4676 va_start(va, pszFormat);
4677#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4678 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4679 unsigned cArgs = 0;
4680 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4681 const char *psz = strchr(pszFormat, '%');
4682 while (psz && cArgs < 6)
4683 {
4684 auArgs[cArgs++] = va_arg(va, uintptr_t);
4685 psz = strchr(psz + 1, '%');
4686 }
4687 switch (cArgs)
4688 {
4689 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4690 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4691 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4692 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4693 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4694 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4695 default:
4696 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4697 }
4698#else
4699 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4700#endif
4701 va_end(va);
4702
4703 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4704 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4705
4706 /*
4707 * If we're in REM context we'll sync back the state before 'jumping' to
4708 * the EMs failure handling.
4709 */
4710 pVM = cpu_single_env->pVM;
4711 pVCpu = cpu_single_env->pVCpu;
4712 Assert(pVCpu);
4713
4714 if (pVM->rem.s.fInREM)
4715 REMR3StateBack(pVM, pVCpu);
4716 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4717 AssertMsgFailed(("EMR3FatalError returned!\n"));
4718}
4719
4720
4721/**
4722 * Aborts the VM.
4723 *
4724 * @param rc VBox error code.
4725 * @param pszTip Hint about why/when this happened.
4726 */
4727void remAbort(int rc, const char *pszTip)
4728{
4729 PVM pVM;
4730 PVMCPU pVCpu;
4731
4732 /*
4733 * Bitch about it.
4734 */
4735 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4736 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4737
4738 /*
4739 * Jump back to where we entered the recompiler.
4740 */
4741 pVM = cpu_single_env->pVM;
4742 pVCpu = cpu_single_env->pVCpu;
4743 Assert(pVCpu);
4744
4745 if (pVM->rem.s.fInREM)
4746 REMR3StateBack(pVM, pVCpu);
4747
4748 EMR3FatalError(pVCpu, rc);
4749 AssertMsgFailed(("EMR3FatalError returned!\n"));
4750}
4751
4752
4753/**
4754 * Dumps a linux system call.
4755 * @param pVCpu VMCPU handle.
4756 */
4757void remR3DumpLnxSyscall(PVMCPU pVCpu)
4758{
4759 static const char *apsz[] =
4760 {
4761 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4762 "sys_exit",
4763 "sys_fork",
4764 "sys_read",
4765 "sys_write",
4766 "sys_open", /* 5 */
4767 "sys_close",
4768 "sys_waitpid",
4769 "sys_creat",
4770 "sys_link",
4771 "sys_unlink", /* 10 */
4772 "sys_execve",
4773 "sys_chdir",
4774 "sys_time",
4775 "sys_mknod",
4776 "sys_chmod", /* 15 */
4777 "sys_lchown16",
4778 "sys_ni_syscall", /* old break syscall holder */
4779 "sys_stat",
4780 "sys_lseek",
4781 "sys_getpid", /* 20 */
4782 "sys_mount",
4783 "sys_oldumount",
4784 "sys_setuid16",
4785 "sys_getuid16",
4786 "sys_stime", /* 25 */
4787 "sys_ptrace",
4788 "sys_alarm",
4789 "sys_fstat",
4790 "sys_pause",
4791 "sys_utime", /* 30 */
4792 "sys_ni_syscall", /* old stty syscall holder */
4793 "sys_ni_syscall", /* old gtty syscall holder */
4794 "sys_access",
4795 "sys_nice",
4796 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4797 "sys_sync",
4798 "sys_kill",
4799 "sys_rename",
4800 "sys_mkdir",
4801 "sys_rmdir", /* 40 */
4802 "sys_dup",
4803 "sys_pipe",
4804 "sys_times",
4805 "sys_ni_syscall", /* old prof syscall holder */
4806 "sys_brk", /* 45 */
4807 "sys_setgid16",
4808 "sys_getgid16",
4809 "sys_signal",
4810 "sys_geteuid16",
4811 "sys_getegid16", /* 50 */
4812 "sys_acct",
4813 "sys_umount", /* recycled never used phys() */
4814 "sys_ni_syscall", /* old lock syscall holder */
4815 "sys_ioctl",
4816 "sys_fcntl", /* 55 */
4817 "sys_ni_syscall", /* old mpx syscall holder */
4818 "sys_setpgid",
4819 "sys_ni_syscall", /* old ulimit syscall holder */
4820 "sys_olduname",
4821 "sys_umask", /* 60 */
4822 "sys_chroot",
4823 "sys_ustat",
4824 "sys_dup2",
4825 "sys_getppid",
4826 "sys_getpgrp", /* 65 */
4827 "sys_setsid",
4828 "sys_sigaction",
4829 "sys_sgetmask",
4830 "sys_ssetmask",
4831 "sys_setreuid16", /* 70 */
4832 "sys_setregid16",
4833 "sys_sigsuspend",
4834 "sys_sigpending",
4835 "sys_sethostname",
4836 "sys_setrlimit", /* 75 */
4837 "sys_old_getrlimit",
4838 "sys_getrusage",
4839 "sys_gettimeofday",
4840 "sys_settimeofday",
4841 "sys_getgroups16", /* 80 */
4842 "sys_setgroups16",
4843 "old_select",
4844 "sys_symlink",
4845 "sys_lstat",
4846 "sys_readlink", /* 85 */
4847 "sys_uselib",
4848 "sys_swapon",
4849 "sys_reboot",
4850 "old_readdir",
4851 "old_mmap", /* 90 */
4852 "sys_munmap",
4853 "sys_truncate",
4854 "sys_ftruncate",
4855 "sys_fchmod",
4856 "sys_fchown16", /* 95 */
4857 "sys_getpriority",
4858 "sys_setpriority",
4859 "sys_ni_syscall", /* old profil syscall holder */
4860 "sys_statfs",
4861 "sys_fstatfs", /* 100 */
4862 "sys_ioperm",
4863 "sys_socketcall",
4864 "sys_syslog",
4865 "sys_setitimer",
4866 "sys_getitimer", /* 105 */
4867 "sys_newstat",
4868 "sys_newlstat",
4869 "sys_newfstat",
4870 "sys_uname",
4871 "sys_iopl", /* 110 */
4872 "sys_vhangup",
4873 "sys_ni_syscall", /* old "idle" system call */
4874 "sys_vm86old",
4875 "sys_wait4",
4876 "sys_swapoff", /* 115 */
4877 "sys_sysinfo",
4878 "sys_ipc",
4879 "sys_fsync",
4880 "sys_sigreturn",
4881 "sys_clone", /* 120 */
4882 "sys_setdomainname",
4883 "sys_newuname",
4884 "sys_modify_ldt",
4885 "sys_adjtimex",
4886 "sys_mprotect", /* 125 */
4887 "sys_sigprocmask",
4888 "sys_ni_syscall", /* old "create_module" */
4889 "sys_init_module",
4890 "sys_delete_module",
4891 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4892 "sys_quotactl",
4893 "sys_getpgid",
4894 "sys_fchdir",
4895 "sys_bdflush",
4896 "sys_sysfs", /* 135 */
4897 "sys_personality",
4898 "sys_ni_syscall", /* reserved for afs_syscall */
4899 "sys_setfsuid16",
4900 "sys_setfsgid16",
4901 "sys_llseek", /* 140 */
4902 "sys_getdents",
4903 "sys_select",
4904 "sys_flock",
4905 "sys_msync",
4906 "sys_readv", /* 145 */
4907 "sys_writev",
4908 "sys_getsid",
4909 "sys_fdatasync",
4910 "sys_sysctl",
4911 "sys_mlock", /* 150 */
4912 "sys_munlock",
4913 "sys_mlockall",
4914 "sys_munlockall",
4915 "sys_sched_setparam",
4916 "sys_sched_getparam", /* 155 */
4917 "sys_sched_setscheduler",
4918 "sys_sched_getscheduler",
4919 "sys_sched_yield",
4920 "sys_sched_get_priority_max",
4921 "sys_sched_get_priority_min", /* 160 */
4922 "sys_sched_rr_get_interval",
4923 "sys_nanosleep",
4924 "sys_mremap",
4925 "sys_setresuid16",
4926 "sys_getresuid16", /* 165 */
4927 "sys_vm86",
4928 "sys_ni_syscall", /* Old sys_query_module */
4929 "sys_poll",
4930 "sys_nfsservctl",
4931 "sys_setresgid16", /* 170 */
4932 "sys_getresgid16",
4933 "sys_prctl",
4934 "sys_rt_sigreturn",
4935 "sys_rt_sigaction",
4936 "sys_rt_sigprocmask", /* 175 */
4937 "sys_rt_sigpending",
4938 "sys_rt_sigtimedwait",
4939 "sys_rt_sigqueueinfo",
4940 "sys_rt_sigsuspend",
4941 "sys_pread64", /* 180 */
4942 "sys_pwrite64",
4943 "sys_chown16",
4944 "sys_getcwd",
4945 "sys_capget",
4946 "sys_capset", /* 185 */
4947 "sys_sigaltstack",
4948 "sys_sendfile",
4949 "sys_ni_syscall", /* reserved for streams1 */
4950 "sys_ni_syscall", /* reserved for streams2 */
4951 "sys_vfork", /* 190 */
4952 "sys_getrlimit",
4953 "sys_mmap2",
4954 "sys_truncate64",
4955 "sys_ftruncate64",
4956 "sys_stat64", /* 195 */
4957 "sys_lstat64",
4958 "sys_fstat64",
4959 "sys_lchown",
4960 "sys_getuid",
4961 "sys_getgid", /* 200 */
4962 "sys_geteuid",
4963 "sys_getegid",
4964 "sys_setreuid",
4965 "sys_setregid",
4966 "sys_getgroups", /* 205 */
4967 "sys_setgroups",
4968 "sys_fchown",
4969 "sys_setresuid",
4970 "sys_getresuid",
4971 "sys_setresgid", /* 210 */
4972 "sys_getresgid",
4973 "sys_chown",
4974 "sys_setuid",
4975 "sys_setgid",
4976 "sys_setfsuid", /* 215 */
4977 "sys_setfsgid",
4978 "sys_pivot_root",
4979 "sys_mincore",
4980 "sys_madvise",
4981 "sys_getdents64", /* 220 */
4982 "sys_fcntl64",
4983 "sys_ni_syscall", /* reserved for TUX */
4984 "sys_ni_syscall",
4985 "sys_gettid",
4986 "sys_readahead", /* 225 */
4987 "sys_setxattr",
4988 "sys_lsetxattr",
4989 "sys_fsetxattr",
4990 "sys_getxattr",
4991 "sys_lgetxattr", /* 230 */
4992 "sys_fgetxattr",
4993 "sys_listxattr",
4994 "sys_llistxattr",
4995 "sys_flistxattr",
4996 "sys_removexattr", /* 235 */
4997 "sys_lremovexattr",
4998 "sys_fremovexattr",
4999 "sys_tkill",
5000 "sys_sendfile64",
5001 "sys_futex", /* 240 */
5002 "sys_sched_setaffinity",
5003 "sys_sched_getaffinity",
5004 "sys_set_thread_area",
5005 "sys_get_thread_area",
5006 "sys_io_setup", /* 245 */
5007 "sys_io_destroy",
5008 "sys_io_getevents",
5009 "sys_io_submit",
5010 "sys_io_cancel",
5011 "sys_fadvise64", /* 250 */
5012 "sys_ni_syscall",
5013 "sys_exit_group",
5014 "sys_lookup_dcookie",
5015 "sys_epoll_create",
5016 "sys_epoll_ctl", /* 255 */
5017 "sys_epoll_wait",
5018 "sys_remap_file_pages",
5019 "sys_set_tid_address",
5020 "sys_timer_create",
5021 "sys_timer_settime", /* 260 */
5022 "sys_timer_gettime",
5023 "sys_timer_getoverrun",
5024 "sys_timer_delete",
5025 "sys_clock_settime",
5026 "sys_clock_gettime", /* 265 */
5027 "sys_clock_getres",
5028 "sys_clock_nanosleep",
5029 "sys_statfs64",
5030 "sys_fstatfs64",
5031 "sys_tgkill", /* 270 */
5032 "sys_utimes",
5033 "sys_fadvise64_64",
5034 "sys_ni_syscall" /* sys_vserver */
5035 };
5036
5037 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5038 switch (uEAX)
5039 {
5040 default:
5041 if (uEAX < RT_ELEMENTS(apsz))
5042 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5043 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5044 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5045 else
5046 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5047 break;
5048
5049 }
5050}
5051
5052
5053/**
5054 * Dumps an OpenBSD system call.
5055 * @param pVCpu VMCPU handle.
5056 */
5057void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5058{
5059 static const char *apsz[] =
5060 {
5061 "SYS_syscall", //0
5062 "SYS_exit", //1
5063 "SYS_fork", //2
5064 "SYS_read", //3
5065 "SYS_write", //4
5066 "SYS_open", //5
5067 "SYS_close", //6
5068 "SYS_wait4", //7
5069 "SYS_8",
5070 "SYS_link", //9
5071 "SYS_unlink", //10
5072 "SYS_11",
5073 "SYS_chdir", //12
5074 "SYS_fchdir", //13
5075 "SYS_mknod", //14
5076 "SYS_chmod", //15
5077 "SYS_chown", //16
5078 "SYS_break", //17
5079 "SYS_18",
5080 "SYS_19",
5081 "SYS_getpid", //20
5082 "SYS_mount", //21
5083 "SYS_unmount", //22
5084 "SYS_setuid", //23
5085 "SYS_getuid", //24
5086 "SYS_geteuid", //25
5087 "SYS_ptrace", //26
5088 "SYS_recvmsg", //27
5089 "SYS_sendmsg", //28
5090 "SYS_recvfrom", //29
5091 "SYS_accept", //30
5092 "SYS_getpeername", //31
5093 "SYS_getsockname", //32
5094 "SYS_access", //33
5095 "SYS_chflags", //34
5096 "SYS_fchflags", //35
5097 "SYS_sync", //36
5098 "SYS_kill", //37
5099 "SYS_38",
5100 "SYS_getppid", //39
5101 "SYS_40",
5102 "SYS_dup", //41
5103 "SYS_opipe", //42
5104 "SYS_getegid", //43
5105 "SYS_profil", //44
5106 "SYS_ktrace", //45
5107 "SYS_sigaction", //46
5108 "SYS_getgid", //47
5109 "SYS_sigprocmask", //48
5110 "SYS_getlogin", //49
5111 "SYS_setlogin", //50
5112 "SYS_acct", //51
5113 "SYS_sigpending", //52
5114 "SYS_osigaltstack", //53
5115 "SYS_ioctl", //54
5116 "SYS_reboot", //55
5117 "SYS_revoke", //56
5118 "SYS_symlink", //57
5119 "SYS_readlink", //58
5120 "SYS_execve", //59
5121 "SYS_umask", //60
5122 "SYS_chroot", //61
5123 "SYS_62",
5124 "SYS_63",
5125 "SYS_64",
5126 "SYS_65",
5127 "SYS_vfork", //66
5128 "SYS_67",
5129 "SYS_68",
5130 "SYS_sbrk", //69
5131 "SYS_sstk", //70
5132 "SYS_61",
5133 "SYS_vadvise", //72
5134 "SYS_munmap", //73
5135 "SYS_mprotect", //74
5136 "SYS_madvise", //75
5137 "SYS_76",
5138 "SYS_77",
5139 "SYS_mincore", //78
5140 "SYS_getgroups", //79
5141 "SYS_setgroups", //80
5142 "SYS_getpgrp", //81
5143 "SYS_setpgid", //82
5144 "SYS_setitimer", //83
5145 "SYS_84",
5146 "SYS_85",
5147 "SYS_getitimer", //86
5148 "SYS_87",
5149 "SYS_88",
5150 "SYS_89",
5151 "SYS_dup2", //90
5152 "SYS_91",
5153 "SYS_fcntl", //92
5154 "SYS_select", //93
5155 "SYS_94",
5156 "SYS_fsync", //95
5157 "SYS_setpriority", //96
5158 "SYS_socket", //97
5159 "SYS_connect", //98
5160 "SYS_99",
5161 "SYS_getpriority", //100
5162 "SYS_101",
5163 "SYS_102",
5164 "SYS_sigreturn", //103
5165 "SYS_bind", //104
5166 "SYS_setsockopt", //105
5167 "SYS_listen", //106
5168 "SYS_107",
5169 "SYS_108",
5170 "SYS_109",
5171 "SYS_110",
5172 "SYS_sigsuspend", //111
5173 "SYS_112",
5174 "SYS_113",
5175 "SYS_114",
5176 "SYS_115",
5177 "SYS_gettimeofday", //116
5178 "SYS_getrusage", //117
5179 "SYS_getsockopt", //118
5180 "SYS_119",
5181 "SYS_readv", //120
5182 "SYS_writev", //121
5183 "SYS_settimeofday", //122
5184 "SYS_fchown", //123
5185 "SYS_fchmod", //124
5186 "SYS_125",
5187 "SYS_setreuid", //126
5188 "SYS_setregid", //127
5189 "SYS_rename", //128
5190 "SYS_129",
5191 "SYS_130",
5192 "SYS_flock", //131
5193 "SYS_mkfifo", //132
5194 "SYS_sendto", //133
5195 "SYS_shutdown", //134
5196 "SYS_socketpair", //135
5197 "SYS_mkdir", //136
5198 "SYS_rmdir", //137
5199 "SYS_utimes", //138
5200 "SYS_139",
5201 "SYS_adjtime", //140
5202 "SYS_141",
5203 "SYS_142",
5204 "SYS_143",
5205 "SYS_144",
5206 "SYS_145",
5207 "SYS_146",
5208 "SYS_setsid", //147
5209 "SYS_quotactl", //148
5210 "SYS_149",
5211 "SYS_150",
5212 "SYS_151",
5213 "SYS_152",
5214 "SYS_153",
5215 "SYS_154",
5216 "SYS_nfssvc", //155
5217 "SYS_156",
5218 "SYS_157",
5219 "SYS_158",
5220 "SYS_159",
5221 "SYS_160",
5222 "SYS_getfh", //161
5223 "SYS_162",
5224 "SYS_163",
5225 "SYS_164",
5226 "SYS_sysarch", //165
5227 "SYS_166",
5228 "SYS_167",
5229 "SYS_168",
5230 "SYS_169",
5231 "SYS_170",
5232 "SYS_171",
5233 "SYS_172",
5234 "SYS_pread", //173
5235 "SYS_pwrite", //174
5236 "SYS_175",
5237 "SYS_176",
5238 "SYS_177",
5239 "SYS_178",
5240 "SYS_179",
5241 "SYS_180",
5242 "SYS_setgid", //181
5243 "SYS_setegid", //182
5244 "SYS_seteuid", //183
5245 "SYS_lfs_bmapv", //184
5246 "SYS_lfs_markv", //185
5247 "SYS_lfs_segclean", //186
5248 "SYS_lfs_segwait", //187
5249 "SYS_188",
5250 "SYS_189",
5251 "SYS_190",
5252 "SYS_pathconf", //191
5253 "SYS_fpathconf", //192
5254 "SYS_swapctl", //193
5255 "SYS_getrlimit", //194
5256 "SYS_setrlimit", //195
5257 "SYS_getdirentries", //196
5258 "SYS_mmap", //197
5259 "SYS___syscall", //198
5260 "SYS_lseek", //199
5261 "SYS_truncate", //200
5262 "SYS_ftruncate", //201
5263 "SYS___sysctl", //202
5264 "SYS_mlock", //203
5265 "SYS_munlock", //204
5266 "SYS_205",
5267 "SYS_futimes", //206
5268 "SYS_getpgid", //207
5269 "SYS_xfspioctl", //208
5270 "SYS_209",
5271 "SYS_210",
5272 "SYS_211",
5273 "SYS_212",
5274 "SYS_213",
5275 "SYS_214",
5276 "SYS_215",
5277 "SYS_216",
5278 "SYS_217",
5279 "SYS_218",
5280 "SYS_219",
5281 "SYS_220",
5282 "SYS_semget", //221
5283 "SYS_222",
5284 "SYS_223",
5285 "SYS_224",
5286 "SYS_msgget", //225
5287 "SYS_msgsnd", //226
5288 "SYS_msgrcv", //227
5289 "SYS_shmat", //228
5290 "SYS_229",
5291 "SYS_shmdt", //230
5292 "SYS_231",
5293 "SYS_clock_gettime", //232
5294 "SYS_clock_settime", //233
5295 "SYS_clock_getres", //234
5296 "SYS_235",
5297 "SYS_236",
5298 "SYS_237",
5299 "SYS_238",
5300 "SYS_239",
5301 "SYS_nanosleep", //240
5302 "SYS_241",
5303 "SYS_242",
5304 "SYS_243",
5305 "SYS_244",
5306 "SYS_245",
5307 "SYS_246",
5308 "SYS_247",
5309 "SYS_248",
5310 "SYS_249",
5311 "SYS_minherit", //250
5312 "SYS_rfork", //251
5313 "SYS_poll", //252
5314 "SYS_issetugid", //253
5315 "SYS_lchown", //254
5316 "SYS_getsid", //255
5317 "SYS_msync", //256
5318 "SYS_257",
5319 "SYS_258",
5320 "SYS_259",
5321 "SYS_getfsstat", //260
5322 "SYS_statfs", //261
5323 "SYS_fstatfs", //262
5324 "SYS_pipe", //263
5325 "SYS_fhopen", //264
5326 "SYS_265",
5327 "SYS_fhstatfs", //266
5328 "SYS_preadv", //267
5329 "SYS_pwritev", //268
5330 "SYS_kqueue", //269
5331 "SYS_kevent", //270
5332 "SYS_mlockall", //271
5333 "SYS_munlockall", //272
5334 "SYS_getpeereid", //273
5335 "SYS_274",
5336 "SYS_275",
5337 "SYS_276",
5338 "SYS_277",
5339 "SYS_278",
5340 "SYS_279",
5341 "SYS_280",
5342 "SYS_getresuid", //281
5343 "SYS_setresuid", //282
5344 "SYS_getresgid", //283
5345 "SYS_setresgid", //284
5346 "SYS_285",
5347 "SYS_mquery", //286
5348 "SYS_closefrom", //287
5349 "SYS_sigaltstack", //288
5350 "SYS_shmget", //289
5351 "SYS_semop", //290
5352 "SYS_stat", //291
5353 "SYS_fstat", //292
5354 "SYS_lstat", //293
5355 "SYS_fhstat", //294
5356 "SYS___semctl", //295
5357 "SYS_shmctl", //296
5358 "SYS_msgctl", //297
5359 "SYS_MAXSYSCALL", //298
5360 //299
5361 //300
5362 };
5363 uint32_t uEAX;
5364 if (!LogIsEnabled())
5365 return;
5366 uEAX = CPUMGetGuestEAX(pVCpu);
5367 switch (uEAX)
5368 {
5369 default:
5370 if (uEAX < RT_ELEMENTS(apsz))
5371 {
5372 uint32_t au32Args[8] = {0};
5373 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5374 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5375 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5376 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5377 }
5378 else
5379 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5380 break;
5381 }
5382}
5383
5384
5385#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5386/**
5387 * The Dll main entry point (stub).
5388 */
5389bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5390{
5391 return true;
5392}
5393
/**
 * Minimal memcpy for the no-CRT x86 Windows build.
 *
 * Simple byte-by-byte forward copy; like the standard memcpy, the regions
 * must not overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer, must not overlap dst.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src;   /* const kept: the original dropped the qualifier. */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5401
5402#endif
5403
/**
 * QEMU callout stub — presumably invoked when the CPU's SMM state changes
 * (TODO confirm against the QEMU side); the VBox recompiler keeps no
 * SMM-related state, so this is deliberately a no-op.
 *
 * @param   env     The CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
    /* Intentionally empty. */
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette