VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 20746

Last change on this file since 20746 was 20746, checked in by vboxsync, 16 years ago

REMR3ReplayHandlerNotification,remNotifyHandlerInsert: paranoia.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 164.2 KB
Line 
1/* $Id: VBoxRecompiler.c 20746 2009-06-21 19:53:12Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Element [0] handles byte, [1] word and [2] dword reads; the table is
 * registered with the recompiler via cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same size ordering as g_apfnMMIORead: [0]=U8, [1]=U16, [2]=U32. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages covered by PGM access handlers). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for pages covered by PGM access handlers). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional numeric/boolean argument selecting the new state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,        fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER, 0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered exactly once by REMR3Init via DBGCRegisterCommands(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc(_1K) in REMR3Init. */
uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers the MMIO/handler memory
 * callback tables and the saved state unit, registers statistics and debugger
 * commands, and builds the handler-notification free list.  Must be called
 * before any physical memory registrations are made (asserted below).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications (matching decrement after the io-memory
       registrations below brackets the whole setup phase). */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Must be in the lower 4G so generated code can jump to/from it
       (see the comment on the code_gen_prologue declaration). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest CPUID feature bits (std leaf 1, ext leaf 0x80000001)
       into the recompiler CPU environment. */
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered once per process (hence the static flag);
     * note the inner rc intentionally shadows the outer one so a registration
     * failure is non-fatal.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     * NOTE(review): the descriptions for TBLookup/IRQ/RawCheck below look
     * copy-pasted from the Timers entry; the registered names are correct.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Build the handler-notification free list: every entry chained through
     * idxNext, terminated with -1; the pending list starts out empty (-1).
     */
    PREMHANDLERNOTIFICATION pCur;
    unsigned i;

    pVM->rem.s.idxPendingList = -1;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1; i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }

    /* Terminate the chain with the last entry. */
    pCur = &pVM->rem.s.aHandlerNotifications[RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1];
    pCur->idxNext = -1;
    pCur->idxSelf = RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1;

    return rc;
}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
472/**
473 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM handle.
477 * @param fGuarded Whether to guard the map.
478 */
479static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
480{
481 int rc = VINF_SUCCESS;
482 RTGCPHYS cb;
483
484 cb = pVM->rem.s.GCPhysLastRam + 1;
485 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
486 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
487 VERR_OUT_OF_RANGE);
488 phys_ram_size = cb;
489 phys_ram_dirty_size = cb >> PAGE_SHIFT;
490 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
491
492 if (!fGuarded)
493 {
494 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
495 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
496 }
497 else
498 {
499 /*
500 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
501 */
502 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
503 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
504 if (cbBitmapFull == cbBitmapAligned)
505 cbBitmapFull += _4G >> PAGE_SHIFT;
506 else if (cbBitmapFull - cbBitmapAligned < _64K)
507 cbBitmapFull += _64K;
508
509 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
510 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
511
512 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
513 if (RT_FAILURE(rc))
514 {
515 RTMemPageFree(phys_ram_dirty);
516 AssertLogRelRCReturn(rc, rc);
517 }
518
519 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
520 }
521
522 /* initialize it. */
523 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
524 return rc;
525}
526
527
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * This is the exact mirror of the statistics registrations done by REMR3Init.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* Counters exported from exec.c (registered with STAM_REL_REG). */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
598
599
600/**
601 * The VM is being reset.
602 *
603 * For the REM component this means to call the cpu_reset() and
604 * reinitialize some state variables.
605 *
606 * @param pVM VM handle.
607 */
608REMR3DECL(void) REMR3Reset(PVM pVM)
609{
610 /*
611 * Reset the REM cpu.
612 */
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
615 cpu_reset(&pVM->rem.s.Env);
616 pVM->rem.s.cInvalidatedPages = 0;
617 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
618 Assert(pVM->rem.s.cIgnoreAll == 0);
619
620 /* Clear raw ring 0 init state */
621 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
622
623 /* Flush the TBs the next time we execute code here. */
624 pVM->rem.s.fFlushTBs = true;
625}
626
627
628/**
629 * Execute state save operation.
630 *
631 * @returns VBox status code.
632 * @param pVM VM Handle.
633 * @param pSSM SSM operation handle.
634 */
635static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
636{
637 PREM pRem = &pVM->rem.s;
638
639 /*
640 * Save the required CPU Env bits.
641 * (Not much because we're never in REM when doing the save.)
642 */
643 LogFlow(("remR3Save:\n"));
644 Assert(!pRem->fInREM);
645 SSMR3PutU32(pSSM, pRem->Env.hflags);
646 SSMR3PutU32(pSSM, ~0); /* separator */
647
648 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
649 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
650 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
651
652 return SSMR3PutU32(pSSM, ~0); /* terminator */
653}
654
655
/**
 * Execute state load operation.
 *
 * Handles both the current layout and the 1.6 layout (which carried a
 * redundant CPU state dump and an invalidated-pages list).
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         * (Bounds-check the count before reading the page list.)
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     * NOTE(review): the loop variable pVCpu shadows the one declared above.
     */
    for (i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
782
783
784
785#undef LOG_GROUP
786#define LOG_GROUP LOG_GROUP_REM_RUN
787
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved and restored at the end.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 on success, so fBp records whether one was removed.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Stepped one instruction; pulse the timers so the clock moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in rem.s.rc; consume and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
872
873
874/**
875 * Set a breakpoint using the REM facilities.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 * @param Address The breakpoint address.
880 * @thread The emulation thread.
881 */
882REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
883{
884 VM_ASSERT_EMT(pVM);
885 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
886 {
887 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
888 return VINF_SUCCESS;
889 }
890 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
891 return VERR_REM_NO_MORE_BP_SLOTS;
892}
893
894
895/**
896 * Clears a breakpoint set by REMR3BreakpointSet().
897 *
898 * @returns VBox status code.
899 * @param pVM The VM handle.
900 * @param Address The breakpoint address.
901 * @thread The emulation thread.
902 */
903REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
904{
905 VM_ASSERT_EMT(pVM);
906 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
907 {
908 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
909 return VINF_SUCCESS;
910 }
911 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
912 return VERR_REM_BP_NOT_FOUND;
913}
914
915
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the caller's TB flush request */
    if (RT_SUCCESS(rc))
    {
        /* Save interrupt_request so we can run one instruction undisturbed;
           restored after cpu_exec below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If a breakpoint was sitting at this address we cannot tell the
             * two cases apart here, so check the breakpoint list explicitly.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;    /* mark the pending rc as consumed */
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1064
1065
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the execution for the time manager, then translate the QEMU
       exit code into a VBox status code. */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Dead developer-only debugging aid, disabled via #if 0. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint list for the
               current PC to tell the two apart. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;    /* mark the pending rc as consumed */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1194
1195
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCMR3CanExecuteGuest inspects are filled in.
         */
        Ctx.cr0            = env->cr[0];
        Ctx.cr3            = env->cr[3];
        Ctx.cr4            = env->cr[4];

        Ctx.tr             = env->tr.selector;
        Ctx.trHid.u64Base  = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u   = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt     = env->idt.limit;
        Ctx.idtr.pIdt      = env->idt.base;

        Ctx.gdtr.cbGdt     = env->gdt.limit;
        Ctx.gdtr.pGdt      = env->gdt.base;

        Ctx.rsp            = env->regs[R_ESP];
        Ctx.rip            = env->eip;

        Ctx.eflags.u32     = env->eflags;

        Ctx.cs             = env->segs[R_CS].selector;
        Ctx.csHid.u64Base  = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u   = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds             = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base  = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u   = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es             = env->segs[R_ES].selector;
        Ctx.esHid.u64Base  = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u   = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs             = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base  = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u   = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs             = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base  = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u   = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss             = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base  = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u   = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER        = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path (rings 1 & 2 are refused below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always allowed to run raw. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1430
1431
1432/**
1433 * Fetches a code byte.
1434 *
1435 * @returns Success indicator (bool) for ease of use.
1436 * @param env The CPU environment structure.
1437 * @param GCPtrInstr Where to fetch code.
1438 * @param pu8Byte Where to store the byte on success
1439 */
1440bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1441{
1442 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1443 if (RT_SUCCESS(rc))
1444 return true;
1445 return false;
1446}
1447
1448
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env         Pointer to cpu environment.
 * @param   GCPtr       The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM may consult CR0/CR3/CR4, so they must be current first.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Fall back to a full CR3 sync on the next occasion. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1497
1498
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Resolves a guest physical address to a host pointer, encoding special
 * conditions into the low bits of the returned pointer:
 *  - 1: unassigned / catch-all access handler (no direct mapping).
 *  - 2: write access must be caught (read-only direct mapping).
 *
 * NOTE(review): the fWritable parameter is ignored; a hard-coded 'true' is
 * passed to PGMR3PhysTlbGCPhys2Ptr instead — confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1521
1522
1523/**
1524 * Called from tlb_protect_code in order to write monitor a code page.
1525 *
1526 * @param env Pointer to the CPU environment.
1527 * @param GCPtr Code page to monitor
1528 */
1529void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1530{
1531#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1532 Assert(env->pVM->rem.s.fInREM);
1533 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1534 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1535 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1536 && !(env->eflags & VM_MASK) /* no V86 mode */
1537 && !HWACCMIsEnabled(env->pVM))
1538 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1539#endif
1540}
1541
1542
1543/**
1544 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1545 *
1546 * @param env Pointer to the CPU environment.
1547 * @param GCPtr Code page to monitor
1548 */
1549void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1550{
1551 Assert(env->pVM->rem.s.fInREM);
1552#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1553 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1554 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1555 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1556 && !(env->eflags & VM_MASK) /* no V86 mode */
1557 && !HWACCMIsEnabled(env->pVM))
1558 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1559#endif
1560}
1561
1562
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE, global pages do not exist and every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     * PGM may consult them, so they must be current first.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1607
1608
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change affects the TSS interrupt redirection bitmap. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* 32-bit target: EFER does not exist in the QEMU env. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status: bounce it out to EM via the pending-rc mechanism. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1659
1660
/**
 * Called from compiled code to run dma.
 *
 * Suspends the emulated-code profiling sample around the PDM DMA run so the
 * DMA time is not accounted to recompiled code execution.
 *
 * @param env           Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1672
1673
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * Switches the profiling sample from emulated-code to timer work around
 * the timer-queue processing, then back again.
 *
 * @param env           Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1689
1690
/**
 * Record trap occurrence
 *
 * Tracks repeated occurrences of the same hardware exception at the same
 * EIP/CR2 and raises VERR_REM_TOO_MANY_TRAPS when the guest appears stuck
 * in an exception loop.
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (traps 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only count hardware exceptions (vectors < 0x20) in protected, non-V86 mode. */
    if(   uTrap < 0x20
       && (env->cr[0] & X86_CR0_PE)
       && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same exception repeating more than 512 times -> assume a trap loop. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* Different trap, EIP or CR2 than last time -> restart the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a countable hardware exception; record it but reset the repeat count. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1750
1751
/**
 * Clear current active trap
 *
 * Resets the pending-exception bookkeeping maintained by remR3NotifyTrap().
 *
 * @param   pVM         VM Handle.
 */
void remR3TrapClear(PVM pVM)
{
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException  = 0;
    pVM->rem.s.uPendingExcptEIP   = 0;
    pVM->rem.s.uPendingExcptCR2   = 0;
}
1764
1765
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM for call-address tracking.
 *
 * @param   env             Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1775
1776
1777/**
1778 * Syncs the internal REM state with the VM.
1779 *
1780 * This must be called before REMR3Run() is invoked whenever when the REM
1781 * state is not up to date. Calling it several times in a row is not
1782 * permitted.
1783 *
1784 * @returns VBox status code.
1785 *
1786 * @param pVM VM Handle.
1787 * @param pVCpu VMCPU Handle.
1788 *
1789 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1790 * no do this since the majority of the callers don't want any unnecessary of events
1791 * pending that would immediatly interrupt execution.
1792 */
1793REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1794{
1795 register const CPUMCTX *pCtx;
1796 register unsigned fFlags;
1797 bool fHiddenSelRegsValid;
1798 unsigned i;
1799 TRPMEVENT enmType;
1800 uint8_t u8TrapNo;
1801 int rc;
1802
1803 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1804 Log2(("REMR3State:\n"));
1805
1806 pVM->rem.s.Env.pVCpu = pVCpu;
1807 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1808 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1809
1810 Assert(!pVM->rem.s.fInREM);
1811 pVM->rem.s.fInStateSync = true;
1812
1813 /*
1814 * If we have to flush TBs, do that immediately.
1815 */
1816 if (pVM->rem.s.fFlushTBs)
1817 {
1818 STAM_COUNTER_INC(&gStatFlushTBs);
1819 tb_flush(&pVM->rem.s.Env);
1820 pVM->rem.s.fFlushTBs = false;
1821 }
1822
1823 /*
1824 * Copy the registers which require no special handling.
1825 */
1826#ifdef TARGET_X86_64
1827 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1828 Assert(R_EAX == 0);
1829 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1830 Assert(R_ECX == 1);
1831 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1832 Assert(R_EDX == 2);
1833 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1834 Assert(R_EBX == 3);
1835 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1836 Assert(R_ESP == 4);
1837 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1838 Assert(R_EBP == 5);
1839 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1840 Assert(R_ESI == 6);
1841 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1842 Assert(R_EDI == 7);
1843 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1844 pVM->rem.s.Env.regs[8] = pCtx->r8;
1845 pVM->rem.s.Env.regs[9] = pCtx->r9;
1846 pVM->rem.s.Env.regs[10] = pCtx->r10;
1847 pVM->rem.s.Env.regs[11] = pCtx->r11;
1848 pVM->rem.s.Env.regs[12] = pCtx->r12;
1849 pVM->rem.s.Env.regs[13] = pCtx->r13;
1850 pVM->rem.s.Env.regs[14] = pCtx->r14;
1851 pVM->rem.s.Env.regs[15] = pCtx->r15;
1852
1853 pVM->rem.s.Env.eip = pCtx->rip;
1854
1855 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1856#else
1857 Assert(R_EAX == 0);
1858 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1859 Assert(R_ECX == 1);
1860 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1861 Assert(R_EDX == 2);
1862 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1863 Assert(R_EBX == 3);
1864 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1865 Assert(R_ESP == 4);
1866 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1867 Assert(R_EBP == 5);
1868 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1869 Assert(R_ESI == 6);
1870 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1871 Assert(R_EDI == 7);
1872 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1873 pVM->rem.s.Env.eip = pCtx->eip;
1874
1875 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1876#endif
1877
1878 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1879
1880 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1881 for (i=0;i<8;i++)
1882 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1883
1884 /*
1885 * Clear the halted hidden flag (the interrupt waking up the CPU can
1886 * have been dispatched in raw mode).
1887 */
1888 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1889
1890 /*
1891 * Replay invlpg?
1892 */
1893 if (pVM->rem.s.cInvalidatedPages)
1894 {
1895 RTUINT i;
1896
1897 pVM->rem.s.fIgnoreInvlPg = true;
1898 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1899 {
1900 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1901 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1902 }
1903 pVM->rem.s.fIgnoreInvlPg = false;
1904 pVM->rem.s.cInvalidatedPages = 0;
1905 }
1906
1907 /* Replay notification changes. */
1908 REMR3ReplayHandlerNotifications(pVM);
1909
1910 /* Update MSRs; before CRx registers! */
1911 pVM->rem.s.Env.efer = pCtx->msrEFER;
1912 pVM->rem.s.Env.star = pCtx->msrSTAR;
1913 pVM->rem.s.Env.pat = pCtx->msrPAT;
1914#ifdef TARGET_X86_64
1915 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1916 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1917 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1918 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1919
1920 /* Update the internal long mode activate flag according to the new EFER value. */
1921 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1922 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1923 else
1924 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1925#endif
1926
1927 /*
1928 * Registers which are rarely changed and require special handling / order when changed.
1929 */
1930 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1931 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1932 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1933 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1934 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1935 {
1936 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1937 {
1938 pVM->rem.s.fIgnoreCR3Load = true;
1939 tlb_flush(&pVM->rem.s.Env, true);
1940 pVM->rem.s.fIgnoreCR3Load = false;
1941 }
1942
1943 /* CR4 before CR0! */
1944 if (fFlags & CPUM_CHANGED_CR4)
1945 {
1946 pVM->rem.s.fIgnoreCR3Load = true;
1947 pVM->rem.s.fIgnoreCpuMode = true;
1948 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1949 pVM->rem.s.fIgnoreCpuMode = false;
1950 pVM->rem.s.fIgnoreCR3Load = false;
1951 }
1952
1953 if (fFlags & CPUM_CHANGED_CR0)
1954 {
1955 pVM->rem.s.fIgnoreCR3Load = true;
1956 pVM->rem.s.fIgnoreCpuMode = true;
1957 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1958 pVM->rem.s.fIgnoreCpuMode = false;
1959 pVM->rem.s.fIgnoreCR3Load = false;
1960 }
1961
1962 if (fFlags & CPUM_CHANGED_CR3)
1963 {
1964 pVM->rem.s.fIgnoreCR3Load = true;
1965 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1966 pVM->rem.s.fIgnoreCR3Load = false;
1967 }
1968
1969 if (fFlags & CPUM_CHANGED_GDTR)
1970 {
1971 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1972 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1973 }
1974
1975 if (fFlags & CPUM_CHANGED_IDTR)
1976 {
1977 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1978 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1979 }
1980
1981 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1982 {
1983 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1984 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1985 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1986 }
1987
1988 if (fFlags & CPUM_CHANGED_LDTR)
1989 {
1990 if (fHiddenSelRegsValid)
1991 {
1992 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1993 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1994 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1995 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1996 }
1997 else
1998 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1999 }
2000
2001 if (fFlags & CPUM_CHANGED_CPUID)
2002 {
2003 uint32_t u32Dummy;
2004
2005 /*
2006 * Get the CPUID features.
2007 */
2008 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2009 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2010 }
2011
2012 /* Sync FPU state after CR4, CPUID and EFER (!). */
2013 if (fFlags & CPUM_CHANGED_FPU_REM)
2014 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2015 }
2016
2017 /*
2018 * Sync TR unconditionally to make life simpler.
2019 */
2020 pVM->rem.s.Env.tr.selector = pCtx->tr;
2021 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2022 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2023 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2024 /* Note! do_interrupt will fault if the busy flag is still set... */
2025 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2026
2027 /*
2028 * Update selector registers.
2029 * This must be done *after* we've synced gdt, ldt and crX registers
2030 * since we're reading the GDT/LDT om sync_seg. This will happen with
2031 * saved state which takes a quick dip into rawmode for instance.
2032 */
2033 /*
2034 * Stack; Note first check this one as the CPL might have changed. The
2035 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2036 */
2037
2038 if (fHiddenSelRegsValid)
2039 {
2040 /* The hidden selector registers are valid in the CPU context. */
2041 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2042
2043 /* Set current CPL */
2044 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2045
2046 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2047 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2048 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2049 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2052 }
2053 else
2054 {
2055 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2056 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2057 {
2058 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2059
2060 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2061 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2062#ifdef VBOX_WITH_STATISTICS
2063 if (pVM->rem.s.Env.segs[R_SS].newselector)
2064 {
2065 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2066 }
2067#endif
2068 }
2069 else
2070 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2071
2072 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2073 {
2074 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2075 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2076#ifdef VBOX_WITH_STATISTICS
2077 if (pVM->rem.s.Env.segs[R_ES].newselector)
2078 {
2079 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2080 }
2081#endif
2082 }
2083 else
2084 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2085
2086 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2087 {
2088 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2089 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2090#ifdef VBOX_WITH_STATISTICS
2091 if (pVM->rem.s.Env.segs[R_CS].newselector)
2092 {
2093 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2094 }
2095#endif
2096 }
2097 else
2098 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2099
2100 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2101 {
2102 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2103 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2104#ifdef VBOX_WITH_STATISTICS
2105 if (pVM->rem.s.Env.segs[R_DS].newselector)
2106 {
2107 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2108 }
2109#endif
2110 }
2111 else
2112 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2113
2114 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2115 * be the same but not the base/limit. */
2116 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2117 {
2118 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2119 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2120#ifdef VBOX_WITH_STATISTICS
2121 if (pVM->rem.s.Env.segs[R_FS].newselector)
2122 {
2123 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2124 }
2125#endif
2126 }
2127 else
2128 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2129
2130 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2131 {
2132 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2133 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2134#ifdef VBOX_WITH_STATISTICS
2135 if (pVM->rem.s.Env.segs[R_GS].newselector)
2136 {
2137 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2138 }
2139#endif
2140 }
2141 else
2142 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2143 }
2144
2145 /*
2146 * Check for traps.
2147 */
2148 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2149 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2150 if (RT_SUCCESS(rc))
2151 {
2152#ifdef DEBUG
2153 if (u8TrapNo == 0x80)
2154 {
2155 remR3DumpLnxSyscall(pVCpu);
2156 remR3DumpOBsdSyscall(pVCpu);
2157 }
2158#endif
2159
2160 pVM->rem.s.Env.exception_index = u8TrapNo;
2161 if (enmType != TRPM_SOFTWARE_INT)
2162 {
2163 pVM->rem.s.Env.exception_is_int = 0;
2164 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2165 }
2166 else
2167 {
2168 /*
2169 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2170 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2171 * for int03 and into.
2172 */
2173 pVM->rem.s.Env.exception_is_int = 1;
2174 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2175 /* int 3 may be generated by one-byte 0xcc */
2176 if (u8TrapNo == 3)
2177 {
2178 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2179 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2180 }
2181 /* int 4 may be generated by one-byte 0xce */
2182 else if (u8TrapNo == 4)
2183 {
2184 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2185 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2186 }
2187 }
2188
2189 /* get error code and cr2 if needed. */
2190 switch (u8TrapNo)
2191 {
2192 case 0x0e:
2193 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2194 /* fallthru */
2195 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2196 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2197 break;
2198
2199 case 0x11: case 0x08:
2200 default:
2201 pVM->rem.s.Env.error_code = 0;
2202 break;
2203 }
2204
2205 /*
2206 * We can now reset the active trap since the recompiler is gonna have a go at it.
2207 */
2208 rc = TRPMResetTrap(pVCpu);
2209 AssertRC(rc);
2210 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2211 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2212 }
2213
2214 /*
2215 * Clear old interrupt request flags; Check for pending hardware interrupts.
2216 * (See @remark for why we don't check for other FFs.)
2217 */
2218 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2219 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2220 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2221 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2222
2223 /*
2224 * We're now in REM mode.
2225 */
2226 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2227 pVM->rem.s.fInREM = true;
2228 pVM->rem.s.fInStateSync = false;
2229 pVM->rem.s.cCanExecuteRaw = 0;
2230 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2231 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2232 return VINF_SUCCESS;
2233}
2234
2235
2236/**
2237 * Syncs back changes in the REM state to the the VM state.
2238 *
2239 * This must be called after invoking REMR3Run().
2240 * Calling it several times in a row is not permitted.
2241 *
2242 * @returns VBox status code.
2243 *
2244 * @param pVM VM Handle.
2245 * @param pVCpu VMCPU Handle.
2246 */
2247REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2248{
2249 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2250 Assert(pCtx);
2251 unsigned i;
2252
2253 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2254 Log2(("REMR3StateBack:\n"));
2255 Assert(pVM->rem.s.fInREM);
2256
2257 /*
2258 * Copy back the registers.
2259 * This is done in the order they are declared in the CPUMCTX structure.
2260 */
2261
2262 /** @todo FOP */
2263 /** @todo FPUIP */
2264 /** @todo CS */
2265 /** @todo FPUDP */
2266 /** @todo DS */
2267
2268 /** @todo check if FPU/XMM was actually used in the recompiler */
2269 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2270//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2271
2272#ifdef TARGET_X86_64
2273 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2274 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2275 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2276 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2277 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2278 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2279 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2280 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2281 pCtx->r8 = pVM->rem.s.Env.regs[8];
2282 pCtx->r9 = pVM->rem.s.Env.regs[9];
2283 pCtx->r10 = pVM->rem.s.Env.regs[10];
2284 pCtx->r11 = pVM->rem.s.Env.regs[11];
2285 pCtx->r12 = pVM->rem.s.Env.regs[12];
2286 pCtx->r13 = pVM->rem.s.Env.regs[13];
2287 pCtx->r14 = pVM->rem.s.Env.regs[14];
2288 pCtx->r15 = pVM->rem.s.Env.regs[15];
2289
2290 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2291
2292#else
2293 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2294 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2295 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2296 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2297 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2298 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2299 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2300
2301 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2302#endif
2303
2304 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2305
2306#ifdef VBOX_WITH_STATISTICS
2307 if (pVM->rem.s.Env.segs[R_SS].newselector)
2308 {
2309 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2310 }
2311 if (pVM->rem.s.Env.segs[R_GS].newselector)
2312 {
2313 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2314 }
2315 if (pVM->rem.s.Env.segs[R_FS].newselector)
2316 {
2317 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2318 }
2319 if (pVM->rem.s.Env.segs[R_ES].newselector)
2320 {
2321 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2322 }
2323 if (pVM->rem.s.Env.segs[R_DS].newselector)
2324 {
2325 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2326 }
2327 if (pVM->rem.s.Env.segs[R_CS].newselector)
2328 {
2329 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2330 }
2331#endif
2332 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2333 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2334 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2335 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2336 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2337
2338#ifdef TARGET_X86_64
2339 pCtx->rip = pVM->rem.s.Env.eip;
2340 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2341#else
2342 pCtx->eip = pVM->rem.s.Env.eip;
2343 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2344#endif
2345
2346 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2347 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2348 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2349 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2350 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2351 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2352
2353 for (i = 0; i < 8; i++)
2354 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2355
2356 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2357 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2358 {
2359 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2360 STAM_COUNTER_INC(&gStatREMGDTChange);
2361 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2362 }
2363
2364 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2365 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2366 {
2367 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2368 STAM_COUNTER_INC(&gStatREMIDTChange);
2369 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2370 }
2371
2372 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2373 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2374 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2375 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2376 {
2377 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2378 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2379 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2380 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2381 STAM_COUNTER_INC(&gStatREMLDTRChange);
2382 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2383 }
2384
2385 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2386 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2387 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2388 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2389 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2390 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2391 : 0) )
2392 {
2393 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2394 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2395 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2396 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2397 pCtx->tr = pVM->rem.s.Env.tr.selector;
2398 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2399 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2400 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2401 if (pCtx->trHid.Attr.u)
2402 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2403 STAM_COUNTER_INC(&gStatREMTRChange);
2404 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2405 }
2406
2407 /** @todo These values could still be out of sync! */
2408 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2409 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2410 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2411 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2412
2413 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2414 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2415 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2416
2417 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2418 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2419 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2420
2421 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2422 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2423 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2424
2425 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2426 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2427 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2428
2429 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2430 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2431 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2432
2433 /* Sysenter MSR */
2434 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2435 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2436 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2437
2438 /* System MSRs. */
2439 pCtx->msrEFER = pVM->rem.s.Env.efer;
2440 pCtx->msrSTAR = pVM->rem.s.Env.star;
2441 pCtx->msrPAT = pVM->rem.s.Env.pat;
2442#ifdef TARGET_X86_64
2443 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2444 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2445 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2446 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2447#endif
2448
2449 remR3TrapClear(pVM);
2450
2451 /*
2452 * Check for traps.
2453 */
2454 if ( pVM->rem.s.Env.exception_index >= 0
2455 && pVM->rem.s.Env.exception_index < 256)
2456 {
2457 int rc;
2458
2459 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2460 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2461 AssertRC(rc);
2462 switch (pVM->rem.s.Env.exception_index)
2463 {
2464 case 0x0e:
2465 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2466 /* fallthru */
2467 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2468 case 0x11: case 0x08: /* 0 */
2469 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2470 break;
2471 }
2472
2473 }
2474
2475 /*
2476 * We're not longer in REM mode.
2477 */
2478 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2479 pVM->rem.s.fInREM = false;
2480 pVM->rem.s.pCtx = NULL;
2481 pVM->rem.s.Env.pVCpu = NULL;
2482 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2483 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2484 return VINF_SUCCESS;
2485}
2486
2487
2488/**
2489 * This is called by the disassembler when it wants to update the cpu state
2490 * before for instance doing a register dump.
2491 */
2492static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2493{
2494 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2495 unsigned i;
2496
2497 Assert(pVM->rem.s.fInREM);
2498
2499 /*
2500 * Copy back the registers.
2501 * This is done in the order they are declared in the CPUMCTX structure.
2502 */
2503
2504 /** @todo FOP */
2505 /** @todo FPUIP */
2506 /** @todo CS */
2507 /** @todo FPUDP */
2508 /** @todo DS */
2509 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2510 pCtx->fpu.MXCSR = 0;
2511 pCtx->fpu.MXCSR_MASK = 0;
2512
2513 /** @todo check if FPU/XMM was actually used in the recompiler */
2514 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2515//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2516
2517#ifdef TARGET_X86_64
2518 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2519 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2520 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2521 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2522 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2523 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2524 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2525 pCtx->r8 = pVM->rem.s.Env.regs[8];
2526 pCtx->r9 = pVM->rem.s.Env.regs[9];
2527 pCtx->r10 = pVM->rem.s.Env.regs[10];
2528 pCtx->r11 = pVM->rem.s.Env.regs[11];
2529 pCtx->r12 = pVM->rem.s.Env.regs[12];
2530 pCtx->r13 = pVM->rem.s.Env.regs[13];
2531 pCtx->r14 = pVM->rem.s.Env.regs[14];
2532 pCtx->r15 = pVM->rem.s.Env.regs[15];
2533
2534 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2535#else
2536 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2537 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2538 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2539 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2540 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2541 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2542 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2543
2544 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2545#endif
2546
2547 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2548
2549 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2550 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2551 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2552 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2553 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2554
2555#ifdef TARGET_X86_64
2556 pCtx->rip = pVM->rem.s.Env.eip;
2557 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2558#else
2559 pCtx->eip = pVM->rem.s.Env.eip;
2560 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2561#endif
2562
2563 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2564 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2565 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2566 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2567 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2568 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2569
2570 for (i = 0; i < 8; i++)
2571 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2572
2573 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2574 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2575 {
2576 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2577 STAM_COUNTER_INC(&gStatREMGDTChange);
2578 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2579 }
2580
2581 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2582 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2583 {
2584 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2585 STAM_COUNTER_INC(&gStatREMIDTChange);
2586 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2587 }
2588
2589 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2590 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2591 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2592 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2593 {
2594 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2595 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2596 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2597 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2598 STAM_COUNTER_INC(&gStatREMLDTRChange);
2599 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2600 }
2601
2602 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2603 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2604 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2605 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2606 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2607 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2608 : 0) )
2609 {
2610 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2611 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2612 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2613 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2614 pCtx->tr = pVM->rem.s.Env.tr.selector;
2615 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2616 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2617 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2618 if (pCtx->trHid.Attr.u)
2619 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2620 STAM_COUNTER_INC(&gStatREMTRChange);
2621 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2622 }
2623
2624 /** @todo These values could still be out of sync! */
2625 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2626 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2627 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2628 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2629
2630 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2631 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2632 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2635 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2636 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2637
2638 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2639 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2640 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2641
2642 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2643 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2644 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2645
2646 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2647 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2648 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2649
2650 /* Sysenter MSR */
2651 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2652 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2653 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2654
2655 /* System MSRs. */
2656 pCtx->msrEFER = pVM->rem.s.Env.efer;
2657 pCtx->msrSTAR = pVM->rem.s.Env.star;
2658 pCtx->msrPAT = pVM->rem.s.Env.pat;
2659#ifdef TARGET_X86_64
2660 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2661 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2662 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2663 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2664#endif
2665
2666}
2667
2668
2669/**
2670 * Update the VMM state information if we're currently in REM.
2671 *
2672 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2673 * we're currently executing in REM and the VMM state is invalid. This method will of
2674 * course check that we're executing in REM before syncing any data over to the VMM.
2675 *
2676 * @param pVM The VM handle.
2677 * @param pVCpu The VMCPU handle.
2678 */
2679REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2680{
2681 if (pVM->rem.s.fInREM)
2682 remR3StateUpdate(pVM, pVCpu);
2683}
2684
2685
2686#undef LOG_GROUP
2687#define LOG_GROUP LOG_GROUP_REM
2688
2689
2690/**
2691 * Notify the recompiler about Address Gate 20 state change.
2692 *
2693 * This notification is required since A20 gate changes are
2694 * initialized from a device driver and the VM might just as
2695 * well be in REM mode as in RAW mode.
2696 *
2697 * @param pVM VM handle.
2698 * @param pVCpu VMCPU handle.
2699 * @param fEnable True if the gate should be enabled.
2700 * False if the gate should be disabled.
2701 */
2702REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2703{
2704 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2705 VM_ASSERT_EMT(pVM);
2706
2707 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2708 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2709 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2710}
2711
2712
2713/**
2714 * Replays the handler notification changes
2715 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2716 *
2717 * @param pVM VM handle.
2718 */
2719REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2720{
2721 /*
2722 * Replay the flushes.
2723 */
2724 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2725 VM_ASSERT_EMT(pVM);
2726
2727 /** @todo this isn't ensuring correct replay order. */
2728 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY_BIT))
2729 {
2730 /* Lockless purging of pending notifications. */
2731 uint32_t idxReqs = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, -1);
2732 if (idxReqs == -1)
2733 return;
2734
2735 Assert(idxReqs < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2736 PREMHANDLERNOTIFICATION pReqs = &pVM->rem.s.aHandlerNotifications[idxReqs];
2737
2738 /*
2739 * Reverse the list to process it in FIFO order.
2740 */
2741 PREMHANDLERNOTIFICATION pReq = pReqs;
2742 pReqs = NULL;
2743 while (pReq)
2744 {
2745 PREMHANDLERNOTIFICATION pCur = pReq;
2746
2747 if (pReq->idxNext != -1)
2748 {
2749 Assert(pReq->idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2750 pReq = &pVM->rem.s.aHandlerNotifications[pReq->idxNext];
2751 }
2752 else
2753 pReq = NULL;
2754
2755 pCur->idxNext = (pReqs) ? pReqs->idxSelf : -1;
2756 pReqs = pCur;
2757 }
2758
2759 while (pReqs)
2760 {
2761 PREMHANDLERNOTIFICATION pRec = pReqs;
2762
2763 switch (pRec->enmKind)
2764 {
2765 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2766 remR3NotifyHandlerPhysicalRegister(pVM,
2767 pRec->u.PhysicalRegister.enmType,
2768 pRec->u.PhysicalRegister.GCPhys,
2769 pRec->u.PhysicalRegister.cb,
2770 pRec->u.PhysicalRegister.fHasHCHandler);
2771 break;
2772
2773 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2774 remR3NotifyHandlerPhysicalDeregister(pVM,
2775 pRec->u.PhysicalDeregister.enmType,
2776 pRec->u.PhysicalDeregister.GCPhys,
2777 pRec->u.PhysicalDeregister.cb,
2778 pRec->u.PhysicalDeregister.fHasHCHandler,
2779 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2780 break;
2781
2782 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2783 remR3NotifyHandlerPhysicalModify(pVM,
2784 pRec->u.PhysicalModify.enmType,
2785 pRec->u.PhysicalModify.GCPhysOld,
2786 pRec->u.PhysicalModify.GCPhysNew,
2787 pRec->u.PhysicalModify.cb,
2788 pRec->u.PhysicalModify.fHasHCHandler,
2789 pRec->u.PhysicalModify.fRestoreAsRAM);
2790 break;
2791
2792 default:
2793 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2794 break;
2795 }
2796 if (pReqs->idxNext != -1)
2797 {
2798 AssertMsg(pReqs->idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("pReqs->idxNext=%d\n", pReqs->idxNext));
2799 pReqs = &pVM->rem.s.aHandlerNotifications[pReqs->idxNext];
2800 }
2801 else
2802 pReqs = NULL;
2803
2804 /* Put the record back into the free list */
2805 uint32_t idxNext;
2806
2807 do
2808 {
2809 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2810 ASMAtomicWriteU32(&pRec->idxNext, idxNext);
2811 ASMCompilerBarrier();
2812 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pRec->idxSelf, idxNext));
2813 }
2814 }
2815}
2816
2817
2818/**
2819 * Notify REM about changed code page.
2820 *
2821 * @returns VBox status code.
2822 * @param pVM VM handle.
2823 * @param pVCpu VMCPU handle.
2824 * @param pvCodePage Code page address
2825 */
2826REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2827{
2828#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2829 int rc;
2830 RTGCPHYS PhysGC;
2831 uint64_t flags;
2832
2833 VM_ASSERT_EMT(pVM);
2834
2835 /*
2836 * Get the physical page address.
2837 */
2838 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2839 if (rc == VINF_SUCCESS)
2840 {
2841 /*
2842 * Sync the required registers and flush the whole page.
2843 * (Easier to do the whole page than notifying it about each physical
2844 * byte that was changed.
2845 */
2846 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2847 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2848 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2849 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2850
2851 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2852 }
2853#endif
2854 return VINF_SUCCESS;
2855}
2856
2857
2858/**
2859 * Notification about a successful MMR3PhysRegister() call.
2860 *
2861 * @param pVM VM handle.
2862 * @param GCPhys The physical address the RAM.
2863 * @param cb Size of the memory.
2864 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2865 */
2866REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2867{
2868 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2869 VM_ASSERT_EMT(pVM);
2870
2871 /*
2872 * Validate input - we trust the caller.
2873 */
2874 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2875 Assert(cb);
2876 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2877 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2878
2879 /*
2880 * Base ram? Update GCPhysLastRam.
2881 */
2882 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2883 {
2884 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2885 {
2886 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2887 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2888 }
2889 }
2890
2891 /*
2892 * Register the ram.
2893 */
2894 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2895
2896 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2897 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2898 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2899
2900 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2901}
2902
2903
2904/**
2905 * Notification about a successful MMR3PhysRomRegister() call.
2906 *
2907 * @param pVM VM handle.
2908 * @param GCPhys The physical address of the ROM.
2909 * @param cb The size of the ROM.
2910 * @param pvCopy Pointer to the ROM copy.
2911 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2912 * This function will be called when ever the protection of the
2913 * shadow ROM changes (at reset and end of POST).
2914 */
2915REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2916{
2917 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2918 VM_ASSERT_EMT(pVM);
2919
2920 /*
2921 * Validate input - we trust the caller.
2922 */
2923 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2924 Assert(cb);
2925 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2926
2927 /*
2928 * Register the rom.
2929 */
2930 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2931
2932 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2933 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2934 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2935
2936 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2937}
2938
2939
2940/**
2941 * Notification about a successful memory deregistration or reservation.
2942 *
2943 * @param pVM VM Handle.
2944 * @param GCPhys Start physical address.
2945 * @param cb The size of the range.
2946 */
2947REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2948{
2949 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2950 VM_ASSERT_EMT(pVM);
2951
2952 /*
2953 * Validate input - we trust the caller.
2954 */
2955 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2956 Assert(cb);
2957 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2958
2959 /*
2960 * Unassigning the memory.
2961 */
2962 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2963
2964 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2965 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2966 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2967
2968 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2969}
2970
2971
2972/**
2973 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2974 *
2975 * @param pVM VM Handle.
2976 * @param enmType Handler type.
2977 * @param GCPhys Handler range address.
2978 * @param cb Size of the handler range.
2979 * @param fHasHCHandler Set if the handler has a HC callback function.
2980 *
2981 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2982 * Handler memory type to memory which has no HC handler.
2983 */
2984static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2985{
2986 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2987 enmType, GCPhys, cb, fHasHCHandler));
2988
2989 VM_ASSERT_EMT(pVM);
2990 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2991 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2992
2993
2994 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2995
2996 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2997 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2998 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2999 else if (fHasHCHandler)
3000 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3001 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3002
3003 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3004}
3005
3006/**
3007 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3008 *
3009 * @param pVM VM Handle.
3010 * @param enmType Handler type.
3011 * @param GCPhys Handler range address.
3012 * @param cb Size of the handler range.
3013 * @param fHasHCHandler Set if the handler has a HC callback function.
3014 *
3015 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3016 * Handler memory type to memory which has no HC handler.
3017 */
3018REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3019{
3020 REMR3ReplayHandlerNotifications(pVM);
3021
3022 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3023}
3024
3025/**
3026 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3027 *
3028 * @param pVM VM Handle.
3029 * @param enmType Handler type.
3030 * @param GCPhys Handler range address.
3031 * @param cb Size of the handler range.
3032 * @param fHasHCHandler Set if the handler has a HC callback function.
3033 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3034 */
3035static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3036{
3037 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3038 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3039 VM_ASSERT_EMT(pVM);
3040
3041
3042 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3043
3044 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3045 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3046 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3047 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3048 else if (fHasHCHandler)
3049 {
3050 if (!fRestoreAsRAM)
3051 {
3052 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3053 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3054 }
3055 else
3056 {
3057 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3058 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3059 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3060 }
3061 }
3062 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3063
3064 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3065}
3066
3067/**
3068 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3069 *
3070 * @param pVM VM Handle.
3071 * @param enmType Handler type.
3072 * @param GCPhys Handler range address.
3073 * @param cb Size of the handler range.
3074 * @param fHasHCHandler Set if the handler has a HC callback function.
3075 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3076 */
3077REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3078{
3079 REMR3ReplayHandlerNotifications(pVM);
3080 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3081}
3082
3083
3084/**
3085 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3086 *
3087 * @param pVM VM Handle.
3088 * @param enmType Handler type.
3089 * @param GCPhysOld Old handler range address.
3090 * @param GCPhysNew New handler range address.
3091 * @param cb Size of the handler range.
3092 * @param fHasHCHandler Set if the handler has a HC callback function.
3093 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3094 */
3095static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3096{
3097 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3098 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3099 VM_ASSERT_EMT(pVM);
3100 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3101
3102 if (fHasHCHandler)
3103 {
3104 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3105
3106 /*
3107 * Reset the old page.
3108 */
3109 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3110 if (!fRestoreAsRAM)
3111 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3112 else
3113 {
3114 /* This is not perfect, but it'll do for PD monitoring... */
3115 Assert(cb == PAGE_SIZE);
3116 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3117 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3118 }
3119
3120 /*
3121 * Update the new page.
3122 */
3123 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3124 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3125 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3126 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3127
3128 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3129 }
3130}
3131
3132/**
3133 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3134 *
3135 * @param pVM VM Handle.
3136 * @param enmType Handler type.
3137 * @param GCPhysOld Old handler range address.
3138 * @param GCPhysNew New handler range address.
3139 * @param cb Size of the handler range.
3140 * @param fHasHCHandler Set if the handler has a HC callback function.
3141 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3142 */
3143REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3144{
3145 REMR3ReplayHandlerNotifications(pVM);
3146
3147 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3148}
3149
3150/**
3151 * Checks if we're handling access to this page or not.
3152 *
3153 * @returns true if we're trapping access.
3154 * @returns false if we aren't.
3155 * @param pVM The VM handle.
3156 * @param GCPhys The physical address.
3157 *
3158 * @remark This function will only work correctly in VBOX_STRICT builds!
3159 */
3160REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3161{
3162#ifdef VBOX_STRICT
3163 unsigned long off;
3164 REMR3ReplayHandlerNotifications(pVM);
3165
3166 off = get_phys_page_offset(GCPhys);
3167 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3168 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3169 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3170#else
3171 return false;
3172#endif
3173}
3174
3175
3176/**
3177 * Deals with a rare case in get_phys_addr_code where the code
3178 * is being monitored.
3179 *
3180 * It could also be an MMIO page, in which case we will raise a fatal error.
3181 *
3182 * @returns The physical address corresponding to addr.
3183 * @param env The cpu environment.
3184 * @param addr The virtual address.
3185 * @param pTLBEntry The TLB entry.
3186 */
3187target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3188 target_ulong addr,
3189 CPUTLBEntry* pTLBEntry,
3190 target_phys_addr_t ioTLBEntry)
3191{
3192 PVM pVM = env->pVM;
3193
3194 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3195 {
3196 /* If code memory is being monitored, appropriate IOTLB entry will have
3197 handler IO type, and addend will provide real physical address, no
3198 matter if we store VA in TLB or not, as handlers are always passed PA */
3199 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3200 return ret;
3201 }
3202 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3203 "*** handlers\n",
3204 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3205 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3206 LogRel(("*** mmio\n"));
3207 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3208 LogRel(("*** phys\n"));
3209 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3210 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3211 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3212 AssertFatalFailed();
3213}
3214
3215/**
3216 * Read guest RAM and ROM.
3217 *
3218 * @param SrcGCPhys The source address (guest physical).
3219 * @param pvDst The destination address.
3220 * @param cb Number of bytes
3221 */
3222void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3223{
3224 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3225 VBOX_CHECK_ADDR(SrcGCPhys);
3226 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3227#ifdef VBOX_DEBUG_PHYS
3228 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3229#endif
3230 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3231}
3232
3233
3234/**
3235 * Read guest RAM and ROM, unsigned 8-bit.
3236 *
3237 * @param SrcGCPhys The source address (guest physical).
3238 */
3239RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3240{
3241 uint8_t val;
3242 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3243 VBOX_CHECK_ADDR(SrcGCPhys);
3244 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3245 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3246#ifdef VBOX_DEBUG_PHYS
3247 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3248#endif
3249 return val;
3250}
3251
3252
3253/**
3254 * Read guest RAM and ROM, signed 8-bit.
3255 *
3256 * @param SrcGCPhys The source address (guest physical).
3257 */
3258RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3259{
3260 int8_t val;
3261 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3262 VBOX_CHECK_ADDR(SrcGCPhys);
3263 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3264 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3265#ifdef VBOX_DEBUG_PHYS
3266 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3267#endif
3268 return val;
3269}
3270
3271
3272/**
3273 * Read guest RAM and ROM, unsigned 16-bit.
3274 *
3275 * @param SrcGCPhys The source address (guest physical).
3276 */
3277RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3278{
3279 uint16_t val;
3280 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3281 VBOX_CHECK_ADDR(SrcGCPhys);
3282 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3283 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3284#ifdef VBOX_DEBUG_PHYS
3285 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3286#endif
3287 return val;
3288}
3289
3290
3291/**
3292 * Read guest RAM and ROM, signed 16-bit.
3293 *
3294 * @param SrcGCPhys The source address (guest physical).
3295 */
3296RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3297{
3298 int16_t val;
3299 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3300 VBOX_CHECK_ADDR(SrcGCPhys);
3301 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3302 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3303#ifdef VBOX_DEBUG_PHYS
3304 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3305#endif
3306 return val;
3307}
3308
3309
3310/**
3311 * Read guest RAM and ROM, unsigned 32-bit.
3312 *
3313 * @param SrcGCPhys The source address (guest physical).
3314 */
3315RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3316{
3317 uint32_t val;
3318 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3319 VBOX_CHECK_ADDR(SrcGCPhys);
3320 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3321 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3322#ifdef VBOX_DEBUG_PHYS
3323 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3324#endif
3325 return val;
3326}
3327
3328
3329/**
3330 * Read guest RAM and ROM, signed 32-bit.
3331 *
3332 * @param SrcGCPhys The source address (guest physical).
3333 */
3334RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3335{
3336 int32_t val;
3337 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3338 VBOX_CHECK_ADDR(SrcGCPhys);
3339 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3340 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3341#ifdef VBOX_DEBUG_PHYS
3342 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3343#endif
3344 return val;
3345}
3346
3347
3348/**
3349 * Read guest RAM and ROM, unsigned 64-bit.
3350 *
3351 * @param SrcGCPhys The source address (guest physical).
3352 */
3353uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3354{
3355 uint64_t val;
3356 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3357 VBOX_CHECK_ADDR(SrcGCPhys);
3358 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3359 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3360#ifdef VBOX_DEBUG_PHYS
3361 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3362#endif
3363 return val;
3364}
3365
3366
3367/**
3368 * Read guest RAM and ROM, signed 64-bit.
3369 *
3370 * @param SrcGCPhys The source address (guest physical).
3371 */
3372int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3373{
3374 int64_t val;
3375 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3376 VBOX_CHECK_ADDR(SrcGCPhys);
3377 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3378 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3379#ifdef VBOX_DEBUG_PHYS
3380 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3381#endif
3382 return val;
3383}
3384
3385
3386/**
3387 * Write guest RAM.
3388 *
3389 * @param DstGCPhys The destination address (guest physical).
3390 * @param pvSrc The source address.
3391 * @param cb Number of bytes to write
3392 */
3393void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3394{
3395 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3396 VBOX_CHECK_ADDR(DstGCPhys);
3397 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3398 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3399#ifdef VBOX_DEBUG_PHYS
3400 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3401#endif
3402}
3403
3404
3405/**
3406 * Write guest RAM, unsigned 8-bit.
3407 *
3408 * @param DstGCPhys The destination address (guest physical).
3409 * @param val Value
3410 */
3411void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3412{
3413 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3414 VBOX_CHECK_ADDR(DstGCPhys);
3415 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3416 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3417#ifdef VBOX_DEBUG_PHYS
3418 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3419#endif
3420}
3421
3422
3423/**
3424 * Write guest RAM, unsigned 8-bit.
3425 *
3426 * @param DstGCPhys The destination address (guest physical).
3427 * @param val Value
3428 */
3429void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3430{
3431 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3432 VBOX_CHECK_ADDR(DstGCPhys);
3433 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3434 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3435#ifdef VBOX_DEBUG_PHYS
3436 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3437#endif
3438}
3439
3440
3441/**
3442 * Write guest RAM, unsigned 32-bit.
3443 *
3444 * @param DstGCPhys The destination address (guest physical).
3445 * @param val Value
3446 */
3447void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3448{
3449 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3450 VBOX_CHECK_ADDR(DstGCPhys);
3451 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3452 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3453#ifdef VBOX_DEBUG_PHYS
3454 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3455#endif
3456}
3457
3458
3459/**
3460 * Write guest RAM, unsigned 64-bit.
3461 *
3462 * @param DstGCPhys The destination address (guest physical).
3463 * @param val Value
3464 */
3465void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3466{
3467 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3468 VBOX_CHECK_ADDR(DstGCPhys);
3469 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3470 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3471#ifdef VBOX_DEBUG_PHYS
3472 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3473#endif
3474}
3475
3476#undef LOG_GROUP
3477#define LOG_GROUP LOG_GROUP_REM_MMIO
3478
3479/** Read MMIO memory. */
3480static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3481{
3482 uint32_t u32 = 0;
3483 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3484 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3485 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3486 return u32;
3487}
3488
3489/** Read MMIO memory. */
3490static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3491{
3492 uint32_t u32 = 0;
3493 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3494 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3495 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3496 return u32;
3497}
3498
3499/** Read MMIO memory. */
3500static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3501{
3502 uint32_t u32 = 0;
3503 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3504 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3505 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3506 return u32;
3507}
3508
/** Write to MMIO memory, 8-bit access dispatched through IOM. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    /* Failures are asserted but otherwise ignored - writes are fire-and-forget here. */
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3517
/** Write to MMIO memory, 16-bit access dispatched through IOM. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    /* Failures are asserted but otherwise ignored - writes are fire-and-forget here. */
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3526
/** Write to MMIO memory, 32-bit access dispatched through IOM. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    /* Failures are asserted but otherwise ignored - writes are fire-and-forget here. */
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3535
3536
3537#undef LOG_GROUP
3538#define LOG_GROUP LOG_GROUP_REM_HANDLER
3539
3540/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3541
3542static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3543{
3544 uint8_t u8;
3545 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3546 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3547 return u8;
3548}
3549
3550static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3551{
3552 uint16_t u16;
3553 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3554 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3555 return u16;
3556}
3557
3558static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3559{
3560 uint32_t u32;
3561 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3562 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3563 return u32;
3564}
3565
/** 8-bit write for pages with an access handler (LFB hack - see warning above). */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* Only the low byte of u32 is written (little endian host assumed). */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3571
/** 16-bit write for pages with an access handler (LFB hack - see warning above). */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    /* Only the low word of u32 is written (little endian host assumed). */
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3577
/** 32-bit write for pages with an access handler (LFB hack - see warning above). */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3583
3584/* -+- disassembly -+- */
3585
3586#undef LOG_GROUP
3587#define LOG_GROUP LOG_GROUP_REM_DISAS
3588
3589
3590/**
3591 * Enables or disables singled stepped disassembly.
3592 *
3593 * @returns VBox status code.
3594 * @param pVM VM handle.
3595 * @param fEnable To enable set this flag, to disable clear it.
3596 */
3597static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3598{
3599 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3600 VM_ASSERT_EMT(pVM);
3601
3602 if (fEnable)
3603 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3604 else
3605 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3606 return VINF_SUCCESS;
3607}
3608
3609
3610/**
3611 * Enables or disables singled stepped disassembly.
3612 *
3613 * @returns VBox status code.
3614 * @param pVM VM handle.
3615 * @param fEnable To enable set this flag, to disable clear it.
3616 */
3617REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3618{
3619 PVMREQ pReq;
3620 int rc;
3621
3622 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3623 if (VM_IS_EMT(pVM))
3624 return remR3DisasEnableStepping(pVM, fEnable);
3625
3626 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3627 AssertRC(rc);
3628 if (RT_SUCCESS(rc))
3629 rc = pReq->iStatus;
3630 VMR3ReqFree(pReq);
3631 return rc;
3632}
3633
3634
3635#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3636/**
3637 * External Debugger Command: .remstep [on|off|1|0]
3638 */
3639static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3640{
3641 bool fEnable;
3642 int rc;
3643
3644 /* print status */
3645 if (cArgs == 0)
3646 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3647 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3648
3649 /* convert the argument and change the mode. */
3650 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3651 if (RT_FAILURE(rc))
3652 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3653 rc = REMR3DisasEnableStepping(pVM, fEnable);
3654 if (RT_FAILURE(rc))
3655 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3656 return rc;
3657}
3658#endif
3659
3660
3661/**
3662 * Disassembles one instruction and prints it to the log.
3663 *
3664 * @returns Success indicator.
3665 * @param env Pointer to the recompiler CPU structure.
3666 * @param f32BitCode Indicates that whether or not the code should
3667 * be disassembled as 16 or 32 bit. If -1 the CS
3668 * selector will be inspected.
3669 * @param pszPrefix
3670 */
3671bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3672{
3673 PVM pVM = env->pVM;
3674 const bool fLog = LogIsEnabled();
3675 const bool fLog2 = LogIs2Enabled();
3676 int rc = VINF_SUCCESS;
3677
3678 /*
3679 * Don't bother if there ain't any log output to do.
3680 */
3681 if (!fLog && !fLog2)
3682 return true;
3683
3684 /*
3685 * Update the state so DBGF reads the correct register values.
3686 */
3687 remR3StateUpdate(pVM, env->pVCpu);
3688
3689 /*
3690 * Log registers if requested.
3691 */
3692 if (!fLog2)
3693 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3694
3695 /*
3696 * Disassemble to log.
3697 */
3698 if (fLog)
3699 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3700
3701 return RT_SUCCESS(rc);
3702}
3703
3704
3705/**
3706 * Disassemble recompiled code.
3707 *
3708 * @param phFileIgnored Ignored, logfile usually.
3709 * @param pvCode Pointer to the code block.
3710 * @param cb Size of the code block.
3711 */
3712void disas(FILE *phFile, void *pvCode, unsigned long cb)
3713{
3714#ifdef DEBUG_TMP_LOGGING
3715# define DISAS_PRINTF(x...) fprintf(phFile, x)
3716#else
3717# define DISAS_PRINTF(x...) RTLogPrintf(x)
3718 if (LogIs2Enabled())
3719#endif
3720 {
3721 unsigned off = 0;
3722 char szOutput[256];
3723 DISCPUSTATE Cpu;
3724
3725 memset(&Cpu, 0, sizeof(Cpu));
3726#ifdef RT_ARCH_X86
3727 Cpu.mode = CPUMODE_32BIT;
3728#else
3729 Cpu.mode = CPUMODE_64BIT;
3730#endif
3731
3732 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3733 while (off < cb)
3734 {
3735 uint32_t cbInstr;
3736 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3737 DISAS_PRINTF("%s", szOutput);
3738 else
3739 {
3740 DISAS_PRINTF("disas error\n");
3741 cbInstr = 1;
3742#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3743 break;
3744#endif
3745 }
3746 off += cbInstr;
3747 }
3748 }
3749
3750#undef DISAS_PRINTF
3751}
3752
3753
3754/**
3755 * Disassemble guest code.
3756 *
3757 * @param phFileIgnored Ignored, logfile usually.
3758 * @param uCode The guest address of the code to disassemble. (flat?)
3759 * @param cb Number of bytes to disassemble.
3760 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3761 */
3762void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3763{
3764#ifdef DEBUG_TMP_LOGGING
3765# define DISAS_PRINTF(x...) fprintf(phFile, x)
3766#else
3767# define DISAS_PRINTF(x...) RTLogPrintf(x)
3768 if (LogIs2Enabled())
3769#endif
3770 {
3771 PVM pVM = cpu_single_env->pVM;
3772 PVMCPU pVCpu = cpu_single_env->pVCpu;
3773 RTSEL cs;
3774 RTGCUINTPTR eip;
3775
3776 Assert(pVCpu);
3777
3778 /*
3779 * Update the state so DBGF reads the correct register values (flags).
3780 */
3781 remR3StateUpdate(pVM, pVCpu);
3782
3783 /*
3784 * Do the disassembling.
3785 */
3786 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3787 cs = cpu_single_env->segs[R_CS].selector;
3788 eip = uCode - cpu_single_env->segs[R_CS].base;
3789 for (;;)
3790 {
3791 char szBuf[256];
3792 uint32_t cbInstr;
3793 int rc = DBGFR3DisasInstrEx(pVM,
3794 pVCpu->idCpu,
3795 cs,
3796 eip,
3797 0,
3798 szBuf, sizeof(szBuf),
3799 &cbInstr);
3800 if (RT_SUCCESS(rc))
3801 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3802 else
3803 {
3804 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3805 cbInstr = 1;
3806 }
3807
3808 /* next */
3809 if (cb <= cbInstr)
3810 break;
3811 cb -= cbInstr;
3812 uCode += cbInstr;
3813 eip += cbInstr;
3814 }
3815 }
3816#undef DISAS_PRINTF
3817}
3818
3819
/**
 * Looks up a guest symbol.
 *
 * @returns Pointer to symbol name. This is a static buffer, so the result is
 *          only valid until the next call and the function is not reentrant.
 * @param orig_addr The address in question.
 */
const char *lookup_symbol(target_ulong orig_addr)
{
    RTGCINTPTR off = 0;     /* displacement from the symbol start */
    DBGFSYMBOL Sym;
    PVM pVM = cpu_single_env->pVM;
    int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
    if (RT_SUCCESS(rc))
    {
        static char szSym[sizeof(Sym.szName) + 48];
        /* The trailing newline is deliberate - qemu prints the result as-is. */
        if (!off)
            RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
        else if (off > 0)
            RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off); /* NOTE(review): %x with RTGCINTPTR - verify width on 64-bit GC */
        else
            RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
        return szSym;
    }
    return "<N/A>";
}
3845
3846
3847#undef LOG_GROUP
3848#define LOG_GROUP LOG_GROUP_REM
3849
3850
3851/* -+- FF notifications -+- */
3852
3853
/**
 * Notification about a pending interrupt.
 *
 * Stashes the vector in the REM state; cpu_get_pic_interrupt will consume it
 * in preference to querying PDM.
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 * @param u8Interrupt Interrupt vector to deliver.
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    /* Only one interrupt may be stashed at a time; a previous one must have been consumed. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3867
/**
 * Queries the currently stashed pending interrupt, if any.
 *
 * @returns Pending interrupt vector or REM_NO_PENDING_IRQ if none is stashed.
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 * @thread The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3880
3881/**
3882 * Notification about the interrupt FF being set.
3883 *
3884 * @param pVM VM Handle.
3885 * @param pVCpu VMCPU Handle.
3886 * @thread The emulation thread.
3887 */
3888REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3889{
3890 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3891 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3892 if (pVM->rem.s.fInREM)
3893 {
3894 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3895 CPU_INTERRUPT_EXTERNAL_HARD);
3896 }
3897}
3898
3899
3900/**
3901 * Notification about the interrupt FF being set.
3902 *
3903 * @param pVM VM Handle.
3904 * @param pVCpu VMCPU Handle.
3905 * @thread Any.
3906 */
3907REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3908{
3909 LogFlow(("REMR3NotifyInterruptClear:\n"));
3910 if (pVM->rem.s.fInREM)
3911 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3912}
3913
3914
3915/**
3916 * Notification about pending timer(s).
3917 *
3918 * @param pVM VM Handle.
3919 * @param pVCpuDst The target cpu for this notification.
3920 * TM will not broadcast pending timer events, but use
3921 * a decidated EMT for them. So, only interrupt REM
3922 * execution if the given CPU is executing in REM.
3923 * @thread Any.
3924 */
3925REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3926{
3927#ifndef DEBUG_bird
3928 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3929#endif
3930 if (pVM->rem.s.fInREM)
3931 {
3932 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3933 {
3934 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3935 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3936 CPU_INTERRUPT_EXTERNAL_TIMER);
3937 }
3938 else
3939 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3940 }
3941 else
3942 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3943}
3944
3945
3946/**
3947 * Notification about pending DMA transfers.
3948 *
3949 * @param pVM VM Handle.
3950 * @thread Any.
3951 */
3952REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3953{
3954 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3955 if (pVM->rem.s.fInREM)
3956 {
3957 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3958 CPU_INTERRUPT_EXTERNAL_DMA);
3959 }
3960}
3961
3962
3963/**
3964 * Notification about pending timer(s).
3965 *
3966 * @param pVM VM Handle.
3967 * @thread Any.
3968 */
3969REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3970{
3971 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3972 if (pVM->rem.s.fInREM)
3973 {
3974 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3975 CPU_INTERRUPT_EXTERNAL_EXIT);
3976 }
3977}
3978
3979
3980/**
3981 * Notification about pending FF set by an external thread.
3982 *
3983 * @param pVM VM handle.
3984 * @thread Any.
3985 */
3986REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3987{
3988 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3989 if (pVM->rem.s.fInREM)
3990 {
3991 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3992 CPU_INTERRUPT_EXTERNAL_EXIT);
3993 }
3994}
3995
3996
3997#ifdef VBOX_WITH_STATISTICS
3998void remR3ProfileStart(int statcode)
3999{
4000 STAMPROFILEADV *pStat;
4001 switch(statcode)
4002 {
4003 case STATS_EMULATE_SINGLE_INSTR:
4004 pStat = &gStatExecuteSingleInstr;
4005 break;
4006 case STATS_QEMU_COMPILATION:
4007 pStat = &gStatCompilationQEmu;
4008 break;
4009 case STATS_QEMU_RUN_EMULATED_CODE:
4010 pStat = &gStatRunCodeQEmu;
4011 break;
4012 case STATS_QEMU_TOTAL:
4013 pStat = &gStatTotalTimeQEmu;
4014 break;
4015 case STATS_QEMU_RUN_TIMERS:
4016 pStat = &gStatTimers;
4017 break;
4018 case STATS_TLB_LOOKUP:
4019 pStat= &gStatTBLookup;
4020 break;
4021 case STATS_IRQ_HANDLING:
4022 pStat= &gStatIRQ;
4023 break;
4024 case STATS_RAW_CHECK:
4025 pStat = &gStatRawCheck;
4026 break;
4027
4028 default:
4029 AssertMsgFailed(("unknown stat %d\n", statcode));
4030 return;
4031 }
4032 STAM_PROFILE_ADV_START(pStat, a);
4033}
4034
4035
4036void remR3ProfileStop(int statcode)
4037{
4038 STAMPROFILEADV *pStat;
4039 switch(statcode)
4040 {
4041 case STATS_EMULATE_SINGLE_INSTR:
4042 pStat = &gStatExecuteSingleInstr;
4043 break;
4044 case STATS_QEMU_COMPILATION:
4045 pStat = &gStatCompilationQEmu;
4046 break;
4047 case STATS_QEMU_RUN_EMULATED_CODE:
4048 pStat = &gStatRunCodeQEmu;
4049 break;
4050 case STATS_QEMU_TOTAL:
4051 pStat = &gStatTotalTimeQEmu;
4052 break;
4053 case STATS_QEMU_RUN_TIMERS:
4054 pStat = &gStatTimers;
4055 break;
4056 case STATS_TLB_LOOKUP:
4057 pStat= &gStatTBLookup;
4058 break;
4059 case STATS_IRQ_HANDLING:
4060 pStat= &gStatIRQ;
4061 break;
4062 case STATS_RAW_CHECK:
4063 pStat = &gStatRawCheck;
4064 break;
4065 default:
4066 AssertMsgFailed(("unknown stat %d\n", statcode));
4067 return;
4068 }
4069 STAM_PROFILE_ADV_STOP(pStat, a);
4070}
4071#endif
4072
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in the REM state and interrupts the recompiler CPU
 * so the run loop picks it up and returns to EM.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4087
4088
4089/* -+- timers -+- */
4090
/** qemu callback: reads the guest TSC via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4096
4097
4098/* -+- interrupts -+- */
4099
/** qemu callback: raises the FPU error interrupt (legacy IRQ 13). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4105
/**
 * qemu callback: gets the pending interrupt vector, either the one stashed by
 * REMR3NotifyPendingInterrupt or by querying PDM.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param env The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* If more interrupts are pending, keep the hard interrupt request raised. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    /* -1 tells qemu there is no vector to deliver. */
    return -1;
}
4138
4139
4140/* -+- local apic -+- */
4141
/** qemu callback: writes the APIC base MSR via PDM. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4147
4148uint64_t cpu_get_apic_base(CPUX86State *env)
4149{
4150 uint64_t u64;
4151 int rc = PDMApicGetBase(env->pVM, &u64);
4152 if (RT_SUCCESS(rc))
4153 {
4154 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4155 return u64;
4156 }
4157 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4158 return 0;
4159}
4160
/** qemu callback: writes the task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4166
4167uint8_t cpu_get_apic_tpr(CPUX86State *env)
4168{
4169 uint8_t u8;
4170 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4171 if (RT_SUCCESS(rc))
4172 {
4173 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4174 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4175 }
4176 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4177 return 0;
4178}
4179
4180
4181uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4182{
4183 uint64_t value;
4184 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4185 if (RT_SUCCESS(rc))
4186 {
4187 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4188 return value;
4189 }
4190 /** @todo: exception ? */
4191 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4192 return value;
4193}
4194
/** qemu callback: writes an APIC MSR via PDM. */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4201
/** qemu callback: reads a guest MSR via CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4207
/** qemu callback: writes a guest MSR via CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4213
4214/* -+- I/O Ports -+- */
4215
4216#undef LOG_GROUP
4217#define LOG_GROUP LOG_GROUP_REM_IOPORT
4218
4219void cpu_outb(CPUState *env, int addr, int val)
4220{
4221 int rc;
4222
4223 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4224 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4225
4226 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4227 if (RT_LIKELY(rc == VINF_SUCCESS))
4228 return;
4229 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4230 {
4231 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4232 remR3RaiseRC(env->pVM, rc);
4233 return;
4234 }
4235 remAbort(rc, __FUNCTION__);
4236}
4237
4238void cpu_outw(CPUState *env, int addr, int val)
4239{
4240 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4241 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4242 if (RT_LIKELY(rc == VINF_SUCCESS))
4243 return;
4244 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4245 {
4246 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4247 remR3RaiseRC(env->pVM, rc);
4248 return;
4249 }
4250 remAbort(rc, __FUNCTION__);
4251}
4252
4253void cpu_outl(CPUState *env, int addr, int val)
4254{
4255 int rc;
4256 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4257 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4258 if (RT_LIKELY(rc == VINF_SUCCESS))
4259 return;
4260 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4261 {
4262 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4263 remR3RaiseRC(env->pVM, rc);
4264 return;
4265 }
4266 remAbort(rc, __FUNCTION__);
4267}
4268
4269int cpu_inb(CPUState *env, int addr)
4270{
4271 uint32_t u32 = 0;
4272 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4273 if (RT_LIKELY(rc == VINF_SUCCESS))
4274 {
4275 if (/*addr != 0x61 && */addr != 0x71)
4276 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4277 return (int)u32;
4278 }
4279 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4280 {
4281 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4282 remR3RaiseRC(env->pVM, rc);
4283 return (int)u32;
4284 }
4285 remAbort(rc, __FUNCTION__);
4286 return 0xff;
4287}
4288
4289int cpu_inw(CPUState *env, int addr)
4290{
4291 uint32_t u32 = 0;
4292 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4293 if (RT_LIKELY(rc == VINF_SUCCESS))
4294 {
4295 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4296 return (int)u32;
4297 }
4298 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4299 {
4300 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4301 remR3RaiseRC(env->pVM, rc);
4302 return (int)u32;
4303 }
4304 remAbort(rc, __FUNCTION__);
4305 return 0xffff;
4306}
4307
4308int cpu_inl(CPUState *env, int addr)
4309{
4310 uint32_t u32 = 0;
4311 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4312 if (RT_LIKELY(rc == VINF_SUCCESS))
4313 {
4314//if (addr==0x01f0 && u32 == 0x6b6d)
4315// loglevel = ~0;
4316 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4317 return (int)u32;
4318 }
4319 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4320 {
4321 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4322 remR3RaiseRC(env->pVM, rc);
4323 return (int)u32;
4324 }
4325 remAbort(rc, __FUNCTION__);
4326 return 0xffffffff;
4327}
4328
4329#undef LOG_GROUP
4330#define LOG_GROUP LOG_GROUP_REM
4331
4332
4333/* -+- helpers and misc other interfaces -+- */
4334
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Always answer with the guest-visible CPUID from CPUM, not the host's. */
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4352
4353
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code (#if 0). The REMR3StateBack/EMR3FatalError calls
 * here use older single-CPU signatures than the live cpu_abort/remAbort
 * below - update before re-enabling.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4383
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, logs it, syncs the REM state back if needed and hands
 * control to EMR3FatalError (which is not expected to return).
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    /* Make sure nothing is buffered or disabled so the message actually gets out. */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to 6 arguments off the GCC-style va_list (one per '%' found) and
       re-dispatch them positionally so MSC's RTStrPrintf can format them. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4444
4445
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the REM state back if needed and hands control to
 * EMR3FatalError (which is not expected to return).
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4476
4477
/**
 * Dumps a linux system call.
 *
 * Logs the syscall name (from a static i386 Linux syscall table) together
 * with eip and the argument registers. Intended as a debugging aid when
 * tracing a Linux guest.
 *
 * @param pVCpu VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* i386 Linux syscall table, indexed by eax. */
    static const char *apsz[] =
    {
        "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open", /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink", /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod", /* 15 */
        "sys_lchown16",
        "sys_ni_syscall", /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid", /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime", /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime", /* 30 */
        "sys_ni_syscall", /* old stty syscall holder */
        "sys_ni_syscall", /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall", /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir", /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall", /* old prof syscall holder */
        "sys_brk", /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16", /* 50 */
        "sys_acct",
        "sys_umount", /* recycled never used phys() */
        "sys_ni_syscall", /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl", /* 55 */
        "sys_ni_syscall", /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall", /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask", /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp", /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16", /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit", /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16", /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap", /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall", /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs", /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer", /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl", /* 110 */
        "sys_vhangup",
        "sys_ni_syscall", /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff", /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone", /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall", /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall", /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs", /* 135 */
        "sys_personality",
        "sys_ni_syscall", /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek", /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv", /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock", /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam", /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min", /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16", /* 165 */
        "sys_vm86",
        "sys_ni_syscall", /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16", /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask", /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64", /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset", /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall", /* reserved for streams1 */
        "sys_ni_syscall", /* reserved for streams2 */
        "sys_vfork", /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64", /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid", /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups", /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid", /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64", /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall", /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead", /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr", /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr", /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex", /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64", /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl", /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime", /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime", /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill", /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    /* switch with only a default case - kept for easy addition of special cases. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4776
4777
4778/**
4779 * Dumps an OpenBSD system call.
4780 * @param pVCpu VMCPU handle.
4781 */
4782void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4783{
4784 static const char *apsz[] =
4785 {
4786 "SYS_syscall", //0
4787 "SYS_exit", //1
4788 "SYS_fork", //2
4789 "SYS_read", //3
4790 "SYS_write", //4
4791 "SYS_open", //5
4792 "SYS_close", //6
4793 "SYS_wait4", //7
4794 "SYS_8",
4795 "SYS_link", //9
4796 "SYS_unlink", //10
4797 "SYS_11",
4798 "SYS_chdir", //12
4799 "SYS_fchdir", //13
4800 "SYS_mknod", //14
4801 "SYS_chmod", //15
4802 "SYS_chown", //16
4803 "SYS_break", //17
4804 "SYS_18",
4805 "SYS_19",
4806 "SYS_getpid", //20
4807 "SYS_mount", //21
4808 "SYS_unmount", //22
4809 "SYS_setuid", //23
4810 "SYS_getuid", //24
4811 "SYS_geteuid", //25
4812 "SYS_ptrace", //26
4813 "SYS_recvmsg", //27
4814 "SYS_sendmsg", //28
4815 "SYS_recvfrom", //29
4816 "SYS_accept", //30
4817 "SYS_getpeername", //31
4818 "SYS_getsockname", //32
4819 "SYS_access", //33
4820 "SYS_chflags", //34
4821 "SYS_fchflags", //35
4822 "SYS_sync", //36
4823 "SYS_kill", //37
4824 "SYS_38",
4825 "SYS_getppid", //39
4826 "SYS_40",
4827 "SYS_dup", //41
4828 "SYS_opipe", //42
4829 "SYS_getegid", //43
4830 "SYS_profil", //44
4831 "SYS_ktrace", //45
4832 "SYS_sigaction", //46
4833 "SYS_getgid", //47
4834 "SYS_sigprocmask", //48
4835 "SYS_getlogin", //49
4836 "SYS_setlogin", //50
4837 "SYS_acct", //51
4838 "SYS_sigpending", //52
4839 "SYS_osigaltstack", //53
4840 "SYS_ioctl", //54
4841 "SYS_reboot", //55
4842 "SYS_revoke", //56
4843 "SYS_symlink", //57
4844 "SYS_readlink", //58
4845 "SYS_execve", //59
4846 "SYS_umask", //60
4847 "SYS_chroot", //61
4848 "SYS_62",
4849 "SYS_63",
4850 "SYS_64",
4851 "SYS_65",
4852 "SYS_vfork", //66
4853 "SYS_67",
4854 "SYS_68",
4855 "SYS_sbrk", //69
4856 "SYS_sstk", //70
4857 "SYS_61",
4858 "SYS_vadvise", //72
4859 "SYS_munmap", //73
4860 "SYS_mprotect", //74
4861 "SYS_madvise", //75
4862 "SYS_76",
4863 "SYS_77",
4864 "SYS_mincore", //78
4865 "SYS_getgroups", //79
4866 "SYS_setgroups", //80
4867 "SYS_getpgrp", //81
4868 "SYS_setpgid", //82
4869 "SYS_setitimer", //83
4870 "SYS_84",
4871 "SYS_85",
4872 "SYS_getitimer", //86
4873 "SYS_87",
4874 "SYS_88",
4875 "SYS_89",
4876 "SYS_dup2", //90
4877 "SYS_91",
4878 "SYS_fcntl", //92
4879 "SYS_select", //93
4880 "SYS_94",
4881 "SYS_fsync", //95
4882 "SYS_setpriority", //96
4883 "SYS_socket", //97
4884 "SYS_connect", //98
4885 "SYS_99",
4886 "SYS_getpriority", //100
4887 "SYS_101",
4888 "SYS_102",
4889 "SYS_sigreturn", //103
4890 "SYS_bind", //104
4891 "SYS_setsockopt", //105
4892 "SYS_listen", //106
4893 "SYS_107",
4894 "SYS_108",
4895 "SYS_109",
4896 "SYS_110",
4897 "SYS_sigsuspend", //111
4898 "SYS_112",
4899 "SYS_113",
4900 "SYS_114",
4901 "SYS_115",
4902 "SYS_gettimeofday", //116
4903 "SYS_getrusage", //117
4904 "SYS_getsockopt", //118
4905 "SYS_119",
4906 "SYS_readv", //120
4907 "SYS_writev", //121
4908 "SYS_settimeofday", //122
4909 "SYS_fchown", //123
4910 "SYS_fchmod", //124
4911 "SYS_125",
4912 "SYS_setreuid", //126
4913 "SYS_setregid", //127
4914 "SYS_rename", //128
4915 "SYS_129",
4916 "SYS_130",
4917 "SYS_flock", //131
4918 "SYS_mkfifo", //132
4919 "SYS_sendto", //133
4920 "SYS_shutdown", //134
4921 "SYS_socketpair", //135
4922 "SYS_mkdir", //136
4923 "SYS_rmdir", //137
4924 "SYS_utimes", //138
4925 "SYS_139",
4926 "SYS_adjtime", //140
4927 "SYS_141",
4928 "SYS_142",
4929 "SYS_143",
4930 "SYS_144",
4931 "SYS_145",
4932 "SYS_146",
4933 "SYS_setsid", //147
4934 "SYS_quotactl", //148
4935 "SYS_149",
4936 "SYS_150",
4937 "SYS_151",
4938 "SYS_152",
4939 "SYS_153",
4940 "SYS_154",
4941 "SYS_nfssvc", //155
4942 "SYS_156",
4943 "SYS_157",
4944 "SYS_158",
4945 "SYS_159",
4946 "SYS_160",
4947 "SYS_getfh", //161
4948 "SYS_162",
4949 "SYS_163",
4950 "SYS_164",
4951 "SYS_sysarch", //165
4952 "SYS_166",
4953 "SYS_167",
4954 "SYS_168",
4955 "SYS_169",
4956 "SYS_170",
4957 "SYS_171",
4958 "SYS_172",
4959 "SYS_pread", //173
4960 "SYS_pwrite", //174
4961 "SYS_175",
4962 "SYS_176",
4963 "SYS_177",
4964 "SYS_178",
4965 "SYS_179",
4966 "SYS_180",
4967 "SYS_setgid", //181
4968 "SYS_setegid", //182
4969 "SYS_seteuid", //183
4970 "SYS_lfs_bmapv", //184
4971 "SYS_lfs_markv", //185
4972 "SYS_lfs_segclean", //186
4973 "SYS_lfs_segwait", //187
4974 "SYS_188",
4975 "SYS_189",
4976 "SYS_190",
4977 "SYS_pathconf", //191
4978 "SYS_fpathconf", //192
4979 "SYS_swapctl", //193
4980 "SYS_getrlimit", //194
4981 "SYS_setrlimit", //195
4982 "SYS_getdirentries", //196
4983 "SYS_mmap", //197
4984 "SYS___syscall", //198
4985 "SYS_lseek", //199
4986 "SYS_truncate", //200
4987 "SYS_ftruncate", //201
4988 "SYS___sysctl", //202
4989 "SYS_mlock", //203
4990 "SYS_munlock", //204
4991 "SYS_205",
4992 "SYS_futimes", //206
4993 "SYS_getpgid", //207
4994 "SYS_xfspioctl", //208
4995 "SYS_209",
4996 "SYS_210",
4997 "SYS_211",
4998 "SYS_212",
4999 "SYS_213",
5000 "SYS_214",
5001 "SYS_215",
5002 "SYS_216",
5003 "SYS_217",
5004 "SYS_218",
5005 "SYS_219",
5006 "SYS_220",
5007 "SYS_semget", //221
5008 "SYS_222",
5009 "SYS_223",
5010 "SYS_224",
5011 "SYS_msgget", //225
5012 "SYS_msgsnd", //226
5013 "SYS_msgrcv", //227
5014 "SYS_shmat", //228
5015 "SYS_229",
5016 "SYS_shmdt", //230
5017 "SYS_231",
5018 "SYS_clock_gettime", //232
5019 "SYS_clock_settime", //233
5020 "SYS_clock_getres", //234
5021 "SYS_235",
5022 "SYS_236",
5023 "SYS_237",
5024 "SYS_238",
5025 "SYS_239",
5026 "SYS_nanosleep", //240
5027 "SYS_241",
5028 "SYS_242",
5029 "SYS_243",
5030 "SYS_244",
5031 "SYS_245",
5032 "SYS_246",
5033 "SYS_247",
5034 "SYS_248",
5035 "SYS_249",
5036 "SYS_minherit", //250
5037 "SYS_rfork", //251
5038 "SYS_poll", //252
5039 "SYS_issetugid", //253
5040 "SYS_lchown", //254
5041 "SYS_getsid", //255
5042 "SYS_msync", //256
5043 "SYS_257",
5044 "SYS_258",
5045 "SYS_259",
5046 "SYS_getfsstat", //260
5047 "SYS_statfs", //261
5048 "SYS_fstatfs", //262
5049 "SYS_pipe", //263
5050 "SYS_fhopen", //264
5051 "SYS_265",
5052 "SYS_fhstatfs", //266
5053 "SYS_preadv", //267
5054 "SYS_pwritev", //268
5055 "SYS_kqueue", //269
5056 "SYS_kevent", //270
5057 "SYS_mlockall", //271
5058 "SYS_munlockall", //272
5059 "SYS_getpeereid", //273
5060 "SYS_274",
5061 "SYS_275",
5062 "SYS_276",
5063 "SYS_277",
5064 "SYS_278",
5065 "SYS_279",
5066 "SYS_280",
5067 "SYS_getresuid", //281
5068 "SYS_setresuid", //282
5069 "SYS_getresgid", //283
5070 "SYS_setresgid", //284
5071 "SYS_285",
5072 "SYS_mquery", //286
5073 "SYS_closefrom", //287
5074 "SYS_sigaltstack", //288
5075 "SYS_shmget", //289
5076 "SYS_semop", //290
5077 "SYS_stat", //291
5078 "SYS_fstat", //292
5079 "SYS_lstat", //293
5080 "SYS_fhstat", //294
5081 "SYS___semctl", //295
5082 "SYS_shmctl", //296
5083 "SYS_msgctl", //297
5084 "SYS_MAXSYSCALL", //298
5085 //299
5086 //300
5087 };
5088 uint32_t uEAX;
5089 if (!LogIsEnabled())
5090 return;
5091 uEAX = CPUMGetGuestEAX(pVCpu);
5092 switch (uEAX)
5093 {
5094 default:
5095 if (uEAX < RT_ELEMENTS(apsz))
5096 {
5097 uint32_t au32Args[8] = {0};
5098 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5099 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5100 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5101 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5102 }
5103 else
5104 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5105 break;
5106 }
5107}
5108
5109
5110#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5111/**
5112 * The Dll main entry point (stub).
5113 */
5114bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5115{
5116 return true;
5117}
5118
/**
 * Minimal memcpy replacement for the no-CRT build (the compiler may emit
 * calls to memcpy even when no C runtime is linked in).
 *
 * Plain forward byte-by-byte copy; as per the standard memcpy contract the
 * source and destination areas must not overlap.
 *
 * @returns dst.
 * @param   dst     Pointer to the destination buffer.
 * @param   src     Pointer to the source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* keep the const qualifier of 'src' (the original dropped it) */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5126
5127#endif
5128
/**
 * QEMU core callback invoked when the CPU's SMM state changes (stub).
 *
 * NOTE(review): intentionally empty — presumably the VBox recompiler does not
 * model System Management Mode here; confirm against the QEMU callers.
 *
 * @param   env     The CPU state.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette