VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 20783

Last change on this file since 20783 was 20747, checked in by vboxsync, 16 years ago

REM: Don't drop handler replay records. Added more handler-replay paranoia and massaging.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 164.6 KB
Line 
1/* $Id: VBoxRecompiler.c 20747 2009-06-21 20:23:22Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 *  Indexed by QEMU access size: [0]=U8, [1]=U16, [2]=U32.
 *  Registered with cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 *  Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (access-handler backed memory).
 *  Same size indexing as the MMIO tables. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (access-handler backed memory). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 * (Excluded on Win/AMD64 builds; presumably the 32-bit REM doesn't carry
 * the debugger console there -- TODO confirm against build system.)
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 *  Registered once by REMR3Init() via DBGCRegisterCommands(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
223/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
244/**
245 * Initializes the REM.
246 *
247 * @returns VBox status code.
248 * @param pVM The VM to operate on.
249 */
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution manager: sanity checks the VM layout,
 * initializes the QEMU CPU environment, registers the MMIO/handler memory
 * types and the saved-state unit, and (optionally) debugger commands and
 * statistics.  Must run before any physical memory registrations arrive
 * (see the MMR3PhysGetRamSize assertion below).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity: the REM substruct must fit its padding, the QEMU env
     * must fit REM_ENV_SIZE, and VM::rem must be 32-byte aligned.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());    /* self-test of the soft-float math helpers */
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;       /* back-link from the QEMU env to the VM */
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications until setup below is complete */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be executable; 1KB in low memory (see comment at
       the code_gen_prologue definition). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest's CPUID feature bits into the QEMU env. */
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered at most once across all VMs
     * (fRegisteredCmds is function-static).
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this inner 'rc' shadows the outer one; intentional,
           since a command registration failure is not fatal to init. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     * NOTE(review): the TBLookup/IRQ/RawCheck descriptions below look
     * copy-pasted from the Timers entry.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Build the handler-notification free list: all entries linked on the
     * free list (idxNext chains), pending list empty (-1 terminator).
     */
    PREMHANDLERNOTIFICATION pCur;
    unsigned i;

    pVM->rem.s.idxPendingList = -1;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1; i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }

    /* Last entry terminates the free list. */
    pCur = &pVM->rem.s.aHandlerNotifications[RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1];
    pCur->idxNext = -1;
    pCur->idxSelf = RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1;

    return rc;
}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
472/**
473 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM handle.
477 * @param fGuarded Whether to guard the map.
478 */
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per RAM page.  In guarded mode the map is
 * placed at the END of a page-aligned allocation so that an overrun runs
 * into a PROT_NONE region and faults immediately.
 *
 * @returns VBox status code.
 * @param   pVM      The VM handle.
 * @param   fGuarded Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the last byte of RAM; +1 gives the size.  Overflow
       (wrap to 0) means the address was out of range. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;     /* one dirty byte per page */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Simple heap allocation; no overrun detection. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;      /* ensure there is always guard space */
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything past the aligned map inaccessible. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map so its LAST byte abuts the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it: all pages marked dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
526
527
528/**
529 * Terminates the REM.
530 *
531 * Termination means cleaning up and freeing all resources,
532 * the VM it self is at this point powered off or suspended.
533 *
534 * @returns VBox status code.
535 * @param pVM The VM to operate on.
536 */
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently this only deregisters the statistics registered by REMR3Init;
 * the mirror list below must be kept in sync with the STAM_REG calls there.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters (registered unconditionally in REMR3Init). */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
598
599
600/**
601 * The VM is being reset.
602 *
603 * For the REM component this means to call the cpu_reset() and
604 * reinitialize some state variables.
605 *
606 * @param pVM VM handle.
607 */
608REMR3DECL(void) REMR3Reset(PVM pVM)
609{
610 /*
611 * Reset the REM cpu.
612 */
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
615 cpu_reset(&pVM->rem.s.Env);
616 pVM->rem.s.cInvalidatedPages = 0;
617 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
618 Assert(pVM->rem.s.cIgnoreAll == 0);
619
620 /* Clear raw ring 0 init state */
621 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
622
623 /* Flush the TBs the next time we execute code here. */
624 pVM->rem.s.fFlushTBs = true;
625}
626
627
628/**
629 * Execute state save operation.
630 *
631 * @returns VBox status code.
632 * @param pVM VM Handle.
633 * @param pSSM SSM operation handle.
634 */
635static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
636{
637 PREM pRem = &pVM->rem.s;
638
639 /*
640 * Save the required CPU Env bits.
641 * (Not much because we're never in REM when doing the save.)
642 */
643 LogFlow(("remR3Save:\n"));
644 Assert(!pRem->fInREM);
645 SSMR3PutU32(pSSM, pRem->Env.hflags);
646 SSMR3PutU32(pSSM, ~0); /* separator */
647
648 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
649 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
650 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
651
652 return SSMR3PutU32(pSSM, ~0); /* terminator */
653}
654
655
656/**
657 * Execute state load operation.
658 *
659 * @returns VBox status code.
660 * @param pVM VM Handle.
661 * @param pSSM SSM operation handle.
662 * @param u32Version Data layout version.
663 */
/**
 * Execute state load operation.
 *
 * Must mirror remR3Save's stream layout exactly; version 1.6 streams carry
 * extra redundant CPU state and an invalidated-page list that are read and
 * (mostly) discarded.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM,   &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM,   &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff (old invalidated-page list; bounds-checked).
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features (refresh the env, don't trust the stream).
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* shadows the outer pVCpu intentionally */

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
782
783
784
785#undef LOG_GROUP
786#define LOG_GROUP LOG_GROUP_REM_RUN
787
788/**
789 * Single steps an instruction in recompiled mode.
790 *
791 * Before calling this function the REM state needs to be in sync with
792 * the VM. Call REMR3State() to perform the sync. It's only necessary
793 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
794 * and after calling REMR3StateBack().
795 *
796 * @returns VBox status code.
797 *
798 * @param pVM VM Handle.
799 * @param pVCpu VMCPU Handle.
800 */
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved; restored below */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (Flat PC = EIP + CS base; cpu_breakpoint_remove returns 0 when one was removed.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successfully stepped: pulse the timers so the clock advances. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the QEMU exit reason to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* rc was parked in the REM state by whoever raised EXCP_RC. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
872
873
874/**
875 * Set a breakpoint using the REM facilities.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 * @param Address The breakpoint address.
880 * @thread The emulation thread.
881 */
882REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
883{
884 VM_ASSERT_EMT(pVM);
885 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
886 {
887 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
888 return VINF_SUCCESS;
889 }
890 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
891 return VERR_REM_NO_MORE_BP_SLOTS;
892}
893
894
895/**
896 * Clears a breakpoint set by REMR3BreakpointSet().
897 *
898 * @returns VBox status code.
899 * @param pVM The VM handle.
900 * @param Address The breakpoint address.
901 * @thread The emulation thread.
902 */
903REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
904{
905 VM_ASSERT_EMT(pVM);
906 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
907 {
908 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
909 return VINF_SUCCESS;
910 }
911 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
912 return VERR_REM_BP_NOT_FOUND;
913}
914
915
916/**
917 * Emulate an instruction.
918 *
919 * This function executes one instruction without letting anyone
920 * interrupt it. This is intended for being called while being in
921 * raw mode and thus will take care of all the state syncing between
922 * REM and the rest.
923 *
924 * @returns VBox status code.
925 * @param pVM VM handle.
926 * @param pVCpu VMCPU Handle.
927 */
928REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
929{
930 bool fFlushTBs;
931
932 int rc, rc2;
933 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
934
935 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
936 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
937 */
938 if (HWACCMIsEnabled(pVM))
939 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
940
941 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
942 fFlushTBs = pVM->rem.s.fFlushTBs;
943 pVM->rem.s.fFlushTBs = false;
944
945 /*
946 * Sync the state and enable single instruction / single stepping.
947 */
948 rc = REMR3State(pVM, pVCpu);
949 pVM->rem.s.fFlushTBs = fFlushTBs;
950 if (RT_SUCCESS(rc))
951 {
952 int interrupt_request = pVM->rem.s.Env.interrupt_request;
953 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
954 Assert(!pVM->rem.s.Env.singlestep_enabled);
955 /*
956 * Now we set the execute single instruction flag and enter the cpu_exec loop.
957 */
958 TMNotifyStartOfExecution(pVCpu);
959 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
960 rc = cpu_exec(&pVM->rem.s.Env);
961 TMNotifyEndOfExecution(pVCpu);
962 switch (rc)
963 {
964 /*
965 * Executed without anything out of the way happening.
966 */
967 case EXCP_SINGLE_INSTR:
968 rc = VINF_EM_RESCHEDULE;
969 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
970 break;
971
972 /*
973 * If we take a trap or start servicing a pending interrupt, we might end up here.
974 * (Timer thread or some other thread wishing EMT's attention.)
975 */
976 case EXCP_INTERRUPT:
977 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
978 rc = VINF_EM_RESCHEDULE;
979 break;
980
981 /*
982 * Single step, we assume!
983 * If there was a breakpoint there we're fucked now.
984 */
985 case EXCP_DEBUG:
986 {
987 /* breakpoint or single step? */
988 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
989 int iBP;
990 rc = VINF_EM_DBG_STEPPED;
991 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
992 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
993 {
994 rc = VINF_EM_DBG_BREAKPOINT;
995 break;
996 }
997 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
998 break;
999 }
1000
1001 /*
1002 * hlt instruction.
1003 */
1004 case EXCP_HLT:
1005 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1006 rc = VINF_EM_HALT;
1007 break;
1008
1009 /*
1010 * The VM has halted.
1011 */
1012 case EXCP_HALTED:
1013 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1014 rc = VINF_EM_HALT;
1015 break;
1016
1017 /*
1018 * Switch to RAW-mode.
1019 */
1020 case EXCP_EXECUTE_RAW:
1021 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1022 rc = VINF_EM_RESCHEDULE_RAW;
1023 break;
1024
1025 /*
1026 * Switch to hardware accelerated RAW-mode.
1027 */
1028 case EXCP_EXECUTE_HWACC:
1029 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1030 rc = VINF_EM_RESCHEDULE_HWACC;
1031 break;
1032
1033 /*
1034 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1035 */
1036 case EXCP_RC:
1037 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1038 rc = pVM->rem.s.rc;
1039 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1040 break;
1041
1042 /*
1043 * Figure out the rest when they arrive....
1044 */
1045 default:
1046 AssertMsgFailed(("rc=%d\n", rc));
1047 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1048 rc = VINF_EM_RESCHEDULE;
1049 break;
1050 }
1051
1052 /*
1053 * Switch back the state.
1054 */
1055 pVM->rem.s.Env.interrupt_request = interrupt_request;
1056 rc2 = REMR3StateBack(pVM, pVCpu);
1057 AssertRC(rc2);
1058 }
1059
1060 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1061 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1062 return rc;
1063}
1064
1065
1066/**
1067 * Runs code in recompiled mode.
1068 *
1069 * Before calling this function the REM state needs to be in sync with
1070 * the VM. Call REMR3State() to perform the sync. It's only necessary
1071 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1072 * and after calling REMR3StateBack().
1073 *
1074 * @returns VBox status code.
1075 *
1076 * @param pVM VM Handle.
1077 * @param pVCpu VMCPU Handle.
1078 */
1079REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1080{
1081 int rc;
1082 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1083 Assert(pVM->rem.s.fInREM);
1084
1085 TMNotifyStartOfExecution(pVCpu);
1086 rc = cpu_exec(&pVM->rem.s.Env);
1087 TMNotifyEndOfExecution(pVCpu);
1088 switch (rc)
1089 {
1090 /*
1091 * This happens when the execution was interrupted
1092 * by an external event, like pending timers.
1093 */
1094 case EXCP_INTERRUPT:
1095 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1096 rc = VINF_SUCCESS;
1097 break;
1098
1099 /*
1100 * hlt instruction.
1101 */
1102 case EXCP_HLT:
1103 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1104 rc = VINF_EM_HALT;
1105 break;
1106
1107 /*
1108 * The VM has halted.
1109 */
1110 case EXCP_HALTED:
1111 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1112 rc = VINF_EM_HALT;
1113 break;
1114
1115 /*
1116 * Breakpoint/single step.
1117 */
1118 case EXCP_DEBUG:
1119 {
1120#if 0//def DEBUG_bird
1121 static int iBP = 0;
1122 printf("howdy, breakpoint! iBP=%d\n", iBP);
1123 switch (iBP)
1124 {
1125 case 0:
1126 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1127 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1128 //pVM->rem.s.Env.interrupt_request = 0;
1129 //pVM->rem.s.Env.exception_index = -1;
1130 //g_fInterruptDisabled = 1;
1131 rc = VINF_SUCCESS;
1132 asm("int3");
1133 break;
1134 default:
1135 asm("int3");
1136 break;
1137 }
1138 iBP++;
1139#else
1140 /* breakpoint or single step? */
1141 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1142 int iBP;
1143 rc = VINF_EM_DBG_STEPPED;
1144 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1145 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1146 {
1147 rc = VINF_EM_DBG_BREAKPOINT;
1148 break;
1149 }
1150 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1151#endif
1152 break;
1153 }
1154
1155 /*
1156 * Switch to RAW-mode.
1157 */
1158 case EXCP_EXECUTE_RAW:
1159 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1160 rc = VINF_EM_RESCHEDULE_RAW;
1161 break;
1162
1163 /*
1164 * Switch to hardware accelerated RAW-mode.
1165 */
1166 case EXCP_EXECUTE_HWACC:
1167 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1168 rc = VINF_EM_RESCHEDULE_HWACC;
1169 break;
1170
1171 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1172 /*
1173 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1174 */
1175 case EXCP_RC:
1176 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1177 rc = pVM->rem.s.rc;
1178 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1179 break;
1180
1181 /*
1182 * Figure out the rest when they arrive....
1183 */
1184 default:
1185 AssertMsgFailed(("rc=%d\n", rc));
1186 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1187 rc = VINF_SUCCESS;
1188 break;
1189 }
1190
1191 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1192 return rc;
1193}
1194
1195
1196/**
1197 * Check if the cpu state is suitable for Raw execution.
1198 *
1199 * @returns boolean
1200 * @param env The CPU env struct.
1201 * @param eip The EIP to check this for (might differ from env->eip).
1202 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1203 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1204 *
1205 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1206 */
1207bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1208{
1209 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1210 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1211 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1212 uint32_t u32CR0;
1213
1214 /* Update counter. */
1215 env->pVM->rem.s.cCanExecuteRaw++;
1216
1217 if (HWACCMIsEnabled(env->pVM))
1218 {
1219 CPUMCTX Ctx;
1220
1221 env->state |= CPU_RAW_HWACC;
1222
1223 /*
1224 * Create partial context for HWACCMR3CanExecuteGuest
1225 */
1226 Ctx.cr0 = env->cr[0];
1227 Ctx.cr3 = env->cr[3];
1228 Ctx.cr4 = env->cr[4];
1229
1230 Ctx.tr = env->tr.selector;
1231 Ctx.trHid.u64Base = env->tr.base;
1232 Ctx.trHid.u32Limit = env->tr.limit;
1233 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1234
1235 Ctx.idtr.cbIdt = env->idt.limit;
1236 Ctx.idtr.pIdt = env->idt.base;
1237
1238 Ctx.gdtr.cbGdt = env->gdt.limit;
1239 Ctx.gdtr.pGdt = env->gdt.base;
1240
1241 Ctx.rsp = env->regs[R_ESP];
1242 Ctx.rip = env->eip;
1243
1244 Ctx.eflags.u32 = env->eflags;
1245
1246 Ctx.cs = env->segs[R_CS].selector;
1247 Ctx.csHid.u64Base = env->segs[R_CS].base;
1248 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1249 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1250
1251 Ctx.ds = env->segs[R_DS].selector;
1252 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1253 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1254 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1255
1256 Ctx.es = env->segs[R_ES].selector;
1257 Ctx.esHid.u64Base = env->segs[R_ES].base;
1258 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1259 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1260
1261 Ctx.fs = env->segs[R_FS].selector;
1262 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1263 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1264 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1265
1266 Ctx.gs = env->segs[R_GS].selector;
1267 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1268 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1269 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1270
1271 Ctx.ss = env->segs[R_SS].selector;
1272 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1273 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1274 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1275
1276 Ctx.msrEFER = env->efer;
1277
1278 /* Hardware accelerated raw-mode:
1279 *
1280 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1281 */
1282 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1283 {
1284 *piException = EXCP_EXECUTE_HWACC;
1285 return true;
1286 }
1287 return false;
1288 }
1289
1290 /*
1291 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1292 * or 32 bits protected mode ring 0 code
1293 *
1294 * The tests are ordered by the likelyhood of being true during normal execution.
1295 */
1296 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1297 {
1298 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1299 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1300 return false;
1301 }
1302
1303#ifndef VBOX_RAW_V86
1304 if (fFlags & VM_MASK) {
1305 STAM_COUNTER_INC(&gStatRefuseVM86);
1306 Log2(("raw mode refused: VM_MASK\n"));
1307 return false;
1308 }
1309#endif
1310
1311 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1312 {
1313#ifndef DEBUG_bird
1314 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1315#endif
1316 return false;
1317 }
1318
1319 if (env->singlestep_enabled)
1320 {
1321 //Log2(("raw mode refused: Single step\n"));
1322 return false;
1323 }
1324
1325 if (env->nb_breakpoints > 0)
1326 {
1327 //Log2(("raw mode refused: Breakpoints\n"));
1328 return false;
1329 }
1330
1331 u32CR0 = env->cr[0];
1332 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1333 {
1334 STAM_COUNTER_INC(&gStatRefusePaging);
1335 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1336 return false;
1337 }
1338
1339 if (env->cr[4] & CR4_PAE_MASK)
1340 {
1341 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1342 {
1343 STAM_COUNTER_INC(&gStatRefusePAE);
1344 return false;
1345 }
1346 }
1347
1348 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1349 {
1350 if (!EMIsRawRing3Enabled(env->pVM))
1351 return false;
1352
1353 if (!(env->eflags & IF_MASK))
1354 {
1355 STAM_COUNTER_INC(&gStatRefuseIF0);
1356 Log2(("raw mode refused: IF (RawR3)\n"));
1357 return false;
1358 }
1359
1360 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1361 {
1362 STAM_COUNTER_INC(&gStatRefuseWP0);
1363 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1364 return false;
1365 }
1366 }
1367 else
1368 {
1369 if (!EMIsRawRing0Enabled(env->pVM))
1370 return false;
1371
1372 // Let's start with pure 32 bits ring 0 code first
1373 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1374 {
1375 STAM_COUNTER_INC(&gStatRefuseCode16);
1376 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1377 return false;
1378 }
1379
1380 // Only R0
1381 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1382 {
1383 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1384 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1385 return false;
1386 }
1387
1388 if (!(u32CR0 & CR0_WP_MASK))
1389 {
1390 STAM_COUNTER_INC(&gStatRefuseWP0);
1391 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1392 return false;
1393 }
1394
1395 if (PATMIsPatchGCAddr(env->pVM, eip))
1396 {
1397 Log2(("raw r0 mode forced: patch code\n"));
1398 *piException = EXCP_EXECUTE_RAW;
1399 return true;
1400 }
1401
1402#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1403 if (!(env->eflags & IF_MASK))
1404 {
1405 STAM_COUNTER_INC(&gStatRefuseIF0);
1406 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1407 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1408 return false;
1409 }
1410#endif
1411
1412 env->state |= CPU_RAW_RING0;
1413 }
1414
1415 /*
1416 * Don't reschedule the first time we're called, because there might be
1417 * special reasons why we're here that is not covered by the above checks.
1418 */
1419 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1420 {
1421 Log2(("raw mode refused: first scheduling\n"));
1422 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1423 return false;
1424 }
1425
1426 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1427 *piException = EXCP_EXECUTE_RAW;
1428 return true;
1429}
1430
1431
1432/**
1433 * Fetches a code byte.
1434 *
1435 * @returns Success indicator (bool) for ease of use.
1436 * @param env The CPU environment structure.
1437 * @param GCPtrInstr Where to fetch code.
1438 * @param pu8Byte Where to store the byte on success
1439 */
1440bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1441{
1442 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1443 if (RT_SUCCESS(rc))
1444 return true;
1445 return false;
1446}
1447
1448
1449/**
1450 * Flush (or invalidate if you like) page table/dir entry.
1451 *
1452 * (invlpg instruction; tlb_flush_page)
1453 *
1454 * @param env Pointer to cpu environment.
1455 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1456 */
1457void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1458{
1459 PVM pVM = env->pVM;
1460 PCPUMCTX pCtx;
1461 int rc;
1462
1463 /*
1464 * When we're replaying invlpg instructions or restoring a saved
1465 * state we disable this path.
1466 */
1467 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1468 return;
1469 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1470 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1471
1472 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1473
1474 /*
1475 * Update the control registers before calling PGMFlushPage.
1476 */
1477 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1478 Assert(pCtx);
1479 pCtx->cr0 = env->cr[0];
1480 pCtx->cr3 = env->cr[3];
1481 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1482 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1483 pCtx->cr4 = env->cr[4];
1484
1485 /*
1486 * Let PGM do the rest.
1487 */
1488 Assert(env->pVCpu);
1489 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1490 if (RT_FAILURE(rc))
1491 {
1492 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1493 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1494 }
1495 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1496}
1497
1498
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Returns a host pointer for the guest physical address, with status
 * encoded in the low bits: 1 == invalid/unassigned page, bit 1 set ==
 * write access must be trapped (dirty/code page monitoring).
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): fWritable is not forwarded - the call hard-codes true and
       relies on VINF_PGM_PHYS_TLB_CATCH_WRITE for write monitoring; confirm
       this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1521
1522
1523/**
1524 * Called from tlb_protect_code in order to write monitor a code page.
1525 *
1526 * @param env Pointer to the CPU environment.
1527 * @param GCPtr Code page to monitor
1528 */
1529void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1530{
1531#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1532 Assert(env->pVM->rem.s.fInREM);
1533 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1534 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1535 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1536 && !(env->eflags & VM_MASK) /* no V86 mode */
1537 && !HWACCMIsEnabled(env->pVM))
1538 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1539#endif
1540}
1541
1542
1543/**
1544 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1545 *
1546 * @param env Pointer to the CPU environment.
1547 * @param GCPtr Code page to monitor
1548 */
1549void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1550{
1551 Assert(env->pVM->rem.s.fInREM);
1552#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1553 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1554 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1555 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1556 && !(env->eflags & VM_MASK) /* no V86 mode */
1557 && !HWACCMIsEnabled(env->pVM))
1558 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1559#endif
1560}
1561
1562
1563/**
1564 * Called when the CPU is initialized, any of the CRx registers are changed or
1565 * when the A20 line is modified.
1566 *
1567 * @param env Pointer to the CPU environment.
1568 * @param fGlobal Set if the flush is global.
1569 */
1570void remR3FlushTLB(CPUState *env, bool fGlobal)
1571{
1572 PVM pVM = env->pVM;
1573 PCPUMCTX pCtx;
1574
1575 /*
1576 * When we're replaying invlpg instructions or restoring a saved
1577 * state we disable this path.
1578 */
1579 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1580 return;
1581 Assert(pVM->rem.s.fInREM);
1582
1583 /*
1584 * The caller doesn't check cr4, so we have to do that for ourselves.
1585 */
1586 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1587 fGlobal = true;
1588 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1589
1590 /*
1591 * Update the control registers before calling PGMR3FlushTLB.
1592 */
1593 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1594 Assert(pCtx);
1595 pCtx->cr0 = env->cr[0];
1596 pCtx->cr3 = env->cr[3];
1597 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1598 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1599 pCtx->cr4 = env->cr[4];
1600
1601 /*
1602 * Let PGM do the rest.
1603 */
1604 Assert(env->pVCpu);
1605 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1606}
1607
1608
1609/**
1610 * Called when any of the cr0, cr4 or efer registers is updated.
1611 *
1612 * @param env Pointer to the CPU environment.
1613 */
1614void remR3ChangeCpuMode(CPUState *env)
1615{
1616 PVM pVM = env->pVM;
1617 uint64_t efer;
1618 PCPUMCTX pCtx;
1619 int rc;
1620
1621 /*
1622 * When we're replaying loads or restoring a saved
1623 * state this path is disabled.
1624 */
1625 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1626 return;
1627 Assert(pVM->rem.s.fInREM);
1628
1629 /*
1630 * Update the control registers before calling PGMChangeMode()
1631 * as it may need to map whatever cr3 is pointing to.
1632 */
1633 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1634 Assert(pCtx);
1635 pCtx->cr0 = env->cr[0];
1636 pCtx->cr3 = env->cr[3];
1637 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1638 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1639 pCtx->cr4 = env->cr[4];
1640
1641#ifdef TARGET_X86_64
1642 efer = env->efer;
1643#else
1644 efer = 0;
1645#endif
1646 Assert(env->pVCpu);
1647 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1648 if (rc != VINF_SUCCESS)
1649 {
1650 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1651 {
1652 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1653 remR3RaiseRC(env->pVM, rc);
1654 }
1655 else
1656 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1657 }
1658}
1659
1660
1661/**
1662 * Called from compiled code to run dma.
1663 *
1664 * @param env Pointer to the CPU environment.
1665 */
1666void remR3DmaRun(CPUState *env)
1667{
1668 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1669 PDMR3DmaRun(env->pVM);
1670 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1671}
1672
1673
1674/**
1675 * Called from compiled code to schedule pending timers in VMM
1676 *
1677 * @param env Pointer to the CPU environment.
1678 */
1679void remR3TimersRun(CPUState *env)
1680{
1681 LogFlow(("remR3TimersRun:\n"));
1682 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1683 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1684 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1685 TMR3TimerQueuesDo(env->pVM);
1686 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1687 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1688}
1689
1690
1691/**
1692 * Record trap occurance
1693 *
1694 * @returns VBox status code
1695 * @param env Pointer to the CPU environment.
1696 * @param uTrap Trap nr
1697 * @param uErrorCode Error code
1698 * @param pvNextEIP Next EIP
1699 */
1700int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1701{
1702 PVM pVM = env->pVM;
1703#ifdef VBOX_WITH_STATISTICS
1704 static STAMCOUNTER s_aStatTrap[255];
1705 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1706#endif
1707
1708#ifdef VBOX_WITH_STATISTICS
1709 if (uTrap < 255)
1710 {
1711 if (!s_aRegisters[uTrap])
1712 {
1713 char szStatName[64];
1714 s_aRegisters[uTrap] = true;
1715 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1716 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1717 }
1718 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1719 }
1720#endif
1721 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1722 if( uTrap < 0x20
1723 && (env->cr[0] & X86_CR0_PE)
1724 && !(env->eflags & X86_EFL_VM))
1725 {
1726#ifdef DEBUG
1727 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1728#endif
1729 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1730 {
1731 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1732 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1733 return VERR_REM_TOO_MANY_TRAPS;
1734 }
1735 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1736 pVM->rem.s.cPendingExceptions = 1;
1737 pVM->rem.s.uPendingException = uTrap;
1738 pVM->rem.s.uPendingExcptEIP = env->eip;
1739 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1740 }
1741 else
1742 {
1743 pVM->rem.s.cPendingExceptions = 0;
1744 pVM->rem.s.uPendingException = uTrap;
1745 pVM->rem.s.uPendingExcptEIP = env->eip;
1746 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1747 }
1748 return VINF_SUCCESS;
1749}
1750
1751
1752/*
1753 * Clear current active trap
1754 *
1755 * @param pVM VM Handle.
1756 */
1757void remR3TrapClear(PVM pVM)
1758{
1759 pVM->rem.s.cPendingExceptions = 0;
1760 pVM->rem.s.uPendingException = 0;
1761 pVM->rem.s.uPendingExcptEIP = 0;
1762 pVM->rem.s.uPendingExcptCR2 = 0;
1763}
1764
1765
1766/*
1767 * Record previous call instruction addresses
1768 *
1769 * @param env Pointer to the CPU environment.
1770 */
1771void remR3RecordCall(CPUState *env)
1772{
1773 CSAMR3RecordCallAddress(env->pVM, env->eip);
1774}
1775
1776
1777/**
1778 * Syncs the internal REM state with the VM.
1779 *
1780 * This must be called before REMR3Run() is invoked whenever when the REM
1781 * state is not up to date. Calling it several times in a row is not
1782 * permitted.
1783 *
1784 * @returns VBox status code.
1785 *
1786 * @param pVM VM Handle.
1787 * @param pVCpu VMCPU Handle.
1788 *
1789 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1790 * no do this since the majority of the callers don't want any unnecessary of events
1791 * pending that would immediatly interrupt execution.
1792 */
1793REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1794{
1795 register const CPUMCTX *pCtx;
1796 register unsigned fFlags;
1797 bool fHiddenSelRegsValid;
1798 unsigned i;
1799 TRPMEVENT enmType;
1800 uint8_t u8TrapNo;
1801 int rc;
1802
1803 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1804 Log2(("REMR3State:\n"));
1805
1806 pVM->rem.s.Env.pVCpu = pVCpu;
1807 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1808 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1809
1810 Assert(!pVM->rem.s.fInREM);
1811 pVM->rem.s.fInStateSync = true;
1812
1813 /*
1814 * If we have to flush TBs, do that immediately.
1815 */
1816 if (pVM->rem.s.fFlushTBs)
1817 {
1818 STAM_COUNTER_INC(&gStatFlushTBs);
1819 tb_flush(&pVM->rem.s.Env);
1820 pVM->rem.s.fFlushTBs = false;
1821 }
1822
1823 /*
1824 * Copy the registers which require no special handling.
1825 */
1826#ifdef TARGET_X86_64
1827 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1828 Assert(R_EAX == 0);
1829 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1830 Assert(R_ECX == 1);
1831 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1832 Assert(R_EDX == 2);
1833 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1834 Assert(R_EBX == 3);
1835 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1836 Assert(R_ESP == 4);
1837 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1838 Assert(R_EBP == 5);
1839 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1840 Assert(R_ESI == 6);
1841 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1842 Assert(R_EDI == 7);
1843 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1844 pVM->rem.s.Env.regs[8] = pCtx->r8;
1845 pVM->rem.s.Env.regs[9] = pCtx->r9;
1846 pVM->rem.s.Env.regs[10] = pCtx->r10;
1847 pVM->rem.s.Env.regs[11] = pCtx->r11;
1848 pVM->rem.s.Env.regs[12] = pCtx->r12;
1849 pVM->rem.s.Env.regs[13] = pCtx->r13;
1850 pVM->rem.s.Env.regs[14] = pCtx->r14;
1851 pVM->rem.s.Env.regs[15] = pCtx->r15;
1852
1853 pVM->rem.s.Env.eip = pCtx->rip;
1854
1855 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1856#else
1857 Assert(R_EAX == 0);
1858 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1859 Assert(R_ECX == 1);
1860 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1861 Assert(R_EDX == 2);
1862 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1863 Assert(R_EBX == 3);
1864 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1865 Assert(R_ESP == 4);
1866 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1867 Assert(R_EBP == 5);
1868 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1869 Assert(R_ESI == 6);
1870 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1871 Assert(R_EDI == 7);
1872 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1873 pVM->rem.s.Env.eip = pCtx->eip;
1874
1875 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1876#endif
1877
1878 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1879
1880 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1881 for (i=0;i<8;i++)
1882 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1883
1884 /*
1885 * Clear the halted hidden flag (the interrupt waking up the CPU can
1886 * have been dispatched in raw mode).
1887 */
1888 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1889
1890 /*
1891 * Replay invlpg?
1892 */
1893 if (pVM->rem.s.cInvalidatedPages)
1894 {
1895 RTUINT i;
1896
1897 pVM->rem.s.fIgnoreInvlPg = true;
1898 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1899 {
1900 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1901 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1902 }
1903 pVM->rem.s.fIgnoreInvlPg = false;
1904 pVM->rem.s.cInvalidatedPages = 0;
1905 }
1906
1907 /* Replay notification changes. */
1908 REMR3ReplayHandlerNotifications(pVM);
1909
1910 /* Update MSRs; before CRx registers! */
1911 pVM->rem.s.Env.efer = pCtx->msrEFER;
1912 pVM->rem.s.Env.star = pCtx->msrSTAR;
1913 pVM->rem.s.Env.pat = pCtx->msrPAT;
1914#ifdef TARGET_X86_64
1915 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1916 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1917 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1918 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1919
1920 /* Update the internal long mode activate flag according to the new EFER value. */
1921 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1922 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1923 else
1924 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1925#endif
1926
1927 /*
1928 * Registers which are rarely changed and require special handling / order when changed.
1929 */
1930 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1931 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1932 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1933 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1934 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1935 {
1936 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1937 {
1938 pVM->rem.s.fIgnoreCR3Load = true;
1939 tlb_flush(&pVM->rem.s.Env, true);
1940 pVM->rem.s.fIgnoreCR3Load = false;
1941 }
1942
1943 /* CR4 before CR0! */
1944 if (fFlags & CPUM_CHANGED_CR4)
1945 {
1946 pVM->rem.s.fIgnoreCR3Load = true;
1947 pVM->rem.s.fIgnoreCpuMode = true;
1948 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1949 pVM->rem.s.fIgnoreCpuMode = false;
1950 pVM->rem.s.fIgnoreCR3Load = false;
1951 }
1952
1953 if (fFlags & CPUM_CHANGED_CR0)
1954 {
1955 pVM->rem.s.fIgnoreCR3Load = true;
1956 pVM->rem.s.fIgnoreCpuMode = true;
1957 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1958 pVM->rem.s.fIgnoreCpuMode = false;
1959 pVM->rem.s.fIgnoreCR3Load = false;
1960 }
1961
1962 if (fFlags & CPUM_CHANGED_CR3)
1963 {
1964 pVM->rem.s.fIgnoreCR3Load = true;
1965 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1966 pVM->rem.s.fIgnoreCR3Load = false;
1967 }
1968
1969 if (fFlags & CPUM_CHANGED_GDTR)
1970 {
1971 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1972 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1973 }
1974
1975 if (fFlags & CPUM_CHANGED_IDTR)
1976 {
1977 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1978 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1979 }
1980
1981 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1982 {
1983 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1984 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1985 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1986 }
1987
1988 if (fFlags & CPUM_CHANGED_LDTR)
1989 {
1990 if (fHiddenSelRegsValid)
1991 {
1992 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1993 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1994 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1995 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1996 }
1997 else
1998 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1999 }
2000
2001 if (fFlags & CPUM_CHANGED_CPUID)
2002 {
2003 uint32_t u32Dummy;
2004
2005 /*
2006 * Get the CPUID features.
2007 */
2008 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2009 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2010 }
2011
2012 /* Sync FPU state after CR4, CPUID and EFER (!). */
2013 if (fFlags & CPUM_CHANGED_FPU_REM)
2014 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2015 }
2016
2017 /*
2018 * Sync TR unconditionally to make life simpler.
2019 */
2020 pVM->rem.s.Env.tr.selector = pCtx->tr;
2021 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2022 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2023 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2024 /* Note! do_interrupt will fault if the busy flag is still set... */
2025 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2026
2027 /*
2028 * Update selector registers.
2029 * This must be done *after* we've synced gdt, ldt and crX registers
2030 * since we're reading the GDT/LDT om sync_seg. This will happen with
2031 * saved state which takes a quick dip into rawmode for instance.
2032 */
2033 /*
2034 * Stack; Note first check this one as the CPL might have changed. The
2035 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2036 */
2037
2038 if (fHiddenSelRegsValid)
2039 {
2040 /* The hidden selector registers are valid in the CPU context. */
2041 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2042
2043 /* Set current CPL */
2044 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2045
2046 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2047 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2048 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2049 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2052 }
2053 else
2054 {
2055 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2056 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2057 {
2058 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2059
2060 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2061 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2062#ifdef VBOX_WITH_STATISTICS
2063 if (pVM->rem.s.Env.segs[R_SS].newselector)
2064 {
2065 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2066 }
2067#endif
2068 }
2069 else
2070 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2071
2072 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2073 {
2074 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2075 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2076#ifdef VBOX_WITH_STATISTICS
2077 if (pVM->rem.s.Env.segs[R_ES].newselector)
2078 {
2079 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2080 }
2081#endif
2082 }
2083 else
2084 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2085
2086 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2087 {
2088 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2089 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2090#ifdef VBOX_WITH_STATISTICS
2091 if (pVM->rem.s.Env.segs[R_CS].newselector)
2092 {
2093 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2094 }
2095#endif
2096 }
2097 else
2098 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2099
2100 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2101 {
2102 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2103 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2104#ifdef VBOX_WITH_STATISTICS
2105 if (pVM->rem.s.Env.segs[R_DS].newselector)
2106 {
2107 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2108 }
2109#endif
2110 }
2111 else
2112 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2113
2114 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2115 * be the same but not the base/limit. */
2116 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2117 {
2118 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2119 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2120#ifdef VBOX_WITH_STATISTICS
2121 if (pVM->rem.s.Env.segs[R_FS].newselector)
2122 {
2123 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2124 }
2125#endif
2126 }
2127 else
2128 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2129
2130 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2131 {
2132 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2133 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2134#ifdef VBOX_WITH_STATISTICS
2135 if (pVM->rem.s.Env.segs[R_GS].newselector)
2136 {
2137 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2138 }
2139#endif
2140 }
2141 else
2142 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2143 }
2144
2145 /*
2146 * Check for traps.
2147 */
2148 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2149 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2150 if (RT_SUCCESS(rc))
2151 {
2152#ifdef DEBUG
2153 if (u8TrapNo == 0x80)
2154 {
2155 remR3DumpLnxSyscall(pVCpu);
2156 remR3DumpOBsdSyscall(pVCpu);
2157 }
2158#endif
2159
2160 pVM->rem.s.Env.exception_index = u8TrapNo;
2161 if (enmType != TRPM_SOFTWARE_INT)
2162 {
2163 pVM->rem.s.Env.exception_is_int = 0;
2164 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2165 }
2166 else
2167 {
2168 /*
2169 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2170 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2171 * for int03 and into.
2172 */
2173 pVM->rem.s.Env.exception_is_int = 1;
2174 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2175 /* int 3 may be generated by one-byte 0xcc */
2176 if (u8TrapNo == 3)
2177 {
2178 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2179 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2180 }
2181 /* int 4 may be generated by one-byte 0xce */
2182 else if (u8TrapNo == 4)
2183 {
2184 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2185 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2186 }
2187 }
2188
2189 /* get error code and cr2 if needed. */
2190 switch (u8TrapNo)
2191 {
2192 case 0x0e:
2193 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2194 /* fallthru */
2195 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2196 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2197 break;
2198
2199 case 0x11: case 0x08:
2200 default:
2201 pVM->rem.s.Env.error_code = 0;
2202 break;
2203 }
2204
2205 /*
2206 * We can now reset the active trap since the recompiler is gonna have a go at it.
2207 */
2208 rc = TRPMResetTrap(pVCpu);
2209 AssertRC(rc);
2210 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2211 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2212 }
2213
2214 /*
2215 * Clear old interrupt request flags; Check for pending hardware interrupts.
2216 * (See @remark for why we don't check for other FFs.)
2217 */
2218 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2219 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2220 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2221 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2222
2223 /*
2224 * We're now in REM mode.
2225 */
2226 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2227 pVM->rem.s.fInREM = true;
2228 pVM->rem.s.fInStateSync = false;
2229 pVM->rem.s.cCanExecuteRaw = 0;
2230 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2231 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2232 return VINF_SUCCESS;
2233}
2234
2235
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM); /* Must only be called while REM owns the state. */

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* copies REM's raw FPU/XMM state into the guest context */
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that are still marked lazy/out-of-sync when leaving REM. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* Toggling CR4.VME requires resyncing the TSS, so raise the force-action flag. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR: flag a resync only when the base actually moved. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    /* IDTR: same change-detection scheme as the GDTR. */
    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: 0xF0FF keeps the attribute word only (drops limit 19:16 stored in the descriptor dword). */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Restore the busy bit (stripped before entering REM) so the VMM sees an active TSS. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

    /* Clear any trap state recorded on the REM side before re-asserting via TRPM below. */
    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        /* Hand the pending exception back to TRPM so the VMM dispatches it. */
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e: /* #PF also carries the fault address in CR2. */
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d: /* #TS, #NP, #SS, #GP push an error code. */
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2486
2487
2488/**
2489 * This is called by the disassembler when it wants to update the cpu state
2490 * before for instance doing a register dump.
2491 */
2492static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2493{
2494 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2495 unsigned i;
2496
2497 Assert(pVM->rem.s.fInREM);
2498
2499 /*
2500 * Copy back the registers.
2501 * This is done in the order they are declared in the CPUMCTX structure.
2502 */
2503
2504 /** @todo FOP */
2505 /** @todo FPUIP */
2506 /** @todo CS */
2507 /** @todo FPUDP */
2508 /** @todo DS */
2509 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2510 pCtx->fpu.MXCSR = 0;
2511 pCtx->fpu.MXCSR_MASK = 0;
2512
2513 /** @todo check if FPU/XMM was actually used in the recompiler */
2514 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2515//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2516
2517#ifdef TARGET_X86_64
2518 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2519 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2520 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2521 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2522 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2523 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2524 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2525 pCtx->r8 = pVM->rem.s.Env.regs[8];
2526 pCtx->r9 = pVM->rem.s.Env.regs[9];
2527 pCtx->r10 = pVM->rem.s.Env.regs[10];
2528 pCtx->r11 = pVM->rem.s.Env.regs[11];
2529 pCtx->r12 = pVM->rem.s.Env.regs[12];
2530 pCtx->r13 = pVM->rem.s.Env.regs[13];
2531 pCtx->r14 = pVM->rem.s.Env.regs[14];
2532 pCtx->r15 = pVM->rem.s.Env.regs[15];
2533
2534 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2535#else
2536 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2537 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2538 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2539 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2540 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2541 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2542 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2543
2544 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2545#endif
2546
2547 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2548
2549 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2550 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2551 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2552 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2553 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2554
2555#ifdef TARGET_X86_64
2556 pCtx->rip = pVM->rem.s.Env.eip;
2557 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2558#else
2559 pCtx->eip = pVM->rem.s.Env.eip;
2560 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2561#endif
2562
2563 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2564 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2565 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2566 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2567 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2568 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2569
2570 for (i = 0; i < 8; i++)
2571 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2572
2573 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2574 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2575 {
2576 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2577 STAM_COUNTER_INC(&gStatREMGDTChange);
2578 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2579 }
2580
2581 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2582 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2583 {
2584 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2585 STAM_COUNTER_INC(&gStatREMIDTChange);
2586 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2587 }
2588
2589 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2590 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2591 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2592 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2593 {
2594 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2595 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2596 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2597 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2598 STAM_COUNTER_INC(&gStatREMLDTRChange);
2599 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2600 }
2601
2602 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2603 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2604 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2605 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2606 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2607 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2608 : 0) )
2609 {
2610 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2611 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2612 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2613 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2614 pCtx->tr = pVM->rem.s.Env.tr.selector;
2615 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2616 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2617 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2618 if (pCtx->trHid.Attr.u)
2619 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2620 STAM_COUNTER_INC(&gStatREMTRChange);
2621 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2622 }
2623
2624 /** @todo These values could still be out of sync! */
2625 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2626 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2627 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2628 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2629
2630 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2631 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2632 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2635 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2636 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2637
2638 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2639 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2640 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2641
2642 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2643 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2644 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2645
2646 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2647 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2648 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2649
2650 /* Sysenter MSR */
2651 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2652 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2653 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2654
2655 /* System MSRs. */
2656 pCtx->msrEFER = pVM->rem.s.Env.efer;
2657 pCtx->msrSTAR = pVM->rem.s.Env.star;
2658 pCtx->msrPAT = pVM->rem.s.Env.pat;
2659#ifdef TARGET_X86_64
2660 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2661 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2662 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2663 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2664#endif
2665
2666}
2667
2668
2669/**
2670 * Update the VMM state information if we're currently in REM.
2671 *
2672 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2673 * we're currently executing in REM and the VMM state is invalid. This method will of
2674 * course check that we're executing in REM before syncing any data over to the VMM.
2675 *
2676 * @param pVM The VM handle.
2677 * @param pVCpu The VMCPU handle.
2678 */
2679REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2680{
2681 if (pVM->rem.s.fInREM)
2682 remR3StateUpdate(pVM, pVCpu);
2683}
2684
2685
2686#undef LOG_GROUP
2687#define LOG_GROUP LOG_GROUP_REM
2688
2689
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the qemu call with cIgnoreAll.  NOTE(review): this appears to
       suppress the notifications/callbacks triggered by cpu_x86_set_a20 itself
       (e.g. TLB flush related) - confirm against the cIgnoreAll consumers. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2711
2712
2713/**
2714 * Replays the handler notification changes
2715 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2716 *
2717 * @param pVM VM handle.
2718 */
2719REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2720{
2721 /*
2722 * Replay the flushes.
2723 */
2724 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2725 VM_ASSERT_EMT(pVM);
2726
2727 /** @todo this isn't ensuring correct replay order. */
2728 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY_BIT))
2729 {
2730 PREMHANDLERNOTIFICATION pReqsRev;
2731 PREMHANDLERNOTIFICATION pReqs;
2732 uint32_t idxNext;
2733 uint32_t idxReqs;
2734
2735 /* Lockless purging of pending notifications. */
2736 idxReqs = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2737 if (idxReqs == UINT32_MAX)
2738 return;
2739 Assert(idxReqs < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2740
2741 /*
2742 * Reverse the list to process it in FIFO order.
2743 */
2744 pReqsRev = &pVM->rem.s.aHandlerNotifications[idxReqs];
2745 pReqs = NULL;
2746 while (pReqsRev)
2747 {
2748 PREMHANDLERNOTIFICATION pCur = pReqsRev;
2749 idxNext = pReqsRev->idxNext;
2750 if (idxNext != UINT32_MAX)
2751 {
2752 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2753 pReqsRev = &pVM->rem.s.aHandlerNotifications[idxNext];
2754 }
2755 else
2756 pReqsRev = NULL;
2757 pCur->idxNext = idxNext;
2758 pReqs = pCur;
2759 }
2760
2761 /*
2762 * Loop thru the list, reinserting the record into the free list as they are
2763 * processed to avoid having other EMTs running out of entries while we're flushing.
2764 */
2765 while (pReqs)
2766 {
2767 PREMHANDLERNOTIFICATION pCur = pReqs;
2768
2769 switch (pCur->enmKind)
2770 {
2771 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2772 remR3NotifyHandlerPhysicalRegister(pVM,
2773 pCur->u.PhysicalRegister.enmType,
2774 pCur->u.PhysicalRegister.GCPhys,
2775 pCur->u.PhysicalRegister.cb,
2776 pCur->u.PhysicalRegister.fHasHCHandler);
2777 break;
2778
2779 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2780 remR3NotifyHandlerPhysicalDeregister(pVM,
2781 pCur->u.PhysicalDeregister.enmType,
2782 pCur->u.PhysicalDeregister.GCPhys,
2783 pCur->u.PhysicalDeregister.cb,
2784 pCur->u.PhysicalDeregister.fHasHCHandler,
2785 pCur->u.PhysicalDeregister.fRestoreAsRAM);
2786 break;
2787
2788 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2789 remR3NotifyHandlerPhysicalModify(pVM,
2790 pCur->u.PhysicalModify.enmType,
2791 pCur->u.PhysicalModify.GCPhysOld,
2792 pCur->u.PhysicalModify.GCPhysNew,
2793 pCur->u.PhysicalModify.cb,
2794 pCur->u.PhysicalModify.fHasHCHandler,
2795 pCur->u.PhysicalModify.fRestoreAsRAM);
2796 break;
2797
2798 default:
2799 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2800 break;
2801 }
2802
2803 /*
2804 * Advance pReqs.
2805 */
2806 idxNext = pCur->idxNext;
2807 if (idxNext != UINT32_MAX)
2808 {
2809 AssertMsg(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("idxNext=%d\n", idxNext));
2810 pReqs = &pVM->rem.s.aHandlerNotifications[idxNext];
2811 }
2812 else
2813 pReqs = NULL;
2814
2815 /*
2816 * Put the record back into the free list.
2817 */
2818 do
2819 {
2820 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2821 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2822 ASMCompilerBarrier();
2823 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pCur->idxSelf, idxNext));
2824 }
2825 }
2826}
2827
2828
2829/**
2830 * Notify REM about changed code page.
2831 *
2832 * @returns VBox status code.
2833 * @param pVM VM handle.
2834 * @param pVCpu VMCPU handle.
2835 * @param pvCodePage Code page address
2836 */
2837REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2838{
2839#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2840 int rc;
2841 RTGCPHYS PhysGC;
2842 uint64_t flags;
2843
2844 VM_ASSERT_EMT(pVM);
2845
2846 /*
2847 * Get the physical page address.
2848 */
2849 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2850 if (rc == VINF_SUCCESS)
2851 {
2852 /*
2853 * Sync the required registers and flush the whole page.
2854 * (Easier to do the whole page than notifying it about each physical
2855 * byte that was changed.
2856 */
2857 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2858 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2859 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2860 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2861
2862 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2863 }
2864#endif
2865 return VINF_SUCCESS;
2866}
2867
2868
2869/**
2870 * Notification about a successful MMR3PhysRegister() call.
2871 *
2872 * @param pVM VM handle.
2873 * @param GCPhys The physical address the RAM.
2874 * @param cb Size of the memory.
2875 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2876 */
2877REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2878{
2879 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2880 VM_ASSERT_EMT(pVM);
2881
2882 /*
2883 * Validate input - we trust the caller.
2884 */
2885 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2886 Assert(cb);
2887 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2888 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2889
2890 /*
2891 * Base ram? Update GCPhysLastRam.
2892 */
2893 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2894 {
2895 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2896 {
2897 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2898 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2899 }
2900 }
2901
2902 /*
2903 * Register the ram.
2904 */
2905 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2906
2907 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2908 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2909 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2910
2911 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2912}
2913
2914
2915/**
2916 * Notification about a successful MMR3PhysRomRegister() call.
2917 *
2918 * @param pVM VM handle.
2919 * @param GCPhys The physical address of the ROM.
2920 * @param cb The size of the ROM.
2921 * @param pvCopy Pointer to the ROM copy.
2922 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2923 * This function will be called when ever the protection of the
2924 * shadow ROM changes (at reset and end of POST).
2925 */
2926REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2927{
2928 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2929 VM_ASSERT_EMT(pVM);
2930
2931 /*
2932 * Validate input - we trust the caller.
2933 */
2934 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2935 Assert(cb);
2936 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2937
2938 /*
2939 * Register the rom.
2940 */
2941 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2942
2943 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2944 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2945 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2946
2947 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2948}
2949
2950
2951/**
2952 * Notification about a successful memory deregistration or reservation.
2953 *
2954 * @param pVM VM Handle.
2955 * @param GCPhys Start physical address.
2956 * @param cb The size of the range.
2957 */
2958REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2959{
2960 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2961 VM_ASSERT_EMT(pVM);
2962
2963 /*
2964 * Validate input - we trust the caller.
2965 */
2966 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2967 Assert(cb);
2968 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2969
2970 /*
2971 * Unassigning the memory.
2972 */
2973 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2974
2975 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2976 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2977 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2978
2979 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2980}
2981
2982
2983/**
2984 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2985 *
2986 * @param pVM VM Handle.
2987 * @param enmType Handler type.
2988 * @param GCPhys Handler range address.
2989 * @param cb Size of the handler range.
2990 * @param fHasHCHandler Set if the handler has a HC callback function.
2991 *
2992 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2993 * Handler memory type to memory which has no HC handler.
2994 */
2995static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2996{
2997 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2998 enmType, GCPhys, cb, fHasHCHandler));
2999
3000 VM_ASSERT_EMT(pVM);
3001 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3002 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3003
3004
3005 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3006
3007 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3008 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3009 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3010 else if (fHasHCHandler)
3011 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3012 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3013
3014 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3015}
3016
3017/**
3018 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3019 *
3020 * @param pVM VM Handle.
3021 * @param enmType Handler type.
3022 * @param GCPhys Handler range address.
3023 * @param cb Size of the handler range.
3024 * @param fHasHCHandler Set if the handler has a HC callback function.
3025 *
3026 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3027 * Handler memory type to memory which has no HC handler.
3028 */
3029REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3030{
3031 REMR3ReplayHandlerNotifications(pVM);
3032
3033 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3034}
3035
3036/**
3037 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3038 *
3039 * @param pVM VM Handle.
3040 * @param enmType Handler type.
3041 * @param GCPhys Handler range address.
3042 * @param cb Size of the handler range.
3043 * @param fHasHCHandler Set if the handler has a HC callback function.
3044 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3045 */
3046static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3047{
3048 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3049 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3050 VM_ASSERT_EMT(pVM);
3051
3052
3053 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3054
3055 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3056 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3057 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3058 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3059 else if (fHasHCHandler)
3060 {
3061 if (!fRestoreAsRAM)
3062 {
3063 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3064 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3065 }
3066 else
3067 {
3068 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3069 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3070 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3071 }
3072 }
3073 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3074
3075 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3076}
3077
3078/**
3079 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3080 *
3081 * @param pVM VM Handle.
3082 * @param enmType Handler type.
3083 * @param GCPhys Handler range address.
3084 * @param cb Size of the handler range.
3085 * @param fHasHCHandler Set if the handler has a HC callback function.
3086 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3087 */
3088REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3089{
3090 REMR3ReplayHandlerNotifications(pVM);
3091 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3092}
3093
3094
3095/**
3096 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3097 *
3098 * @param pVM VM Handle.
3099 * @param enmType Handler type.
3100 * @param GCPhysOld Old handler range address.
3101 * @param GCPhysNew New handler range address.
3102 * @param cb Size of the handler range.
3103 * @param fHasHCHandler Set if the handler has a HC callback function.
3104 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3105 */
3106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3107{
3108 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3109 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3110 VM_ASSERT_EMT(pVM);
3111 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3112
3113 if (fHasHCHandler)
3114 {
3115 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3116
3117 /*
3118 * Reset the old page.
3119 */
3120 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3121 if (!fRestoreAsRAM)
3122 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3123 else
3124 {
3125 /* This is not perfect, but it'll do for PD monitoring... */
3126 Assert(cb == PAGE_SIZE);
3127 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3128 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3129 }
3130
3131 /*
3132 * Update the new page.
3133 */
3134 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3135 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3136 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3137 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3138
3139 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3140 }
3141}
3142
3143/**
3144 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3145 *
3146 * @param pVM VM Handle.
3147 * @param enmType Handler type.
3148 * @param GCPhysOld Old handler range address.
3149 * @param GCPhysNew New handler range address.
3150 * @param cb Size of the handler range.
3151 * @param fHasHCHandler Set if the handler has a HC callback function.
3152 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3153 */
3154REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3155{
3156 REMR3ReplayHandlerNotifications(pVM);
3157
3158 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3159}
3160
3161/**
3162 * Checks if we're handling access to this page or not.
3163 *
3164 * @returns true if we're trapping access.
3165 * @returns false if we aren't.
3166 * @param pVM The VM handle.
3167 * @param GCPhys The physical address.
3168 *
3169 * @remark This function will only work correctly in VBOX_STRICT builds!
3170 */
3171REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3172{
3173#ifdef VBOX_STRICT
3174 unsigned long off;
3175 REMR3ReplayHandlerNotifications(pVM);
3176
3177 off = get_phys_page_offset(GCPhys);
3178 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3179 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3180 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3181#else
3182 return false;
3183#endif
3184}
3185
3186
3187/**
3188 * Deals with a rare case in get_phys_addr_code where the code
3189 * is being monitored.
3190 *
3191 * It could also be an MMIO page, in which case we will raise a fatal error.
3192 *
3193 * @returns The physical address corresponding to addr.
3194 * @param env The cpu environment.
3195 * @param addr The virtual address.
3196 * @param pTLBEntry The TLB entry.
3197 */
3198target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3199 target_ulong addr,
3200 CPUTLBEntry* pTLBEntry,
3201 target_phys_addr_t ioTLBEntry)
3202{
3203 PVM pVM = env->pVM;
3204
3205 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3206 {
3207 /* If code memory is being monitored, appropriate IOTLB entry will have
3208 handler IO type, and addend will provide real physical address, no
3209 matter if we store VA in TLB or not, as handlers are always passed PA */
3210 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3211 return ret;
3212 }
3213 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3214 "*** handlers\n",
3215 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3216 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3217 LogRel(("*** mmio\n"));
3218 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3219 LogRel(("*** phys\n"));
3220 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3221 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3222 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3223 AssertFatalFailed();
3224}
3225
3226/**
3227 * Read guest RAM and ROM.
3228 *
3229 * @param SrcGCPhys The source address (guest physical).
3230 * @param pvDst The destination address.
3231 * @param cb Number of bytes
3232 */
3233void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3234{
3235 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3236 VBOX_CHECK_ADDR(SrcGCPhys);
3237 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3238#ifdef VBOX_DEBUG_PHYS
3239 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3240#endif
3241 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3242}
3243
3244
3245/**
3246 * Read guest RAM and ROM, unsigned 8-bit.
3247 *
3248 * @param SrcGCPhys The source address (guest physical).
3249 */
3250RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3251{
3252 uint8_t val;
3253 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3254 VBOX_CHECK_ADDR(SrcGCPhys);
3255 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3256 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3257#ifdef VBOX_DEBUG_PHYS
3258 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3259#endif
3260 return val;
3261}
3262
3263
3264/**
3265 * Read guest RAM and ROM, signed 8-bit.
3266 *
3267 * @param SrcGCPhys The source address (guest physical).
3268 */
3269RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3270{
3271 int8_t val;
3272 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3273 VBOX_CHECK_ADDR(SrcGCPhys);
3274 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3275 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3276#ifdef VBOX_DEBUG_PHYS
3277 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3278#endif
3279 return val;
3280}
3281
3282
3283/**
3284 * Read guest RAM and ROM, unsigned 16-bit.
3285 *
3286 * @param SrcGCPhys The source address (guest physical).
3287 */
3288RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3289{
3290 uint16_t val;
3291 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3292 VBOX_CHECK_ADDR(SrcGCPhys);
3293 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3294 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3295#ifdef VBOX_DEBUG_PHYS
3296 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3297#endif
3298 return val;
3299}
3300
3301
3302/**
3303 * Read guest RAM and ROM, signed 16-bit.
3304 *
3305 * @param SrcGCPhys The source address (guest physical).
3306 */
3307RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3308{
3309 int16_t val;
3310 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3311 VBOX_CHECK_ADDR(SrcGCPhys);
3312 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3313 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3314#ifdef VBOX_DEBUG_PHYS
3315 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3316#endif
3317 return val;
3318}
3319
3320
3321/**
3322 * Read guest RAM and ROM, unsigned 32-bit.
3323 *
3324 * @param SrcGCPhys The source address (guest physical).
3325 */
3326RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3327{
3328 uint32_t val;
3329 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3330 VBOX_CHECK_ADDR(SrcGCPhys);
3331 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3332 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3333#ifdef VBOX_DEBUG_PHYS
3334 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3335#endif
3336 return val;
3337}
3338
3339
3340/**
3341 * Read guest RAM and ROM, signed 32-bit.
3342 *
3343 * @param SrcGCPhys The source address (guest physical).
3344 */
3345RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3346{
3347 int32_t val;
3348 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3349 VBOX_CHECK_ADDR(SrcGCPhys);
3350 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3351 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3352#ifdef VBOX_DEBUG_PHYS
3353 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3354#endif
3355 return val;
3356}
3357
3358
3359/**
3360 * Read guest RAM and ROM, unsigned 64-bit.
3361 *
3362 * @param SrcGCPhys The source address (guest physical).
3363 */
3364uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3365{
3366 uint64_t val;
3367 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3368 VBOX_CHECK_ADDR(SrcGCPhys);
3369 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3370 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3371#ifdef VBOX_DEBUG_PHYS
3372 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3373#endif
3374 return val;
3375}
3376
3377
3378/**
3379 * Read guest RAM and ROM, signed 64-bit.
3380 *
3381 * @param SrcGCPhys The source address (guest physical).
3382 */
3383int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3384{
3385 int64_t val;
3386 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3387 VBOX_CHECK_ADDR(SrcGCPhys);
3388 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3389 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3390#ifdef VBOX_DEBUG_PHYS
3391 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3392#endif
3393 return val;
3394}
3395
3396
3397/**
3398 * Write guest RAM.
3399 *
3400 * @param DstGCPhys The destination address (guest physical).
3401 * @param pvSrc The source address.
3402 * @param cb Number of bytes to write
3403 */
3404void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3405{
3406 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3407 VBOX_CHECK_ADDR(DstGCPhys);
3408 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3409 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3410#ifdef VBOX_DEBUG_PHYS
3411 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3412#endif
3413}
3414
3415
3416/**
3417 * Write guest RAM, unsigned 8-bit.
3418 *
3419 * @param DstGCPhys The destination address (guest physical).
3420 * @param val Value
3421 */
3422void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3423{
3424 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3425 VBOX_CHECK_ADDR(DstGCPhys);
3426 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3427 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3428#ifdef VBOX_DEBUG_PHYS
3429 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3430#endif
3431}
3432
3433
3434/**
3435 * Write guest RAM, unsigned 8-bit.
3436 *
3437 * @param DstGCPhys The destination address (guest physical).
3438 * @param val Value
3439 */
3440void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3441{
3442 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3443 VBOX_CHECK_ADDR(DstGCPhys);
3444 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3445 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3446#ifdef VBOX_DEBUG_PHYS
3447 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3448#endif
3449}
3450
3451
3452/**
3453 * Write guest RAM, unsigned 32-bit.
3454 *
3455 * @param DstGCPhys The destination address (guest physical).
3456 * @param val Value
3457 */
3458void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3459{
3460 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3461 VBOX_CHECK_ADDR(DstGCPhys);
3462 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3463 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3464#ifdef VBOX_DEBUG_PHYS
3465 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3466#endif
3467}
3468
3469
3470/**
3471 * Write guest RAM, unsigned 64-bit.
3472 *
3473 * @param DstGCPhys The destination address (guest physical).
3474 * @param val Value
3475 */
3476void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3477{
3478 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3479 VBOX_CHECK_ADDR(DstGCPhys);
3480 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3481 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3482#ifdef VBOX_DEBUG_PHYS
3483 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3484#endif
3485}
3486
3487#undef LOG_GROUP
3488#define LOG_GROUP LOG_GROUP_REM_MMIO
3489
3490/** Read MMIO memory. */
3491static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3492{
3493 uint32_t u32 = 0;
3494 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3495 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3496 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3497 return u32;
3498}
3499
3500/** Read MMIO memory. */
3501static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3502{
3503 uint32_t u32 = 0;
3504 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3505 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3506 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3507 return u32;
3508}
3509
3510/** Read MMIO memory. */
3511static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3512{
3513 uint32_t u32 = 0;
3514 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3515 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3516 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3517 return u32;
3518}
3519
3520/** Write to MMIO memory. */
3521static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3522{
3523 int rc;
3524 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3525 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3526 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3527}
3528
3529/** Write to MMIO memory. */
3530static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3531{
3532 int rc;
3533 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3534 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3535 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3536}
3537
3538/** Write to MMIO memory. */
3539static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3540{
3541 int rc;
3542 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3543 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3544 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3545}
3546
3547
3548#undef LOG_GROUP
3549#define LOG_GROUP LOG_GROUP_REM_HANDLER
3550
3551/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3552
3553static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3554{
3555 uint8_t u8;
3556 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3557 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3558 return u8;
3559}
3560
3561static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3562{
3563 uint16_t u16;
3564 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3565 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3566 return u16;
3567}
3568
3569static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3570{
3571 uint32_t u32;
3572 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3573 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3574 return u32;
3575}
3576
/** Handler-memory byte write: writes the first sizeof(uint8_t) bytes of u32
 *  (the low byte on little-endian hosts) through PGMPhysWrite. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3582
/** Handler-memory word write: writes the first sizeof(uint16_t) bytes of u32
 *  (the low word on little-endian hosts) through PGMPhysWrite. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3588
/** Handler-memory dword write through PGMPhysWrite. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3594
3595/* -+- disassembly -+- */
3596
3597#undef LOG_GROUP
3598#define LOG_GROUP LOG_GROUP_REM_DISAS
3599
3600
3601/**
3602 * Enables or disables singled stepped disassembly.
3603 *
3604 * @returns VBox status code.
3605 * @param pVM VM handle.
3606 * @param fEnable To enable set this flag, to disable clear it.
3607 */
3608static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3609{
3610 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3611 VM_ASSERT_EMT(pVM);
3612
3613 if (fEnable)
3614 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3615 else
3616 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3617 return VINF_SUCCESS;
3618}
3619
3620
3621/**
3622 * Enables or disables singled stepped disassembly.
3623 *
3624 * @returns VBox status code.
3625 * @param pVM VM handle.
3626 * @param fEnable To enable set this flag, to disable clear it.
3627 */
3628REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3629{
3630 PVMREQ pReq;
3631 int rc;
3632
3633 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3634 if (VM_IS_EMT(pVM))
3635 return remR3DisasEnableStepping(pVM, fEnable);
3636
3637 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3638 AssertRC(rc);
3639 if (RT_SUCCESS(rc))
3640 rc = pReq->iStatus;
3641 VMR3ReqFree(pReq);
3642 return rc;
3643}
3644
3645
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current single-step-disassembly state is printed;
 * with one argument the state is changed accordingly.
 *
 * @returns VBox status code (command helper printf / REMR3DisasEnableStepping).
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         VM handle.
 * @param   paArgs      Argument array (0 or 1 boolean-convertible entries).
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result (unused here).
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3670
3671
3672/**
3673 * Disassembles one instruction and prints it to the log.
3674 *
3675 * @returns Success indicator.
3676 * @param env Pointer to the recompiler CPU structure.
3677 * @param f32BitCode Indicates that whether or not the code should
3678 * be disassembled as 16 or 32 bit. If -1 the CS
3679 * selector will be inspected.
3680 * @param pszPrefix
3681 */
3682bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3683{
3684 PVM pVM = env->pVM;
3685 const bool fLog = LogIsEnabled();
3686 const bool fLog2 = LogIs2Enabled();
3687 int rc = VINF_SUCCESS;
3688
3689 /*
3690 * Don't bother if there ain't any log output to do.
3691 */
3692 if (!fLog && !fLog2)
3693 return true;
3694
3695 /*
3696 * Update the state so DBGF reads the correct register values.
3697 */
3698 remR3StateUpdate(pVM, env->pVCpu);
3699
3700 /*
3701 * Log registers if requested.
3702 */
3703 if (!fLog2)
3704 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3705
3706 /*
3707 * Disassemble to log.
3708 */
3709 if (fLog)
3710 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3711
3712 return RT_SUCCESS(rc);
3713}
3714
3715
3716/**
3717 * Disassemble recompiled code.
3718 *
3719 * @param phFileIgnored Ignored, logfile usually.
3720 * @param pvCode Pointer to the code block.
3721 * @param cb Size of the code block.
3722 */
3723void disas(FILE *phFile, void *pvCode, unsigned long cb)
3724{
3725#ifdef DEBUG_TMP_LOGGING
3726# define DISAS_PRINTF(x...) fprintf(phFile, x)
3727#else
3728# define DISAS_PRINTF(x...) RTLogPrintf(x)
3729 if (LogIs2Enabled())
3730#endif
3731 {
3732 unsigned off = 0;
3733 char szOutput[256];
3734 DISCPUSTATE Cpu;
3735
3736 memset(&Cpu, 0, sizeof(Cpu));
3737#ifdef RT_ARCH_X86
3738 Cpu.mode = CPUMODE_32BIT;
3739#else
3740 Cpu.mode = CPUMODE_64BIT;
3741#endif
3742
3743 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3744 while (off < cb)
3745 {
3746 uint32_t cbInstr;
3747 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3748 DISAS_PRINTF("%s", szOutput);
3749 else
3750 {
3751 DISAS_PRINTF("disas error\n");
3752 cbInstr = 1;
3753#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3754 break;
3755#endif
3756 }
3757 off += cbInstr;
3758 }
3759 }
3760
3761#undef DISAS_PRINTF
3762}
3763
3764
3765/**
3766 * Disassemble guest code.
3767 *
3768 * @param phFileIgnored Ignored, logfile usually.
3769 * @param uCode The guest address of the code to disassemble. (flat?)
3770 * @param cb Number of bytes to disassemble.
3771 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3772 */
3773void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3774{
3775#ifdef DEBUG_TMP_LOGGING
3776# define DISAS_PRINTF(x...) fprintf(phFile, x)
3777#else
3778# define DISAS_PRINTF(x...) RTLogPrintf(x)
3779 if (LogIs2Enabled())
3780#endif
3781 {
3782 PVM pVM = cpu_single_env->pVM;
3783 PVMCPU pVCpu = cpu_single_env->pVCpu;
3784 RTSEL cs;
3785 RTGCUINTPTR eip;
3786
3787 Assert(pVCpu);
3788
3789 /*
3790 * Update the state so DBGF reads the correct register values (flags).
3791 */
3792 remR3StateUpdate(pVM, pVCpu);
3793
3794 /*
3795 * Do the disassembling.
3796 */
3797 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3798 cs = cpu_single_env->segs[R_CS].selector;
3799 eip = uCode - cpu_single_env->segs[R_CS].base;
3800 for (;;)
3801 {
3802 char szBuf[256];
3803 uint32_t cbInstr;
3804 int rc = DBGFR3DisasInstrEx(pVM,
3805 pVCpu->idCpu,
3806 cs,
3807 eip,
3808 0,
3809 szBuf, sizeof(szBuf),
3810 &cbInstr);
3811 if (RT_SUCCESS(rc))
3812 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3813 else
3814 {
3815 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3816 cbInstr = 1;
3817 }
3818
3819 /* next */
3820 if (cb <= cbInstr)
3821 break;
3822 cb -= cbInstr;
3823 uCode += cbInstr;
3824 eip += cbInstr;
3825 }
3826 }
3827#undef DISAS_PRINTF
3828}
3829
3830
3831/**
3832 * Looks up a guest symbol.
3833 *
3834 * @returns Pointer to symbol name. This is a static buffer.
3835 * @param orig_addr The address in question.
3836 */
3837const char *lookup_symbol(target_ulong orig_addr)
3838{
3839 RTGCINTPTR off = 0;
3840 DBGFSYMBOL Sym;
3841 PVM pVM = cpu_single_env->pVM;
3842 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3843 if (RT_SUCCESS(rc))
3844 {
3845 static char szSym[sizeof(Sym.szName) + 48];
3846 if (!off)
3847 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3848 else if (off > 0)
3849 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3850 else
3851 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3852 return szSym;
3853 }
3854 return "<N/A>";
3855}
3856
3857
3858#undef LOG_GROUP
3859#define LOG_GROUP LOG_GROUP_REM
3860
3861
3862/* -+- FF notifications -+- */
3863
3864
/**
 * Notification about a pending interrupt.
 *
 * Records the vector so cpu_get_pic_interrupt() can hand it straight to the
 * recompiler without querying PDM again (see the kludge comment there).
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 * @param u8Interrupt Interrupt
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    /* There is only one slot (per VM, not per VCpu); the previous pending
       interrupt must have been consumed before a new one is recorded. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3878
/**
 * Queries the pending interrupt recorded by REMR3NotifyPendingInterrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 * @thread The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    /* Note: stored per-VM, not per-VCpu; pVCpu is unused. */
    return pVM->rem.s.u32PendingInterrupt;
}
3891
3892/**
3893 * Notification about the interrupt FF being set.
3894 *
3895 * @param pVM VM Handle.
3896 * @param pVCpu VMCPU Handle.
3897 * @thread The emulation thread.
3898 */
3899REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3900{
3901 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3902 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3903 if (pVM->rem.s.fInREM)
3904 {
3905 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3906 CPU_INTERRUPT_EXTERNAL_HARD);
3907 }
3908}
3909
3910
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle.
 * @thread Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* Only relevant while the recompiler is executing. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3924
3925
3926/**
3927 * Notification about pending timer(s).
3928 *
3929 * @param pVM VM Handle.
3930 * @param pVCpuDst The target cpu for this notification.
3931 * TM will not broadcast pending timer events, but use
3932 * a decidated EMT for them. So, only interrupt REM
3933 * execution if the given CPU is executing in REM.
3934 * @thread Any.
3935 */
3936REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3937{
3938#ifndef DEBUG_bird
3939 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3940#endif
3941 if (pVM->rem.s.fInREM)
3942 {
3943 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3944 {
3945 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3946 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3947 CPU_INTERRUPT_EXTERNAL_TIMER);
3948 }
3949 else
3950 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3951 }
3952 else
3953 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3954}
3955
3956
3957/**
3958 * Notification about pending DMA transfers.
3959 *
3960 * @param pVM VM Handle.
3961 * @thread Any.
3962 */
3963REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3964{
3965 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3966 if (pVM->rem.s.fInREM)
3967 {
3968 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3969 CPU_INTERRUPT_EXTERNAL_DMA);
3970 }
3971}
3972
3973
3974/**
3975 * Notification about pending timer(s).
3976 *
3977 * @param pVM VM Handle.
3978 * @thread Any.
3979 */
3980REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3981{
3982 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3983 if (pVM->rem.s.fInREM)
3984 {
3985 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3986 CPU_INTERRUPT_EXTERNAL_EXIT);
3987 }
3988}
3989
3990
3991/**
3992 * Notification about pending FF set by an external thread.
3993 *
3994 * @param pVM VM handle.
3995 * @thread Any.
3996 */
3997REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3998{
3999 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4000 if (pVM->rem.s.fInREM)
4001 {
4002 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4003 CPU_INTERRUPT_EXTERNAL_EXIT);
4004 }
4005}
4006
4007
4008#ifdef VBOX_WITH_STATISTICS
4009void remR3ProfileStart(int statcode)
4010{
4011 STAMPROFILEADV *pStat;
4012 switch(statcode)
4013 {
4014 case STATS_EMULATE_SINGLE_INSTR:
4015 pStat = &gStatExecuteSingleInstr;
4016 break;
4017 case STATS_QEMU_COMPILATION:
4018 pStat = &gStatCompilationQEmu;
4019 break;
4020 case STATS_QEMU_RUN_EMULATED_CODE:
4021 pStat = &gStatRunCodeQEmu;
4022 break;
4023 case STATS_QEMU_TOTAL:
4024 pStat = &gStatTotalTimeQEmu;
4025 break;
4026 case STATS_QEMU_RUN_TIMERS:
4027 pStat = &gStatTimers;
4028 break;
4029 case STATS_TLB_LOOKUP:
4030 pStat= &gStatTBLookup;
4031 break;
4032 case STATS_IRQ_HANDLING:
4033 pStat= &gStatIRQ;
4034 break;
4035 case STATS_RAW_CHECK:
4036 pStat = &gStatRawCheck;
4037 break;
4038
4039 default:
4040 AssertMsgFailed(("unknown stat %d\n", statcode));
4041 return;
4042 }
4043 STAM_PROFILE_ADV_START(pStat, a);
4044}
4045
4046
4047void remR3ProfileStop(int statcode)
4048{
4049 STAMPROFILEADV *pStat;
4050 switch(statcode)
4051 {
4052 case STATS_EMULATE_SINGLE_INSTR:
4053 pStat = &gStatExecuteSingleInstr;
4054 break;
4055 case STATS_QEMU_COMPILATION:
4056 pStat = &gStatCompilationQEmu;
4057 break;
4058 case STATS_QEMU_RUN_EMULATED_CODE:
4059 pStat = &gStatRunCodeQEmu;
4060 break;
4061 case STATS_QEMU_TOTAL:
4062 pStat = &gStatTotalTimeQEmu;
4063 break;
4064 case STATS_QEMU_RUN_TIMERS:
4065 pStat = &gStatTimers;
4066 break;
4067 case STATS_TLB_LOOKUP:
4068 pStat= &gStatTBLookup;
4069 break;
4070 case STATS_IRQ_HANDLING:
4071 pStat= &gStatIRQ;
4072 break;
4073 case STATS_RAW_CHECK:
4074 pStat = &gStatRawCheck;
4075 break;
4076 default:
4077 AssertMsgFailed(("unknown stat %d\n", statcode));
4078 return;
4079 }
4080 STAM_PROFILE_ADV_STOP(pStat, a);
4081}
4082#endif
4083
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code in pVM->rem.s.rc and interrupts the recompiler
 * so it exits as soon as possible.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);      /* only makes sense while REM is executing */
    VM_ASSERT_EMT(pVM);
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4098
4099
4100/* -+- timers -+- */
4101
/**
 * Gets the current CPU tick (TSC) for the guest via TM.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4107
4108
4109/* -+- interrupts -+- */
4110
/**
 * Asserts ISA IRQ 13 (the FERR# / FPU error line) via PDM.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4116
/**
 * Gets the next pending hardware interrupt vector for the recompiler.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by REMR3NotifyPendingInterrupt(). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    /* NOTE(review): u8Interrupt may be uninitialized here when PDMGetInterrupt
       failed - it is only logged, but worth confirming. */
    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* Keep the hard interrupt request up if more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4149
4150
4151/* -+- local apic -+- */
4152
/**
 * Writes the APIC base MSR via PDM.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4158
4159uint64_t cpu_get_apic_base(CPUX86State *env)
4160{
4161 uint64_t u64;
4162 int rc = PDMApicGetBase(env->pVM, &u64);
4163 if (RT_SUCCESS(rc))
4164 {
4165 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4166 return u64;
4167 }
4168 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4169 return 0;
4170}
4171
/**
 * Writes the task priority register (CR8) via PDM.
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4177
4178uint8_t cpu_get_apic_tpr(CPUX86State *env)
4179{
4180 uint8_t u8;
4181 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4182 if (RT_SUCCESS(rc))
4183 {
4184 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4185 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4186 }
4187 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4188 return 0;
4189}
4190
4191
4192uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4193{
4194 uint64_t value;
4195 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4196 if (RT_SUCCESS(rc))
4197 {
4198 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4199 return value;
4200 }
4201 /** @todo: exception ? */
4202 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4203 return value;
4204}
4205
/**
 * Writes an APIC MSR via PDM.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4212
/**
 * Reads a guest MSR via CPUM (recompiler RDMSR callback).
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4218
/**
 * Writes a guest MSR via CPUM (recompiler WRMSR callback).
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4224
4225/* -+- I/O Ports -+- */
4226
4227#undef LOG_GROUP
4228#define LOG_GROUP LOG_GROUP_REM_IOPORT
4229
4230void cpu_outb(CPUState *env, int addr, int val)
4231{
4232 int rc;
4233
4234 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4235 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4236
4237 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4238 if (RT_LIKELY(rc == VINF_SUCCESS))
4239 return;
4240 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4241 {
4242 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4243 remR3RaiseRC(env->pVM, rc);
4244 return;
4245 }
4246 remAbort(rc, __FUNCTION__);
4247}
4248
4249void cpu_outw(CPUState *env, int addr, int val)
4250{
4251 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4252 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4253 if (RT_LIKELY(rc == VINF_SUCCESS))
4254 return;
4255 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4256 {
4257 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4258 remR3RaiseRC(env->pVM, rc);
4259 return;
4260 }
4261 remAbort(rc, __FUNCTION__);
4262}
4263
4264void cpu_outl(CPUState *env, int addr, int val)
4265{
4266 int rc;
4267 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4268 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4269 if (RT_LIKELY(rc == VINF_SUCCESS))
4270 return;
4271 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4272 {
4273 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4274 remR3RaiseRC(env->pVM, rc);
4275 return;
4276 }
4277 remAbort(rc, __FUNCTION__);
4278}
4279
4280int cpu_inb(CPUState *env, int addr)
4281{
4282 uint32_t u32 = 0;
4283 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4284 if (RT_LIKELY(rc == VINF_SUCCESS))
4285 {
4286 if (/*addr != 0x61 && */addr != 0x71)
4287 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4288 return (int)u32;
4289 }
4290 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4291 {
4292 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4293 remR3RaiseRC(env->pVM, rc);
4294 return (int)u32;
4295 }
4296 remAbort(rc, __FUNCTION__);
4297 return 0xff;
4298}
4299
4300int cpu_inw(CPUState *env, int addr)
4301{
4302 uint32_t u32 = 0;
4303 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4304 if (RT_LIKELY(rc == VINF_SUCCESS))
4305 {
4306 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4307 return (int)u32;
4308 }
4309 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4310 {
4311 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4312 remR3RaiseRC(env->pVM, rc);
4313 return (int)u32;
4314 }
4315 remAbort(rc, __FUNCTION__);
4316 return 0xffff;
4317}
4318
4319int cpu_inl(CPUState *env, int addr)
4320{
4321 uint32_t u32 = 0;
4322 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4323 if (RT_LIKELY(rc == VINF_SUCCESS))
4324 {
4325//if (addr==0x01f0 && u32 == 0x6b6d)
4326// loglevel = ~0;
4327 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4328 return (int)u32;
4329 }
4330 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4331 {
4332 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4333 remR3RaiseRC(env->pVM, rc);
4334 return (int)u32;
4335 }
4336 remAbort(rc, __FUNCTION__);
4337 return 0xffffffff;
4338}
4339
4340#undef LOG_GROUP
4341#define LOG_GROUP LOG_GROUP_REM
4342
4343
4344/* -+- helpers and misc other interfaces -+- */
4345
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * Returns the guest's CPUID values as configured by CPUM, not the host's.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4363
4364
4365#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    /* NOTE(review): this is dead code (#if 0). If re-enabled, the calls below
       must be updated: elsewhere in this file REMR3StateBack() takes
       (pVM, pVCpu) and EMR3FatalError() takes a pVCpu (see cpu_abort/remAbort). */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4393#endif
4394
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs the message, syncs the recompiler state back (when inside REM) and
 * hands the error to EMR3FatalError, which is not expected to return.
 *
 * @param env       The recompiler CPU state (unused; cpu_single_env is used instead).
 * @param pszFormat Message format string.
 * @param ...       Format arguments.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Count the '%' sequences and pull that many pointer-sized args off the
       va list, then re-dispatch to the fixed-arity RTStrPrintf variants. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4455
4456
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the recompiler state back (when inside REM) and
 * hands the error to EMR3FatalError, which is not expected to return.
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4487
4488
/**
 * Dumps a linux system call.
 *
 * Decodes guest eax as an IA-32 Linux syscall number and logs it together
 * with the usual syscall argument registers (ebx..ebp).
 *
 * @param pVCpu VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* Syscall names indexed by syscall number (IA-32 Linux ABI). */
    static const char *apsz[] =
    {
        "sys_restart_syscall",	/* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",		/* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",	/* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",		/* 15 */
        "sys_lchown16",
        "sys_ni_syscall",	/* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",	/* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",		/* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",		/* 30 */
        "sys_ni_syscall",	/* old stty syscall holder */
        "sys_ni_syscall",	/* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",	/* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",		/* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",	/* old prof syscall holder */
        "sys_brk",		/* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",	/* 50 */
        "sys_acct",
        "sys_umount",	/* recycled never used phys() */
        "sys_ni_syscall",	/* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",		/* 55 */
        "sys_ni_syscall",	/* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",	/* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",		/* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",	/* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",	/* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",	/* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",	/* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",	/* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",		/* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",	/* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",	/* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",	/* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",	/* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",		/* 110 */
        "sys_vhangup",
        "sys_ni_syscall",	/* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",	/* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",		/* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",	/* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",	/* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",	/* 130:	old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",		/* 135 */
        "sys_personality",
        "sys_ni_syscall",	/* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",	/* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",		/* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",		/* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",	/* 165 */
        "sys_vm86",
        "sys_ni_syscall",	/* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",	/* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",	/* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",	/* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",	/* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",	/* reserved for streams1 */
        "sys_ni_syscall",	/* reserved for streams2 */
        "sys_vfork",		/* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",	/* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",	/* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",	/* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",	/* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",	/* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",	/* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",	/* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",	/* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",	/* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",	/* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",		/* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",	/* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",	/* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",	/* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",		/* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",		/* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",	/* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"	/* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            /* Known numbers get a decoded line; anything past the table is
               logged as unknown. */
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4787
4788
4789/**
4790 * Dumps an OpenBSD system call.
4791 * @param pVCpu VMCPU handle.
4792 */
4793void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4794{
4795 static const char *apsz[] =
4796 {
4797 "SYS_syscall", //0
4798 "SYS_exit", //1
4799 "SYS_fork", //2
4800 "SYS_read", //3
4801 "SYS_write", //4
4802 "SYS_open", //5
4803 "SYS_close", //6
4804 "SYS_wait4", //7
4805 "SYS_8",
4806 "SYS_link", //9
4807 "SYS_unlink", //10
4808 "SYS_11",
4809 "SYS_chdir", //12
4810 "SYS_fchdir", //13
4811 "SYS_mknod", //14
4812 "SYS_chmod", //15
4813 "SYS_chown", //16
4814 "SYS_break", //17
4815 "SYS_18",
4816 "SYS_19",
4817 "SYS_getpid", //20
4818 "SYS_mount", //21
4819 "SYS_unmount", //22
4820 "SYS_setuid", //23
4821 "SYS_getuid", //24
4822 "SYS_geteuid", //25
4823 "SYS_ptrace", //26
4824 "SYS_recvmsg", //27
4825 "SYS_sendmsg", //28
4826 "SYS_recvfrom", //29
4827 "SYS_accept", //30
4828 "SYS_getpeername", //31
4829 "SYS_getsockname", //32
4830 "SYS_access", //33
4831 "SYS_chflags", //34
4832 "SYS_fchflags", //35
4833 "SYS_sync", //36
4834 "SYS_kill", //37
4835 "SYS_38",
4836 "SYS_getppid", //39
4837 "SYS_40",
4838 "SYS_dup", //41
4839 "SYS_opipe", //42
4840 "SYS_getegid", //43
4841 "SYS_profil", //44
4842 "SYS_ktrace", //45
4843 "SYS_sigaction", //46
4844 "SYS_getgid", //47
4845 "SYS_sigprocmask", //48
4846 "SYS_getlogin", //49
4847 "SYS_setlogin", //50
4848 "SYS_acct", //51
4849 "SYS_sigpending", //52
4850 "SYS_osigaltstack", //53
4851 "SYS_ioctl", //54
4852 "SYS_reboot", //55
4853 "SYS_revoke", //56
4854 "SYS_symlink", //57
4855 "SYS_readlink", //58
4856 "SYS_execve", //59
4857 "SYS_umask", //60
4858 "SYS_chroot", //61
4859 "SYS_62",
4860 "SYS_63",
4861 "SYS_64",
4862 "SYS_65",
4863 "SYS_vfork", //66
4864 "SYS_67",
4865 "SYS_68",
4866 "SYS_sbrk", //69
4867 "SYS_sstk", //70
4868 "SYS_61",
4869 "SYS_vadvise", //72
4870 "SYS_munmap", //73
4871 "SYS_mprotect", //74
4872 "SYS_madvise", //75
4873 "SYS_76",
4874 "SYS_77",
4875 "SYS_mincore", //78
4876 "SYS_getgroups", //79
4877 "SYS_setgroups", //80
4878 "SYS_getpgrp", //81
4879 "SYS_setpgid", //82
4880 "SYS_setitimer", //83
4881 "SYS_84",
4882 "SYS_85",
4883 "SYS_getitimer", //86
4884 "SYS_87",
4885 "SYS_88",
4886 "SYS_89",
4887 "SYS_dup2", //90
4888 "SYS_91",
4889 "SYS_fcntl", //92
4890 "SYS_select", //93
4891 "SYS_94",
4892 "SYS_fsync", //95
4893 "SYS_setpriority", //96
4894 "SYS_socket", //97
4895 "SYS_connect", //98
4896 "SYS_99",
4897 "SYS_getpriority", //100
4898 "SYS_101",
4899 "SYS_102",
4900 "SYS_sigreturn", //103
4901 "SYS_bind", //104
4902 "SYS_setsockopt", //105
4903 "SYS_listen", //106
4904 "SYS_107",
4905 "SYS_108",
4906 "SYS_109",
4907 "SYS_110",
4908 "SYS_sigsuspend", //111
4909 "SYS_112",
4910 "SYS_113",
4911 "SYS_114",
4912 "SYS_115",
4913 "SYS_gettimeofday", //116
4914 "SYS_getrusage", //117
4915 "SYS_getsockopt", //118
4916 "SYS_119",
4917 "SYS_readv", //120
4918 "SYS_writev", //121
4919 "SYS_settimeofday", //122
4920 "SYS_fchown", //123
4921 "SYS_fchmod", //124
4922 "SYS_125",
4923 "SYS_setreuid", //126
4924 "SYS_setregid", //127
4925 "SYS_rename", //128
4926 "SYS_129",
4927 "SYS_130",
4928 "SYS_flock", //131
4929 "SYS_mkfifo", //132
4930 "SYS_sendto", //133
4931 "SYS_shutdown", //134
4932 "SYS_socketpair", //135
4933 "SYS_mkdir", //136
4934 "SYS_rmdir", //137
4935 "SYS_utimes", //138
4936 "SYS_139",
4937 "SYS_adjtime", //140
4938 "SYS_141",
4939 "SYS_142",
4940 "SYS_143",
4941 "SYS_144",
4942 "SYS_145",
4943 "SYS_146",
4944 "SYS_setsid", //147
4945 "SYS_quotactl", //148
4946 "SYS_149",
4947 "SYS_150",
4948 "SYS_151",
4949 "SYS_152",
4950 "SYS_153",
4951 "SYS_154",
4952 "SYS_nfssvc", //155
4953 "SYS_156",
4954 "SYS_157",
4955 "SYS_158",
4956 "SYS_159",
4957 "SYS_160",
4958 "SYS_getfh", //161
4959 "SYS_162",
4960 "SYS_163",
4961 "SYS_164",
4962 "SYS_sysarch", //165
4963 "SYS_166",
4964 "SYS_167",
4965 "SYS_168",
4966 "SYS_169",
4967 "SYS_170",
4968 "SYS_171",
4969 "SYS_172",
4970 "SYS_pread", //173
4971 "SYS_pwrite", //174
4972 "SYS_175",
4973 "SYS_176",
4974 "SYS_177",
4975 "SYS_178",
4976 "SYS_179",
4977 "SYS_180",
4978 "SYS_setgid", //181
4979 "SYS_setegid", //182
4980 "SYS_seteuid", //183
4981 "SYS_lfs_bmapv", //184
4982 "SYS_lfs_markv", //185
4983 "SYS_lfs_segclean", //186
4984 "SYS_lfs_segwait", //187
4985 "SYS_188",
4986 "SYS_189",
4987 "SYS_190",
4988 "SYS_pathconf", //191
4989 "SYS_fpathconf", //192
4990 "SYS_swapctl", //193
4991 "SYS_getrlimit", //194
4992 "SYS_setrlimit", //195
4993 "SYS_getdirentries", //196
4994 "SYS_mmap", //197
4995 "SYS___syscall", //198
4996 "SYS_lseek", //199
4997 "SYS_truncate", //200
4998 "SYS_ftruncate", //201
4999 "SYS___sysctl", //202
5000 "SYS_mlock", //203
5001 "SYS_munlock", //204
5002 "SYS_205",
5003 "SYS_futimes", //206
5004 "SYS_getpgid", //207
5005 "SYS_xfspioctl", //208
5006 "SYS_209",
5007 "SYS_210",
5008 "SYS_211",
5009 "SYS_212",
5010 "SYS_213",
5011 "SYS_214",
5012 "SYS_215",
5013 "SYS_216",
5014 "SYS_217",
5015 "SYS_218",
5016 "SYS_219",
5017 "SYS_220",
5018 "SYS_semget", //221
5019 "SYS_222",
5020 "SYS_223",
5021 "SYS_224",
5022 "SYS_msgget", //225
5023 "SYS_msgsnd", //226
5024 "SYS_msgrcv", //227
5025 "SYS_shmat", //228
5026 "SYS_229",
5027 "SYS_shmdt", //230
5028 "SYS_231",
5029 "SYS_clock_gettime", //232
5030 "SYS_clock_settime", //233
5031 "SYS_clock_getres", //234
5032 "SYS_235",
5033 "SYS_236",
5034 "SYS_237",
5035 "SYS_238",
5036 "SYS_239",
5037 "SYS_nanosleep", //240
5038 "SYS_241",
5039 "SYS_242",
5040 "SYS_243",
5041 "SYS_244",
5042 "SYS_245",
5043 "SYS_246",
5044 "SYS_247",
5045 "SYS_248",
5046 "SYS_249",
5047 "SYS_minherit", //250
5048 "SYS_rfork", //251
5049 "SYS_poll", //252
5050 "SYS_issetugid", //253
5051 "SYS_lchown", //254
5052 "SYS_getsid", //255
5053 "SYS_msync", //256
5054 "SYS_257",
5055 "SYS_258",
5056 "SYS_259",
5057 "SYS_getfsstat", //260
5058 "SYS_statfs", //261
5059 "SYS_fstatfs", //262
5060 "SYS_pipe", //263
5061 "SYS_fhopen", //264
5062 "SYS_265",
5063 "SYS_fhstatfs", //266
5064 "SYS_preadv", //267
5065 "SYS_pwritev", //268
5066 "SYS_kqueue", //269
5067 "SYS_kevent", //270
5068 "SYS_mlockall", //271
5069 "SYS_munlockall", //272
5070 "SYS_getpeereid", //273
5071 "SYS_274",
5072 "SYS_275",
5073 "SYS_276",
5074 "SYS_277",
5075 "SYS_278",
5076 "SYS_279",
5077 "SYS_280",
5078 "SYS_getresuid", //281
5079 "SYS_setresuid", //282
5080 "SYS_getresgid", //283
5081 "SYS_setresgid", //284
5082 "SYS_285",
5083 "SYS_mquery", //286
5084 "SYS_closefrom", //287
5085 "SYS_sigaltstack", //288
5086 "SYS_shmget", //289
5087 "SYS_semop", //290
5088 "SYS_stat", //291
5089 "SYS_fstat", //292
5090 "SYS_lstat", //293
5091 "SYS_fhstat", //294
5092 "SYS___semctl", //295
5093 "SYS_shmctl", //296
5094 "SYS_msgctl", //297
5095 "SYS_MAXSYSCALL", //298
5096 //299
5097 //300
5098 };
5099 uint32_t uEAX;
5100 if (!LogIsEnabled())
5101 return;
5102 uEAX = CPUMGetGuestEAX(pVCpu);
5103 switch (uEAX)
5104 {
5105 default:
5106 if (uEAX < RT_ELEMENTS(apsz))
5107 {
5108 uint32_t au32Args[8] = {0};
5109 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5110 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5111 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5112 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5113 }
5114 else
5115 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5116 break;
5117 }
5118}
5119
5120
5121#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5122/**
5123 * The Dll main entry point (stub).
5124 */
5125bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5126{
5127 return true;
5128}
5129
/**
 * Minimal memcpy replacement for IPRT_NO_CRT builds (no C runtime linked in).
 *
 * Copies @a size bytes from @a src to @a dst one byte at a time. Same
 * contract as the standard memcpy: the regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy the bytes to.
 * @param   src     Where to copy the bytes from (not written to).
 * @param   size    The number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const-correct: was dropping the const qualifier of src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5137
5138#endif
5139
/**
 * Stub for the recompiler's cpu_smm_update hook.
 *
 * Intentionally empty: this build does nothing when the CPU's SMM
 * (System Management Mode) state changes. Presumably called by the
 * QEMU core when SMM state is updated -- confirm against the callers
 * in the recompiler sources.
 *
 * @param   env     The recompiler CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette