VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 25580

Last change on this file since 25580 was 25580, checked in by vboxsync, 15 years ago

Temporarily turned on extra checking in release build for #4113

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.5 KB
Line 
1/* $Id: VBoxRecompiler.c 25580 2009-12-24 14:35:39Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 *
 * Implemented as a struct assignment so the compiler emits an inline copy of
 * sizeof(X86FPUMMX) bytes; both arguments are evaluated exactly once.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the main REM execution paths; registered in REMR3Init and
   deregistered in REMR3Term. */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Guest-physical <-> host-virtual address conversion profiling. */
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for the individual reasons raw-mode execution was refused
   (see the per-counter descriptions registered in REMR3Init). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor-table / task-register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Selector out-of-sync counters, indexed ES,CS,SS,DS,FS,GS
   (matches the registration order in REMR3Init). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword.  Handed to
 * cpu_register_io_memory() in REMR3Init together with the VM pointer
 * as the opaque argument. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * Same layout as the MMIO tables; used for the PGM access-handler
 * memory type registered in REMR3Init. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Same layout as the MMIO tables. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional numeric on/off argument (cTimesMin=0, cTimesMax=~0). */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,        fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER, 0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once via DBGCRegisterCommands() in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
223/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
244/**
245 * Initializes the REM.
246 *
247 * @returns VBox status code.
248 * @param pVM The VM to operate on.
249 */
250REMR3DECL(int) REMR3Init(PVM pVM)
251{
252 PREMHANDLERNOTIFICATION pCur;
253 uint32_t u32Dummy;
254 int rc;
255 unsigned i;
256
257#ifdef VBOX_ENABLE_VBOXREM64
258 LogRel(("Using 64-bit aware REM\n"));
259#endif
260
261 /*
262 * Assert sanity.
263 */
264 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
265 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
266 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
267#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
268 Assert(!testmath());
269#endif
270
271 /*
272 * Init some internal data members.
273 */
274 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
275 pVM->rem.s.Env.pVM = pVM;
276#ifdef CPU_RAW_MODE_INIT
277 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
278#endif
279
280 /*
281 * Initialize the REM critical section.
282 *
283 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
284 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
285 * deadlocks. (mostly pgm vs rem locking)
286 */
287 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
288 AssertRCReturn(rc, rc);
289
290 /* ctx. */
291 pVM->rem.s.pCtx = NULL; /* set when executing code. */
292 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
293
294 /* ignore all notifications */
295 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
296
297 code_gen_prologue = RTMemExecAlloc(_1K);
298 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
299
300 cpu_exec_init_all(0);
301
302 /*
303 * Init the recompiler.
304 */
305 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
306 {
307 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
308 return VERR_GENERAL_FAILURE;
309 }
310 PVMCPU pVCpu = VMMGetCpu(pVM);
311 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
312 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
313
314 /* allocate code buffer for single instruction emulation. */
315 pVM->rem.s.Env.cbCodeBuffer = 4096;
316 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
317 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
318
319 /* finally, set the cpu_single_env global. */
320 cpu_single_env = &pVM->rem.s.Env;
321
322 /* Nothing is pending by default */
323 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
324
325 /*
326 * Register ram types.
327 */
328 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
329 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
330 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
331 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
332 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
333
334 /* stop ignoring. */
335 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
336
337 /*
338 * Register the saved state data unit.
339 */
340 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
341 NULL, NULL, NULL,
342 NULL, remR3Save, NULL,
343 NULL, remR3Load, NULL);
344 if (RT_FAILURE(rc))
345 return rc;
346
347#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
348 /*
349 * Debugger commands.
350 */
351 static bool fRegisteredCmds = false;
352 if (!fRegisteredCmds)
353 {
354 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
355 if (RT_SUCCESS(rc))
356 fRegisteredCmds = true;
357 }
358#endif
359
360#ifdef VBOX_WITH_STATISTICS
361 /*
362 * Statistics.
363 */
364 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
365 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
366 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
367 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
368 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
370 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
371 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
372 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
373 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
374 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
375 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
376
377 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
378
379 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
380 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
381 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
382 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
383 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
384 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
385 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
386 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
387 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
388 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
389 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
390
391 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
392 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
393 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
394 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
395
396 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
402
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
407 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
409
410 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
411#endif /* VBOX_WITH_STATISTICS */
412
413 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
414 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
415 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
416
417
418#ifdef DEBUG_ALL_LOGGING
419 loglevel = ~0;
420# ifdef DEBUG_TMP_LOGGING
421 logfile = fopen("/tmp/vbox-qemu.log", "w");
422# endif
423#endif
424
425 /*
426 * Init the handler notification lists.
427 */
428 pVM->rem.s.idxPendingList = UINT32_MAX;
429 pVM->rem.s.idxFreeList = 0;
430
431 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
432 {
433 pCur = &pVM->rem.s.aHandlerNotifications[i];
434 pCur->idxNext = i + 1;
435 pCur->idxSelf = i;
436 }
437 pCur->idxNext = UINT32_MAX; /* the last record. */
438
439 return rc;
440}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map with an inaccessible tail page
 *                      (used by strict builds to catch overruns).
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* One dirty byte per page up to and including GCPhysLastRam; the +1 must
       not wrap (checked below). */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    /* Verify cb was page aligned (no pages lost by the shift). */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail (everything past the aligned bitmap) inaccessible. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the *end* of the used bitmap butts up against the
           guard region; an overrun then faults immediately. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
526
527
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters the statistics registered by REMR3Init;
 * must mirror that function's registrations exactly.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    /* These three are registered unconditionally (STAM_REL_REG in REMR3Init). */
    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
598
599
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Notifications are suppressed (cIgnoreAll) around cpu_reset() so the
     * reset itself doesn't trigger handler processing; the counter must be
     * balanced at 0 on both sides.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
626
627
/**
 * Execute state save operation.
 *
 * The field order here is the saved-state format; remR3Load must read the
 * fields back in exactly this order.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the terminator's status is checked; SSM latches earlier errors. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
654
655
656/**
657 * Execute state load operation.
658 *
659 * @returns VBox status code.
660 * @param pVM VM Handle.
661 * @param pSSM SSM operation handle.
662 * @param uVersion Data layout version.
663 * @param uPass The data pass.
664 */
665static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
666{
667 uint32_t u32Dummy;
668 uint32_t fRawRing0 = false;
669 uint32_t u32Sep;
670 uint32_t i;
671 int rc;
672 PREM pRem;
673
674 LogFlow(("remR3Load:\n"));
675 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
676
677 /*
678 * Validate version.
679 */
680 if ( uVersion != REM_SAVED_STATE_VERSION
681 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
682 {
683 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
684 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
685 }
686
687 /*
688 * Do a reset to be on the safe side...
689 */
690 REMR3Reset(pVM);
691
692 /*
693 * Ignore all ignorable notifications.
694 * (Not doing this will cause serious trouble.)
695 */
696 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
697
698 /*
699 * Load the required CPU Env bits.
700 * (Not much because we're never in REM when doing the save.)
701 */
702 pRem = &pVM->rem.s;
703 Assert(!pRem->fInREM);
704 SSMR3GetU32(pSSM, &pRem->Env.hflags);
705 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
706 {
707 /* Redundant REM CPU state has to be loaded, but can be ignored. */
708 CPUX86State_Ver16 temp;
709 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
710 }
711
712 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
713 if (RT_FAILURE(rc))
714 return rc;
715 if (u32Sep != ~0U)
716 {
717 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
718 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
719 }
720
721 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
722 SSMR3GetUInt(pSSM, &fRawRing0);
723 if (fRawRing0)
724 pRem->Env.state |= CPU_RAW_RING0;
725
726 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
727 {
728 /*
729 * Load the REM stuff.
730 */
731 /** @todo r=bird: We should just drop all these items, restoring doesn't make
732 * sense. */
733 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
734 if (RT_FAILURE(rc))
735 return rc;
736 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
737 {
738 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
739 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
740 }
741 for (i = 0; i < pRem->cInvalidatedPages; i++)
742 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
743 }
744
745 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
746 if (RT_FAILURE(rc))
747 return rc;
748
749 /* check the terminator. */
750 rc = SSMR3GetU32(pSSM, &u32Sep);
751 if (RT_FAILURE(rc))
752 return rc;
753 if (u32Sep != ~0U)
754 {
755 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
756 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
757 }
758
759 /*
760 * Get the CPUID features.
761 */
762 PVMCPU pVCpu = VMMGetCpu(pVM);
763 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
764 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
765
766 /*
767 * Sync the Load Flush the TLB
768 */
769 tlb_flush(&pRem->Env, 1);
770
771 /*
772 * Stop ignoring ignornable notifications.
773 */
774 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
775
776 /*
777 * Sync the whole CPU state when executing code in the recompiler.
778 */
779 for (i = 0; i < pVM->cCpus; i++)
780 {
781 PVMCPU pVCpu = &pVM->aCpus[i];
782 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
783 }
784 return VINF_SUCCESS;
785}
786
787
788
789#undef LOG_GROUP
790#define LOG_GROUP LOG_GROUP_REM_RUN
791
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * The saved interrupt_request is restored before returning.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * fBp records whether one was removed so it can be re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: pulse the TM clock (resume+suspend) and report it. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit codes onto VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* Status was parked in pVM->rem.s.rc; consume and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
876
877
878/**
879 * Set a breakpoint using the REM facilities.
880 *
881 * @returns VBox status code.
882 * @param pVM The VM handle.
883 * @param Address The breakpoint address.
884 * @thread The emulation thread.
885 */
886REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
887{
888 VM_ASSERT_EMT(pVM);
889 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
890 {
891 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
892 return VINF_SUCCESS;
893 }
894 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
895 return VERR_REM_NO_MORE_BP_SLOTS;
896}
897
898
899/**
900 * Clears a breakpoint set by REMR3BreakpointSet().
901 *
902 * @returns VBox status code.
903 * @param pVM The VM handle.
904 * @param Address The breakpoint address.
905 * @thread The emulation thread.
906 */
907REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
908{
909 VM_ASSERT_EMT(pVM);
910 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
911 {
912 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
913 return VINF_SUCCESS;
914 }
915 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
916 return VERR_REM_BP_NOT_FOUND;
917}
918
919
920/**
921 * Emulate an instruction.
922 *
923 * This function executes one instruction without letting anyone
924 * interrupt it. This is intended for being called while being in
925 * raw mode and thus will take care of all the state syncing between
926 * REM and the rest.
927 *
928 * @returns VBox status code.
929 * @param pVM VM handle.
930 * @param pVCpu VMCPU Handle.
931 */
932REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
933{
934 bool fFlushTBs;
935
936 int rc, rc2;
937 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
938
939 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
940 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
941 */
942 if (HWACCMIsEnabled(pVM))
943 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
944
945 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
946 fFlushTBs = pVM->rem.s.fFlushTBs;
947 pVM->rem.s.fFlushTBs = false;
948
949 /*
950 * Sync the state and enable single instruction / single stepping.
951 */
952 rc = REMR3State(pVM, pVCpu);
953 pVM->rem.s.fFlushTBs = fFlushTBs;
954 if (RT_SUCCESS(rc))
955 {
956 int interrupt_request = pVM->rem.s.Env.interrupt_request;
957 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
958 Assert(!pVM->rem.s.Env.singlestep_enabled);
959 /*
960 * Now we set the execute single instruction flag and enter the cpu_exec loop.
961 */
962 TMNotifyStartOfExecution(pVCpu);
963 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
964 rc = cpu_exec(&pVM->rem.s.Env);
965 TMNotifyEndOfExecution(pVCpu);
966 switch (rc)
967 {
968 /*
969 * Executed without anything out of the way happening.
970 */
971 case EXCP_SINGLE_INSTR:
972 rc = VINF_EM_RESCHEDULE;
973 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
974 break;
975
976 /*
977 * If we take a trap or start servicing a pending interrupt, we might end up here.
978 * (Timer thread or some other thread wishing EMT's attention.)
979 */
980 case EXCP_INTERRUPT:
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
982 rc = VINF_EM_RESCHEDULE;
983 break;
984
985 /*
986 * Single step, we assume!
987 * If there was a breakpoint there we're fucked now.
988 */
989 case EXCP_DEBUG:
990 {
991 /* breakpoint or single step? */
992 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
993 int iBP;
994 rc = VINF_EM_DBG_STEPPED;
995 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
996 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
997 {
998 rc = VINF_EM_DBG_BREAKPOINT;
999 break;
1000 }
1001 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1002 break;
1003 }
1004
1005 /*
1006 * hlt instruction.
1007 */
1008 case EXCP_HLT:
1009 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1010 rc = VINF_EM_HALT;
1011 break;
1012
1013 /*
1014 * The VM has halted.
1015 */
1016 case EXCP_HALTED:
1017 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1018 rc = VINF_EM_HALT;
1019 break;
1020
1021 /*
1022 * Switch to RAW-mode.
1023 */
1024 case EXCP_EXECUTE_RAW:
1025 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1026 rc = VINF_EM_RESCHEDULE_RAW;
1027 break;
1028
1029 /*
1030 * Switch to hardware accelerated RAW-mode.
1031 */
1032 case EXCP_EXECUTE_HWACC:
1033 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1034 rc = VINF_EM_RESCHEDULE_HWACC;
1035 break;
1036
1037 /*
1038 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1039 */
1040 case EXCP_RC:
1041 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1042 rc = pVM->rem.s.rc;
1043 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1044 break;
1045
1046 /*
1047 * Figure out the rest when they arrive....
1048 */
1049 default:
1050 AssertMsgFailed(("rc=%d\n", rc));
1051 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1052 rc = VINF_EM_RESCHEDULE;
1053 break;
1054 }
1055
1056 /*
1057 * Switch back the state.
1058 */
1059 pVM->rem.s.Env.interrupt_request = interrupt_request;
1060 rc2 = REMR3StateBack(pVM, pVCpu);
1061 AssertRC(rc2);
1062 }
1063
1064 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1065 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1066 return rc;
1067}
1068
1069
1070/**
1071 * Runs code in recompiled mode.
1072 *
1073 * Before calling this function the REM state needs to be in sync with
1074 * the VM. Call REMR3State() to perform the sync. It's only necessary
1075 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1076 * and after calling REMR3StateBack().
1077 *
1078 * @returns VBox status code.
1079 *
1080 * @param pVM VM Handle.
1081 * @param pVCpu VMCPU Handle.
1082 */
1083REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1084{
1085 int rc;
1086 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1087 Assert(pVM->rem.s.fInREM);
1088
1089 TMNotifyStartOfExecution(pVCpu);
1090 rc = cpu_exec(&pVM->rem.s.Env);
1091 TMNotifyEndOfExecution(pVCpu);
1092 switch (rc)
1093 {
1094 /*
1095 * This happens when the execution was interrupted
1096 * by an external event, like pending timers.
1097 */
1098 case EXCP_INTERRUPT:
1099 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1100 rc = VINF_SUCCESS;
1101 break;
1102
1103 /*
1104 * hlt instruction.
1105 */
1106 case EXCP_HLT:
1107 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1108 rc = VINF_EM_HALT;
1109 break;
1110
1111 /*
1112 * The VM has halted.
1113 */
1114 case EXCP_HALTED:
1115 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1116 rc = VINF_EM_HALT;
1117 break;
1118
1119 /*
1120 * Breakpoint/single step.
1121 */
1122 case EXCP_DEBUG:
1123 {
1124#if 0//def DEBUG_bird
1125 static int iBP = 0;
1126 printf("howdy, breakpoint! iBP=%d\n", iBP);
1127 switch (iBP)
1128 {
1129 case 0:
1130 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1131 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1132 //pVM->rem.s.Env.interrupt_request = 0;
1133 //pVM->rem.s.Env.exception_index = -1;
1134 //g_fInterruptDisabled = 1;
1135 rc = VINF_SUCCESS;
1136 asm("int3");
1137 break;
1138 default:
1139 asm("int3");
1140 break;
1141 }
1142 iBP++;
1143#else
1144 /* breakpoint or single step? */
1145 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1146 int iBP;
1147 rc = VINF_EM_DBG_STEPPED;
1148 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1149 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1150 {
1151 rc = VINF_EM_DBG_BREAKPOINT;
1152 break;
1153 }
1154 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1155#endif
1156 break;
1157 }
1158
1159 /*
1160 * Switch to RAW-mode.
1161 */
1162 case EXCP_EXECUTE_RAW:
1163 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1164 rc = VINF_EM_RESCHEDULE_RAW;
1165 break;
1166
1167 /*
1168 * Switch to hardware accelerated RAW-mode.
1169 */
1170 case EXCP_EXECUTE_HWACC:
1171 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1172 rc = VINF_EM_RESCHEDULE_HWACC;
1173 break;
1174
1175 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1176 /*
1177 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1178 */
1179 case EXCP_RC:
1180 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1181 rc = pVM->rem.s.rc;
1182 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1183 break;
1184
1185 /*
1186 * Figure out the rest when they arrive....
1187 */
1188 default:
1189 AssertMsgFailed(("rc=%d\n", rc));
1190 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1191 rc = VINF_SUCCESS;
1192 break;
1193 }
1194
1195 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1196 return rc;
1197}
1198
1199
1200/**
1201 * Check if the cpu state is suitable for Raw execution.
1202 *
1203 * @returns boolean
1204 * @param env The CPU env struct.
1205 * @param eip The EIP to check this for (might differ from env->eip).
1206 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1207 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1208 *
1209 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1210 */
1211bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1212{
1213 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1214 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1215 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1216 uint32_t u32CR0;
1217
1218 /* Update counter. */
1219 env->pVM->rem.s.cCanExecuteRaw++;
1220
1221 if (HWACCMIsEnabled(env->pVM))
1222 {
1223 CPUMCTX Ctx;
1224
1225 env->state |= CPU_RAW_HWACC;
1226
1227 /*
1228 * Create partial context for HWACCMR3CanExecuteGuest
1229 */
1230 Ctx.cr0 = env->cr[0];
1231 Ctx.cr3 = env->cr[3];
1232 Ctx.cr4 = env->cr[4];
1233
1234 Ctx.tr = env->tr.selector;
1235 Ctx.trHid.u64Base = env->tr.base;
1236 Ctx.trHid.u32Limit = env->tr.limit;
1237 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1238
1239 Ctx.idtr.cbIdt = env->idt.limit;
1240 Ctx.idtr.pIdt = env->idt.base;
1241
1242 Ctx.gdtr.cbGdt = env->gdt.limit;
1243 Ctx.gdtr.pGdt = env->gdt.base;
1244
1245 Ctx.rsp = env->regs[R_ESP];
1246 Ctx.rip = env->eip;
1247
1248 Ctx.eflags.u32 = env->eflags;
1249
1250 Ctx.cs = env->segs[R_CS].selector;
1251 Ctx.csHid.u64Base = env->segs[R_CS].base;
1252 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1253 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1254
1255 Ctx.ds = env->segs[R_DS].selector;
1256 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1257 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1258 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1259
1260 Ctx.es = env->segs[R_ES].selector;
1261 Ctx.esHid.u64Base = env->segs[R_ES].base;
1262 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1263 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1264
1265 Ctx.fs = env->segs[R_FS].selector;
1266 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1267 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1268 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1269
1270 Ctx.gs = env->segs[R_GS].selector;
1271 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1272 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1273 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1274
1275 Ctx.ss = env->segs[R_SS].selector;
1276 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1277 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1278 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1279
1280 Ctx.msrEFER = env->efer;
1281
1282 /* Hardware accelerated raw-mode:
1283 *
1284 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1285 */
1286 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1287 {
1288 *piException = EXCP_EXECUTE_HWACC;
1289 return true;
1290 }
1291 return false;
1292 }
1293
1294 /*
1295 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1296 * or 32 bits protected mode ring 0 code
1297 *
1298 * The tests are ordered by the likelyhood of being true during normal execution.
1299 */
1300 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1301 {
1302 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1303 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1304 return false;
1305 }
1306
1307#ifndef VBOX_RAW_V86
1308 if (fFlags & VM_MASK) {
1309 STAM_COUNTER_INC(&gStatRefuseVM86);
1310 Log2(("raw mode refused: VM_MASK\n"));
1311 return false;
1312 }
1313#endif
1314
1315 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1316 {
1317#ifndef DEBUG_bird
1318 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1319#endif
1320 return false;
1321 }
1322
1323 if (env->singlestep_enabled)
1324 {
1325 //Log2(("raw mode refused: Single step\n"));
1326 return false;
1327 }
1328
1329 if (env->nb_breakpoints > 0)
1330 {
1331 //Log2(("raw mode refused: Breakpoints\n"));
1332 return false;
1333 }
1334
1335 u32CR0 = env->cr[0];
1336 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1337 {
1338 STAM_COUNTER_INC(&gStatRefusePaging);
1339 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1340 return false;
1341 }
1342
1343 if (env->cr[4] & CR4_PAE_MASK)
1344 {
1345 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1346 {
1347 STAM_COUNTER_INC(&gStatRefusePAE);
1348 return false;
1349 }
1350 }
1351
1352 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1353 {
1354 if (!EMIsRawRing3Enabled(env->pVM))
1355 return false;
1356
1357 if (!(env->eflags & IF_MASK))
1358 {
1359 STAM_COUNTER_INC(&gStatRefuseIF0);
1360 Log2(("raw mode refused: IF (RawR3)\n"));
1361 return false;
1362 }
1363
1364 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1365 {
1366 STAM_COUNTER_INC(&gStatRefuseWP0);
1367 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1368 return false;
1369 }
1370 }
1371 else
1372 {
1373 if (!EMIsRawRing0Enabled(env->pVM))
1374 return false;
1375
1376 // Let's start with pure 32 bits ring 0 code first
1377 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1378 {
1379 STAM_COUNTER_INC(&gStatRefuseCode16);
1380 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1381 return false;
1382 }
1383
1384 // Only R0
1385 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1386 {
1387 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1388 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1389 return false;
1390 }
1391
1392 if (!(u32CR0 & CR0_WP_MASK))
1393 {
1394 STAM_COUNTER_INC(&gStatRefuseWP0);
1395 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1396 return false;
1397 }
1398
1399 if (PATMIsPatchGCAddr(env->pVM, eip))
1400 {
1401 Log2(("raw r0 mode forced: patch code\n"));
1402 *piException = EXCP_EXECUTE_RAW;
1403 return true;
1404 }
1405
1406#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1407 if (!(env->eflags & IF_MASK))
1408 {
1409 STAM_COUNTER_INC(&gStatRefuseIF0);
1410 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1411 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1412 return false;
1413 }
1414#endif
1415
1416 env->state |= CPU_RAW_RING0;
1417 }
1418
1419 /*
1420 * Don't reschedule the first time we're called, because there might be
1421 * special reasons why we're here that is not covered by the above checks.
1422 */
1423 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1424 {
1425 Log2(("raw mode refused: first scheduling\n"));
1426 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1427 return false;
1428 }
1429
1430 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1431 *piException = EXCP_EXECUTE_RAW;
1432 return true;
1433}
1434
1435
1436/**
1437 * Fetches a code byte.
1438 *
1439 * @returns Success indicator (bool) for ease of use.
1440 * @param env The CPU environment structure.
1441 * @param GCPtrInstr Where to fetch code.
1442 * @param pu8Byte Where to store the byte on success
1443 */
1444bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1445{
1446 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1447 if (RT_SUCCESS(rc))
1448 return true;
1449 return false;
1450}
1451
1452
1453/**
1454 * Flush (or invalidate if you like) page table/dir entry.
1455 *
1456 * (invlpg instruction; tlb_flush_page)
1457 *
1458 * @param env Pointer to cpu environment.
1459 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1460 */
1461void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1462{
1463 PVM pVM = env->pVM;
1464 PCPUMCTX pCtx;
1465 int rc;
1466
1467 /*
1468 * When we're replaying invlpg instructions or restoring a saved
1469 * state we disable this path.
1470 */
1471 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1472 return;
1473 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1474 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1475
1476 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1477
1478 /*
1479 * Update the control registers before calling PGMFlushPage.
1480 */
1481 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1482 Assert(pCtx);
1483 pCtx->cr0 = env->cr[0];
1484 pCtx->cr3 = env->cr[3];
1485 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1486 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1487 pCtx->cr4 = env->cr[4];
1488
1489 /*
1490 * Let PGM do the rest.
1491 */
1492 Assert(env->pVCpu);
1493 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1494 if (RT_FAILURE(rc))
1495 {
1496 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1497 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1498 }
1499 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1500}
1501
1502
1503#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest-physical address to a host pointer for the qemu TLB,
 * encoding access-handler information in the low pointer bits.
 *
 * @returns Host pointer with status flags in the low bits:
 *          (void *)1 when the page must go through the access handlers,
 *          bit 1 set when writes must be caught (write monitoring).
 * @param env1      The CPU environment.
 * @param physAddr  The guest-physical address to translate.
 * @param fWritable Whether a writable mapping is desired.
 *                  NOTE(review): currently ignored — a writable mapping is
 *                  always requested and write-monitoring is signalled via
 *                  bit 1 instead; confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* All-handled / unassigned pages: return the magic "handler" pointer. */
    if (RT_FAILURE(rc))
        return (void *)1;
    /* Write-monitored page: tag the pointer so writes get caught. */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1524#endif /* REM_PHYS_ADDR_IN_TLB */
1525
1526
1527/**
1528 * Called from tlb_protect_code in order to write monitor a code page.
1529 *
1530 * @param env Pointer to the CPU environment.
1531 * @param GCPtr Code page to monitor
1532 */
1533void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1534{
1535#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1536 Assert(env->pVM->rem.s.fInREM);
1537 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1538 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1539 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1540 && !(env->eflags & VM_MASK) /* no V86 mode */
1541 && !HWACCMIsEnabled(env->pVM))
1542 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1543#endif
1544}
1545
1546
1547/**
1548 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1549 *
1550 * @param env Pointer to the CPU environment.
1551 * @param GCPtr Code page to monitor
1552 */
1553void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1554{
1555 Assert(env->pVM->rem.s.fInREM);
1556#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1557 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1558 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1559 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1560 && !(env->eflags & VM_MASK) /* no V86 mode */
1561 && !HWACCMIsEnabled(env->pVM))
1562 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1563#endif
1564}
1565
1566
1567/**
1568 * Called when the CPU is initialized, any of the CRx registers are changed or
1569 * when the A20 line is modified.
1570 *
1571 * @param env Pointer to the CPU environment.
1572 * @param fGlobal Set if the flush is global.
1573 */
1574void remR3FlushTLB(CPUState *env, bool fGlobal)
1575{
1576 PVM pVM = env->pVM;
1577 PCPUMCTX pCtx;
1578
1579 /*
1580 * When we're replaying invlpg instructions or restoring a saved
1581 * state we disable this path.
1582 */
1583 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1584 return;
1585 Assert(pVM->rem.s.fInREM);
1586
1587 /*
1588 * The caller doesn't check cr4, so we have to do that for ourselves.
1589 */
1590 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1591 fGlobal = true;
1592 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1593
1594 /*
1595 * Update the control registers before calling PGMR3FlushTLB.
1596 */
1597 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1598 Assert(pCtx);
1599 pCtx->cr0 = env->cr[0];
1600 pCtx->cr3 = env->cr[3];
1601 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1602 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1603 pCtx->cr4 = env->cr[4];
1604
1605 /*
1606 * Let PGM do the rest.
1607 */
1608 Assert(env->pVCpu);
1609 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1610}
1611
1612
1613/**
1614 * Called when any of the cr0, cr4 or efer registers is updated.
1615 *
1616 * @param env Pointer to the CPU environment.
1617 */
1618void remR3ChangeCpuMode(CPUState *env)
1619{
1620 PVM pVM = env->pVM;
1621 uint64_t efer;
1622 PCPUMCTX pCtx;
1623 int rc;
1624
1625 /*
1626 * When we're replaying loads or restoring a saved
1627 * state this path is disabled.
1628 */
1629 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1630 return;
1631 Assert(pVM->rem.s.fInREM);
1632
1633 /*
1634 * Update the control registers before calling PGMChangeMode()
1635 * as it may need to map whatever cr3 is pointing to.
1636 */
1637 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1638 Assert(pCtx);
1639 pCtx->cr0 = env->cr[0];
1640 pCtx->cr3 = env->cr[3];
1641 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1642 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1643 pCtx->cr4 = env->cr[4];
1644
1645#ifdef TARGET_X86_64
1646 efer = env->efer;
1647#else
1648 efer = 0;
1649#endif
1650 Assert(env->pVCpu);
1651 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1652 if (rc != VINF_SUCCESS)
1653 {
1654 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1655 {
1656 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1657 remR3RaiseRC(env->pVM, rc);
1658 }
1659 else
1660 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1661 }
1662}
1663
1664
1665/**
1666 * Called from compiled code to run dma.
1667 *
1668 * @param env Pointer to the CPU environment.
1669 */
1670void remR3DmaRun(CPUState *env)
1671{
1672 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1673 PDMR3DmaRun(env->pVM);
1674 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1675}
1676
1677
1678/**
1679 * Called from compiled code to schedule pending timers in VMM
1680 *
1681 * @param env Pointer to the CPU environment.
1682 */
1683void remR3TimersRun(CPUState *env)
1684{
1685 LogFlow(("remR3TimersRun:\n"));
1686 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1687 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1688 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1689 TMR3TimerQueuesDo(env->pVM);
1690 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1691 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1692}
1693
1694
1695/**
1696 * Record trap occurance
1697 *
1698 * @returns VBox status code
1699 * @param env Pointer to the CPU environment.
1700 * @param uTrap Trap nr
1701 * @param uErrorCode Error code
1702 * @param pvNextEIP Next EIP
1703 */
1704int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1705{
1706 PVM pVM = env->pVM;
1707#ifdef VBOX_WITH_STATISTICS
1708 static STAMCOUNTER s_aStatTrap[255];
1709 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1710#endif
1711
1712#ifdef VBOX_WITH_STATISTICS
1713 if (uTrap < 255)
1714 {
1715 if (!s_aRegisters[uTrap])
1716 {
1717 char szStatName[64];
1718 s_aRegisters[uTrap] = true;
1719 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1720 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1721 }
1722 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1723 }
1724#endif
1725 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1726 if( uTrap < 0x20
1727 && (env->cr[0] & X86_CR0_PE)
1728 && !(env->eflags & X86_EFL_VM))
1729 {
1730#ifdef DEBUG
1731 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1732#endif
1733 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1734 {
1735 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1736 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1737 return VERR_REM_TOO_MANY_TRAPS;
1738 }
1739 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1740 pVM->rem.s.cPendingExceptions = 1;
1741 pVM->rem.s.uPendingException = uTrap;
1742 pVM->rem.s.uPendingExcptEIP = env->eip;
1743 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1744 }
1745 else
1746 {
1747 pVM->rem.s.cPendingExceptions = 0;
1748 pVM->rem.s.uPendingException = uTrap;
1749 pVM->rem.s.uPendingExcptEIP = env->eip;
1750 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1751 }
1752 return VINF_SUCCESS;
1753}
1754
1755
1756/*
1757 * Clear current active trap
1758 *
1759 * @param pVM VM Handle.
1760 */
1761void remR3TrapClear(PVM pVM)
1762{
1763 pVM->rem.s.cPendingExceptions = 0;
1764 pVM->rem.s.uPendingException = 0;
1765 pVM->rem.s.uPendingExcptEIP = 0;
1766 pVM->rem.s.uPendingExcptCR2 = 0;
1767}
1768
1769
1770/*
1771 * Record previous call instruction addresses
1772 *
1773 * @param env Pointer to the CPU environment.
1774 */
1775void remR3RecordCall(CPUState *env)
1776{
1777 CSAMR3RecordCallAddress(env->pVM, env->eip);
1778}
1779
1780
1781/**
1782 * Syncs the internal REM state with the VM.
1783 *
1784 * This must be called before REMR3Run() is invoked whenever when the REM
1785 * state is not up to date. Calling it several times in a row is not
1786 * permitted.
1787 *
1788 * @returns VBox status code.
1789 *
1790 * @param pVM VM Handle.
1791 * @param pVCpu VMCPU Handle.
1792 *
1793 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1794 * no do this since the majority of the callers don't want any unnecessary of events
1795 * pending that would immediatly interrupt execution.
1796 */
1797REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1798{
1799 register const CPUMCTX *pCtx;
1800 register unsigned fFlags;
1801 bool fHiddenSelRegsValid;
1802 unsigned i;
1803 TRPMEVENT enmType;
1804 uint8_t u8TrapNo;
1805 int rc;
1806
1807 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1808 Log2(("REMR3State:\n"));
1809
1810 pVM->rem.s.Env.pVCpu = pVCpu;
1811 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1812 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1813
1814 Assert(!pVM->rem.s.fInREM);
1815 pVM->rem.s.fInStateSync = true;
1816
1817 /*
1818 * If we have to flush TBs, do that immediately.
1819 */
1820 if (pVM->rem.s.fFlushTBs)
1821 {
1822 STAM_COUNTER_INC(&gStatFlushTBs);
1823 tb_flush(&pVM->rem.s.Env);
1824 pVM->rem.s.fFlushTBs = false;
1825 }
1826
1827 /*
1828 * Copy the registers which require no special handling.
1829 */
1830#ifdef TARGET_X86_64
1831 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1832 Assert(R_EAX == 0);
1833 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1834 Assert(R_ECX == 1);
1835 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1836 Assert(R_EDX == 2);
1837 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1838 Assert(R_EBX == 3);
1839 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1840 Assert(R_ESP == 4);
1841 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1842 Assert(R_EBP == 5);
1843 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1844 Assert(R_ESI == 6);
1845 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1846 Assert(R_EDI == 7);
1847 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1848 pVM->rem.s.Env.regs[8] = pCtx->r8;
1849 pVM->rem.s.Env.regs[9] = pCtx->r9;
1850 pVM->rem.s.Env.regs[10] = pCtx->r10;
1851 pVM->rem.s.Env.regs[11] = pCtx->r11;
1852 pVM->rem.s.Env.regs[12] = pCtx->r12;
1853 pVM->rem.s.Env.regs[13] = pCtx->r13;
1854 pVM->rem.s.Env.regs[14] = pCtx->r14;
1855 pVM->rem.s.Env.regs[15] = pCtx->r15;
1856
1857 pVM->rem.s.Env.eip = pCtx->rip;
1858
1859 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1860#else
1861 Assert(R_EAX == 0);
1862 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1863 Assert(R_ECX == 1);
1864 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1865 Assert(R_EDX == 2);
1866 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1867 Assert(R_EBX == 3);
1868 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1869 Assert(R_ESP == 4);
1870 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1871 Assert(R_EBP == 5);
1872 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1873 Assert(R_ESI == 6);
1874 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1875 Assert(R_EDI == 7);
1876 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1877 pVM->rem.s.Env.eip = pCtx->eip;
1878
1879 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1880#endif
1881
1882 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1883
1884 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1885 for (i=0;i<8;i++)
1886 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1887
1888 /*
1889 * Clear the halted hidden flag (the interrupt waking up the CPU can
1890 * have been dispatched in raw mode).
1891 */
1892 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1893
1894 /*
1895 * Replay invlpg?
1896 */
1897 if (pVM->rem.s.cInvalidatedPages)
1898 {
1899 RTUINT i;
1900
1901 pVM->rem.s.fIgnoreInvlPg = true;
1902 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1903 {
1904 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1905 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1906 }
1907 pVM->rem.s.fIgnoreInvlPg = false;
1908 pVM->rem.s.cInvalidatedPages = 0;
1909 }
1910
1911 /* Replay notification changes. */
1912 REMR3ReplayHandlerNotifications(pVM);
1913
1914 /* Update MSRs; before CRx registers! */
1915 pVM->rem.s.Env.efer = pCtx->msrEFER;
1916 pVM->rem.s.Env.star = pCtx->msrSTAR;
1917 pVM->rem.s.Env.pat = pCtx->msrPAT;
1918#ifdef TARGET_X86_64
1919 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1920 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1921 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1922 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1923
1924 /* Update the internal long mode activate flag according to the new EFER value. */
1925 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1926 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1927 else
1928 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1929#endif
1930
1931 /*
1932 * Registers which are rarely changed and require special handling / order when changed.
1933 */
1934 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1935 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1936 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1937 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1938 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1939 {
1940 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1941 {
1942 pVM->rem.s.fIgnoreCR3Load = true;
1943 tlb_flush(&pVM->rem.s.Env, true);
1944 pVM->rem.s.fIgnoreCR3Load = false;
1945 }
1946
1947 /* CR4 before CR0! */
1948 if (fFlags & CPUM_CHANGED_CR4)
1949 {
1950 pVM->rem.s.fIgnoreCR3Load = true;
1951 pVM->rem.s.fIgnoreCpuMode = true;
1952 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1953 pVM->rem.s.fIgnoreCpuMode = false;
1954 pVM->rem.s.fIgnoreCR3Load = false;
1955 }
1956
1957 if (fFlags & CPUM_CHANGED_CR0)
1958 {
1959 pVM->rem.s.fIgnoreCR3Load = true;
1960 pVM->rem.s.fIgnoreCpuMode = true;
1961 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1962 pVM->rem.s.fIgnoreCpuMode = false;
1963 pVM->rem.s.fIgnoreCR3Load = false;
1964 }
1965
1966 if (fFlags & CPUM_CHANGED_CR3)
1967 {
1968 pVM->rem.s.fIgnoreCR3Load = true;
1969 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1970 pVM->rem.s.fIgnoreCR3Load = false;
1971 }
1972
1973 if (fFlags & CPUM_CHANGED_GDTR)
1974 {
1975 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1976 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1977 }
1978
1979 if (fFlags & CPUM_CHANGED_IDTR)
1980 {
1981 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1982 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1983 }
1984
1985 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1986 {
1987 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1988 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1989 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1990 }
1991
1992 if (fFlags & CPUM_CHANGED_LDTR)
1993 {
1994 if (fHiddenSelRegsValid)
1995 {
1996 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1997 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1998 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1999 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2000 }
2001 else
2002 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2003 }
2004
2005 if (fFlags & CPUM_CHANGED_CPUID)
2006 {
2007 uint32_t u32Dummy;
2008
2009 /*
2010 * Get the CPUID features.
2011 */
2012 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2013 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2014 }
2015
2016 /* Sync FPU state after CR4, CPUID and EFER (!). */
2017 if (fFlags & CPUM_CHANGED_FPU_REM)
2018 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2019 }
2020
2021 /*
2022 * Sync TR unconditionally to make life simpler.
2023 */
2024 pVM->rem.s.Env.tr.selector = pCtx->tr;
2025 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2026 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2027 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2028 /* Note! do_interrupt will fault if the busy flag is still set... */
2029 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2030
2031 /*
2032 * Update selector registers.
2033 * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT in sync_seg. This will happen with
2035 * saved state which takes a quick dip into rawmode for instance.
2036 */
2037 /*
2038 * Stack; Note first check this one as the CPL might have changed. The
2039 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2040 */
2041
2042 if (fHiddenSelRegsValid)
2043 {
2044 /* The hidden selector registers are valid in the CPU context. */
2045 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2046
2047 /* Set current CPL */
2048 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2049
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2054 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2055 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2056 }
2057 else
2058 {
2059 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2060 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2061 {
2062 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2063
2064 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2065 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2066#ifdef VBOX_WITH_STATISTICS
2067 if (pVM->rem.s.Env.segs[R_SS].newselector)
2068 {
2069 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2070 }
2071#endif
2072 }
2073 else
2074 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2075
2076 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2077 {
2078 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2079 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2080#ifdef VBOX_WITH_STATISTICS
2081 if (pVM->rem.s.Env.segs[R_ES].newselector)
2082 {
2083 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2084 }
2085#endif
2086 }
2087 else
2088 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2089
2090 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2091 {
2092 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2093 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2094#ifdef VBOX_WITH_STATISTICS
2095 if (pVM->rem.s.Env.segs[R_CS].newselector)
2096 {
2097 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2098 }
2099#endif
2100 }
2101 else
2102 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2103
2104 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2105 {
2106 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2107 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2108#ifdef VBOX_WITH_STATISTICS
2109 if (pVM->rem.s.Env.segs[R_DS].newselector)
2110 {
2111 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2112 }
2113#endif
2114 }
2115 else
2116 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2117
2118 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2119 * be the same but not the base/limit. */
2120 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2121 {
2122 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2123 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2124#ifdef VBOX_WITH_STATISTICS
2125 if (pVM->rem.s.Env.segs[R_FS].newselector)
2126 {
2127 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2128 }
2129#endif
2130 }
2131 else
2132 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2133
2134 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2135 {
2136 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2137 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2138#ifdef VBOX_WITH_STATISTICS
2139 if (pVM->rem.s.Env.segs[R_GS].newselector)
2140 {
2141 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2142 }
2143#endif
2144 }
2145 else
2146 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2147 }
2148
2149 /*
2150 * Check for traps.
2151 */
2152 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2153 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2154 if (RT_SUCCESS(rc))
2155 {
2156#ifdef DEBUG
2157 if (u8TrapNo == 0x80)
2158 {
2159 remR3DumpLnxSyscall(pVCpu);
2160 remR3DumpOBsdSyscall(pVCpu);
2161 }
2162#endif
2163
2164 pVM->rem.s.Env.exception_index = u8TrapNo;
2165 if (enmType != TRPM_SOFTWARE_INT)
2166 {
2167 pVM->rem.s.Env.exception_is_int = 0;
2168 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2169 }
2170 else
2171 {
2172 /*
             * There are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
             * for int03 and into.
2176 */
2177 pVM->rem.s.Env.exception_is_int = 1;
2178 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2179 /* int 3 may be generated by one-byte 0xcc */
2180 if (u8TrapNo == 3)
2181 {
2182 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2183 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2184 }
2185 /* int 4 may be generated by one-byte 0xce */
2186 else if (u8TrapNo == 4)
2187 {
2188 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2189 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2190 }
2191 }
2192
2193 /* get error code and cr2 if needed. */
2194 switch (u8TrapNo)
2195 {
2196 case 0x0e:
2197 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2198 /* fallthru */
2199 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2200 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2201 break;
2202
2203 case 0x11: case 0x08:
2204 default:
2205 pVM->rem.s.Env.error_code = 0;
2206 break;
2207 }
2208
2209 /*
2210 * We can now reset the active trap since the recompiler is gonna have a go at it.
2211 */
2212 rc = TRPMResetTrap(pVCpu);
2213 AssertRC(rc);
2214 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2215 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2216 }
2217
2218 /*
2219 * Clear old interrupt request flags; Check for pending hardware interrupts.
2220 * (See @remark for why we don't check for other FFs.)
2221 */
2222 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2223 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2224 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2225 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2226
2227 /*
2228 * We're now in REM mode.
2229 */
2230 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2231 pVM->rem.s.fInREM = true;
2232 pVM->rem.s.fInStateSync = false;
2233 pVM->rem.s.cCanExecuteRaw = 0;
2234 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2235 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2236 return VINF_SUCCESS;
2237}
2238
2239
2240/**
 * Syncs back changes in the REM state to the VM state.
2242 *
2243 * This must be called after invoking REMR3Run().
2244 * Calling it several times in a row is not permitted.
2245 *
2246 * @returns VBox status code.
2247 *
2248 * @param pVM VM Handle.
2249 * @param pVCpu VMCPU Handle.
2250 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that sync_seg could not resolve while in REM (newselector != 0). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* If CR4.VME was toggled while in REM, force a TSS resync (the TSS
       interrupt redirection bitmap usage depends on VME). */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: only flag a resync when the base actually moved. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* (flags >> 8) & 0xF0FF extracts the attribute bits of the descriptor's
       second dword, dropping the limit 19:16 nibble that sits between them. */
    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* REMR3State clears the busy bit before entering REM (do_interrupt would
           fault on it); put it back so the VMM sees the architectural value. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        /* Hand the still-pending event back to TRPM so it isn't lost. */
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the error code / CR2 for the exceptions that carry one. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2490
2491
2492/**
2493 * This is called by the disassembler when it wants to update the cpu state
2494 * before for instance doing a register dump.
2495 */
2496static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2497{
2498 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2499 unsigned i;
2500
2501 Assert(pVM->rem.s.fInREM);
2502
2503 /*
2504 * Copy back the registers.
2505 * This is done in the order they are declared in the CPUMCTX structure.
2506 */
2507
2508 /** @todo FOP */
2509 /** @todo FPUIP */
2510 /** @todo CS */
2511 /** @todo FPUDP */
2512 /** @todo DS */
2513 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2514 pCtx->fpu.MXCSR = 0;
2515 pCtx->fpu.MXCSR_MASK = 0;
2516
2517 /** @todo check if FPU/XMM was actually used in the recompiler */
2518 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2519//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2520
2521#ifdef TARGET_X86_64
2522 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2523 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2524 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2525 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2526 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2527 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2528 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2529 pCtx->r8 = pVM->rem.s.Env.regs[8];
2530 pCtx->r9 = pVM->rem.s.Env.regs[9];
2531 pCtx->r10 = pVM->rem.s.Env.regs[10];
2532 pCtx->r11 = pVM->rem.s.Env.regs[11];
2533 pCtx->r12 = pVM->rem.s.Env.regs[12];
2534 pCtx->r13 = pVM->rem.s.Env.regs[13];
2535 pCtx->r14 = pVM->rem.s.Env.regs[14];
2536 pCtx->r15 = pVM->rem.s.Env.regs[15];
2537
2538 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2539#else
2540 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2541 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2542 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2543 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2544 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2545 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2546 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2547
2548 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2549#endif
2550
2551 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2552
2553 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2554 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2555 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2556 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2557 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2558
2559#ifdef TARGET_X86_64
2560 pCtx->rip = pVM->rem.s.Env.eip;
2561 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2562#else
2563 pCtx->eip = pVM->rem.s.Env.eip;
2564 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2565#endif
2566
2567 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2568 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2569 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2570 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2571 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2572 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2573
2574 for (i = 0; i < 8; i++)
2575 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2576
2577 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2578 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2579 {
2580 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2581 STAM_COUNTER_INC(&gStatREMGDTChange);
2582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2583 }
2584
2585 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2586 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2587 {
2588 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2589 STAM_COUNTER_INC(&gStatREMIDTChange);
2590 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2591 }
2592
2593 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2594 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2595 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2596 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2597 {
2598 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2599 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2600 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2601 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2602 STAM_COUNTER_INC(&gStatREMLDTRChange);
2603 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2604 }
2605
2606 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2607 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2608 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2609 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2610 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2611 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2612 : 0) )
2613 {
2614 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2615 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2616 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2617 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2618 pCtx->tr = pVM->rem.s.Env.tr.selector;
2619 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2620 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2621 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2622 if (pCtx->trHid.Attr.u)
2623 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2624 STAM_COUNTER_INC(&gStatREMTRChange);
2625 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2626 }
2627
2628 /** @todo These values could still be out of sync! */
2629 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2630 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2631 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2632 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2635 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2636 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2637
2638 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2639 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2640 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2641
2642 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2643 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2644 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2645
2646 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2647 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2648 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2649
2650 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2651 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2652 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2653
2654 /* Sysenter MSR */
2655 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2656 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2657 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2658
2659 /* System MSRs. */
2660 pCtx->msrEFER = pVM->rem.s.Env.efer;
2661 pCtx->msrSTAR = pVM->rem.s.Env.star;
2662 pCtx->msrPAT = pVM->rem.s.Env.pat;
2663#ifdef TARGET_X86_64
2664 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2665 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2666 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2667 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2668#endif
2669
2670}
2671
2672
2673/**
2674 * Update the VMM state information if we're currently in REM.
2675 *
2676 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2677 * we're currently executing in REM and the VMM state is invalid. This method will of
2678 * course check that we're executing in REM before syncing any data over to the VMM.
2679 *
2680 * @param pVM The VM handle.
2681 * @param pVCpu The VMCPU handle.
2682 */
2683REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2684{
2685 if (pVM->rem.s.fInREM)
2686 remR3StateUpdate(pVM, pVCpu);
2687}
2688
2689
2690#undef LOG_GROUP
2691#define LOG_GROUP LOG_GROUP_REM
2692
2693
2694/**
2695 * Notify the recompiler about Address Gate 20 state change.
2696 *
2697 * This notification is required since A20 gate changes are
2698 * initialized from a device driver and the VM might just as
2699 * well be in REM mode as in RAW mode.
2700 *
2701 * @param pVM VM handle.
2702 * @param pVCpu VMCPU handle.
2703 * @param fEnable True if the gate should be enabled.
2704 * False if the gate should be disabled.
2705 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the QEMU A20 update with the cIgnoreAll counter — presumably to
       suppress REM notifications triggered by the resulting flushes; verify
       against the other cIgnoreAll users in this file. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2715
2716
2717/**
2718 * Replays the handler notification changes
2719 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2720 *
2721 * @param pVM VM handle.
2722 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;
#endif
        /* NOTE(review): 'c' is only declared under VBOX_STRICT, yet the
           temporary '#if 1' verification block below uses it unconditionally —
           confirm that release builds still compile with this change. */

        /* Lockless purging of pending notifications: atomically detach the
           whole pending LIFO list in one exchange. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (The producers push onto the head, so the detached list is newest-first.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch the notification to the matching remR3NotifyHandler* worker. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Lockless CAS push: retry until nobody else touched idxFreeList.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

/* Temporarily turned on for release builds to investigate #4113 */
#if 1 //def VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
2840
2841
2842/**
2843 * Notify REM about changed code page.
2844 *
2845 * @returns VBox status code.
2846 * @param pVM VM handle.
2847 * @param pVCpu VMCPU handle.
2848 * @param pvCodePage Code page address
2849 */
2850REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2851{
2852#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2853 int rc;
2854 RTGCPHYS PhysGC;
2855 uint64_t flags;
2856
2857 VM_ASSERT_EMT(pVM);
2858
2859 /*
2860 * Get the physical page address.
2861 */
2862 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2863 if (rc == VINF_SUCCESS)
2864 {
2865 /*
2866 * Sync the required registers and flush the whole page.
2867 * (Easier to do the whole page than notifying it about each physical
2868 * byte that was changed.
2869 */
2870 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2871 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2872 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2873 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2874
2875 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2876 }
2877#endif
2878 return VINF_SUCCESS;
2879}
2880
2881
2882/**
2883 * Notification about a successful MMR3PhysRegister() call.
2884 *
2885 * @param pVM VM handle.
2886 * @param GCPhys The physical address the RAM.
2887 * @param cb Size of the memory.
2888 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2889 */
2890REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2891{
2892 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2893 VM_ASSERT_EMT(pVM);
2894
2895 /*
2896 * Validate input - we trust the caller.
2897 */
2898 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2899 Assert(cb);
2900 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2901 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2902
2903 /*
2904 * Base ram? Update GCPhysLastRam.
2905 */
2906 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2907 {
2908 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2909 {
2910 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2911 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2912 }
2913 }
2914
2915 /*
2916 * Register the ram.
2917 */
2918 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2919
2920 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2921 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2922 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2923
2924 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2925}
2926
2927
2928/**
2929 * Notification about a successful MMR3PhysRomRegister() call.
2930 *
2931 * @param pVM VM handle.
2932 * @param GCPhys The physical address of the ROM.
2933 * @param cb The size of the ROM.
2934 * @param pvCopy Pointer to the ROM copy.
2935 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2936 * This function will be called when ever the protection of the
2937 * shadow ROM changes (at reset and end of POST).
2938 */
2939REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2940{
2941 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2942 VM_ASSERT_EMT(pVM);
2943
2944 /*
2945 * Validate input - we trust the caller.
2946 */
2947 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2948 Assert(cb);
2949 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2950
2951 /*
2952 * Register the rom.
2953 */
2954 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2955
2956 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2957 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2958 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2959
2960 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2961}
2962
2963
2964/**
2965 * Notification about a successful memory deregistration or reservation.
2966 *
2967 * @param pVM VM Handle.
2968 * @param GCPhys Start physical address.
2969 * @param cb The size of the range.
2970 */
2971REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2972{
2973 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2974 VM_ASSERT_EMT(pVM);
2975
2976 /*
2977 * Validate input - we trust the caller.
2978 */
2979 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2980 Assert(cb);
2981 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2982
2983 /*
2984 * Unassigning the memory.
2985 */
2986 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2987
2988 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2989 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2990 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2991
2992 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2993}
2994
2995
2996/**
2997 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2998 *
2999 * @param pVM VM Handle.
3000 * @param enmType Handler type.
3001 * @param GCPhys Handler range address.
3002 * @param cb Size of the handler range.
3003 * @param fHasHCHandler Set if the handler has a HC callback function.
3004 *
3005 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3006 * Handler memory type to memory which has no HC handler.
3007 */
3008static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3009{
3010 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3011 enmType, GCPhys, cb, fHasHCHandler));
3012
3013 VM_ASSERT_EMT(pVM);
3014 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3015 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3016
3017
3018 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3019
3020 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3021 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3022 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3023 else if (fHasHCHandler)
3024 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3025 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3026
3027 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3028}
3029
3030/**
3031 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3032 *
3033 * @param pVM VM Handle.
3034 * @param enmType Handler type.
3035 * @param GCPhys Handler range address.
3036 * @param cb Size of the handler range.
3037 * @param fHasHCHandler Set if the handler has a HC callback function.
3038 *
3039 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3040 * Handler memory type to memory which has no HC handler.
3041 */
3042REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3043{
3044 REMR3ReplayHandlerNotifications(pVM);
3045
3046 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3047}
3048
3049/**
3050 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3051 *
3052 * @param pVM VM Handle.
3053 * @param enmType Handler type.
3054 * @param GCPhys Handler range address.
3055 * @param cb Size of the handler range.
3056 * @param fHasHCHandler Set if the handler has a HC callback function.
3057 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3058 */
3059static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3060{
3061 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3062 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3063 VM_ASSERT_EMT(pVM);
3064
3065
3066 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3067
3068 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3069 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3070 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3071 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3072 else if (fHasHCHandler)
3073 {
3074 if (!fRestoreAsRAM)
3075 {
3076 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3077 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3078 }
3079 else
3080 {
3081 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3082 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3083 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3084 }
3085 }
3086 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3087
3088 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3089}
3090
3091/**
3092 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3093 *
3094 * @param pVM VM Handle.
3095 * @param enmType Handler type.
3096 * @param GCPhys Handler range address.
3097 * @param cb Size of the handler range.
3098 * @param fHasHCHandler Set if the handler has a HC callback function.
3099 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3100 */
3101REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3102{
3103 REMR3ReplayHandlerNotifications(pVM);
3104 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3105}
3106
3107
3108/**
3109 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3110 *
3111 * @param pVM VM Handle.
3112 * @param enmType Handler type.
3113 * @param GCPhysOld Old handler range address.
3114 * @param GCPhysNew New handler range address.
3115 * @param cb Size of the handler range.
3116 * @param fHasHCHandler Set if the handler has a HC callback function.
3117 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3118 */
3119static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3120{
3121 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3122 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3123 VM_ASSERT_EMT(pVM);
3124 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3125
3126 if (fHasHCHandler)
3127 {
3128 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3129
3130 /*
3131 * Reset the old page.
3132 */
3133 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3134 if (!fRestoreAsRAM)
3135 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3136 else
3137 {
3138 /* This is not perfect, but it'll do for PD monitoring... */
3139 Assert(cb == PAGE_SIZE);
3140 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3141 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3142 }
3143
3144 /*
3145 * Update the new page.
3146 */
3147 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3148 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3149 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3150 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3151
3152 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3153 }
3154}
3155
3156/**
3157 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3158 *
3159 * @param pVM VM Handle.
3160 * @param enmType Handler type.
3161 * @param GCPhysOld Old handler range address.
3162 * @param GCPhysNew New handler range address.
3163 * @param cb Size of the handler range.
3164 * @param fHasHCHandler Set if the handler has a HC callback function.
3165 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3166 */
3167REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3168{
3169 REMR3ReplayHandlerNotifications(pVM);
3170
3171 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3172}
3173
3174/**
3175 * Checks if we're handling access to this page or not.
3176 *
3177 * @returns true if we're trapping access.
3178 * @returns false if we aren't.
3179 * @param pVM The VM handle.
3180 * @param GCPhys The physical address.
3181 *
3182 * @remark This function will only work correctly in VBOX_STRICT builds!
3183 */
3184REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3185{
3186#ifdef VBOX_STRICT
3187 unsigned long off;
3188 REMR3ReplayHandlerNotifications(pVM);
3189
3190 off = get_phys_page_offset(GCPhys);
3191 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3192 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3193 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3194#else
3195 return false;
3196#endif
3197}
3198
3199
3200/**
3201 * Deals with a rare case in get_phys_addr_code where the code
3202 * is being monitored.
3203 *
3204 * It could also be an MMIO page, in which case we will raise a fatal error.
3205 *
3206 * @returns The physical address corresponding to addr.
3207 * @param env The cpu environment.
3208 * @param addr The virtual address.
3209 * @param pTLBEntry The TLB entry.
3210 */
3211target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3212 target_ulong addr,
3213 CPUTLBEntry* pTLBEntry,
3214 target_phys_addr_t ioTLBEntry)
3215{
3216 PVM pVM = env->pVM;
3217
3218 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3219 {
3220 /* If code memory is being monitored, appropriate IOTLB entry will have
3221 handler IO type, and addend will provide real physical address, no
3222 matter if we store VA in TLB or not, as handlers are always passed PA */
3223 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3224 return ret;
3225 }
3226 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3227 "*** handlers\n",
3228 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3229 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3230 LogRel(("*** mmio\n"));
3231 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3232 LogRel(("*** phys\n"));
3233 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3234 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3235 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3236 AssertFatalFailed();
3237}
3238
3239/**
3240 * Read guest RAM and ROM.
3241 *
3242 * @param SrcGCPhys The source address (guest physical).
3243 * @param pvDst The destination address.
3244 * @param cb Number of bytes
3245 */
3246void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3247{
3248 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3249 VBOX_CHECK_ADDR(SrcGCPhys);
3250 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3251#ifdef VBOX_DEBUG_PHYS
3252 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3253#endif
3254 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3255}
3256
3257
3258/**
3259 * Read guest RAM and ROM, unsigned 8-bit.
3260 *
3261 * @param SrcGCPhys The source address (guest physical).
3262 */
3263RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3264{
3265 uint8_t val;
3266 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3267 VBOX_CHECK_ADDR(SrcGCPhys);
3268 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3269 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3270#ifdef VBOX_DEBUG_PHYS
3271 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3272#endif
3273 return val;
3274}
3275
3276
3277/**
3278 * Read guest RAM and ROM, signed 8-bit.
3279 *
3280 * @param SrcGCPhys The source address (guest physical).
3281 */
3282RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3283{
3284 int8_t val;
3285 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3286 VBOX_CHECK_ADDR(SrcGCPhys);
3287 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3288 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3289#ifdef VBOX_DEBUG_PHYS
3290 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3291#endif
3292 return val;
3293}
3294
3295
3296/**
3297 * Read guest RAM and ROM, unsigned 16-bit.
3298 *
3299 * @param SrcGCPhys The source address (guest physical).
3300 */
3301RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3302{
3303 uint16_t val;
3304 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3305 VBOX_CHECK_ADDR(SrcGCPhys);
3306 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3307 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3308#ifdef VBOX_DEBUG_PHYS
3309 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3310#endif
3311 return val;
3312}
3313
3314
3315/**
3316 * Read guest RAM and ROM, signed 16-bit.
3317 *
3318 * @param SrcGCPhys The source address (guest physical).
3319 */
3320RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3321{
3322 int16_t val;
3323 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3324 VBOX_CHECK_ADDR(SrcGCPhys);
3325 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3326 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3327#ifdef VBOX_DEBUG_PHYS
3328 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3329#endif
3330 return val;
3331}
3332
3333
3334/**
3335 * Read guest RAM and ROM, unsigned 32-bit.
3336 *
3337 * @param SrcGCPhys The source address (guest physical).
3338 */
3339RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3340{
3341 uint32_t val;
3342 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3343 VBOX_CHECK_ADDR(SrcGCPhys);
3344 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3345 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3346#ifdef VBOX_DEBUG_PHYS
3347 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3348#endif
3349 return val;
3350}
3351
3352
3353/**
3354 * Read guest RAM and ROM, signed 32-bit.
3355 *
3356 * @param SrcGCPhys The source address (guest physical).
3357 */
3358RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3359{
3360 int32_t val;
3361 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3362 VBOX_CHECK_ADDR(SrcGCPhys);
3363 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3364 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3365#ifdef VBOX_DEBUG_PHYS
3366 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3367#endif
3368 return val;
3369}
3370
3371
3372/**
3373 * Read guest RAM and ROM, unsigned 64-bit.
3374 *
3375 * @param SrcGCPhys The source address (guest physical).
3376 */
3377uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3378{
3379 uint64_t val;
3380 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3381 VBOX_CHECK_ADDR(SrcGCPhys);
3382 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3383 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3384#ifdef VBOX_DEBUG_PHYS
3385 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3386#endif
3387 return val;
3388}
3389
3390
3391/**
3392 * Read guest RAM and ROM, signed 64-bit.
3393 *
3394 * @param SrcGCPhys The source address (guest physical).
3395 */
3396int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3397{
3398 int64_t val;
3399 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3400 VBOX_CHECK_ADDR(SrcGCPhys);
3401 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3402 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3403#ifdef VBOX_DEBUG_PHYS
3404 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3405#endif
3406 return val;
3407}
3408
3409
3410/**
3411 * Write guest RAM.
3412 *
3413 * @param DstGCPhys The destination address (guest physical).
3414 * @param pvSrc The source address.
3415 * @param cb Number of bytes to write
3416 */
3417void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3418{
3419 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3420 VBOX_CHECK_ADDR(DstGCPhys);
3421 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3422 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3423#ifdef VBOX_DEBUG_PHYS
3424 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3425#endif
3426}
3427
3428
3429/**
3430 * Write guest RAM, unsigned 8-bit.
3431 *
3432 * @param DstGCPhys The destination address (guest physical).
3433 * @param val Value
3434 */
3435void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3436{
3437 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3438 VBOX_CHECK_ADDR(DstGCPhys);
3439 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3440 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3441#ifdef VBOX_DEBUG_PHYS
3442 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3443#endif
3444}
3445
3446
3447/**
3448 * Write guest RAM, unsigned 8-bit.
3449 *
3450 * @param DstGCPhys The destination address (guest physical).
3451 * @param val Value
3452 */
3453void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3454{
3455 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3456 VBOX_CHECK_ADDR(DstGCPhys);
3457 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3458 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3459#ifdef VBOX_DEBUG_PHYS
3460 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3461#endif
3462}
3463
3464
3465/**
3466 * Write guest RAM, unsigned 32-bit.
3467 *
3468 * @param DstGCPhys The destination address (guest physical).
3469 * @param val Value
3470 */
3471void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3472{
3473 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3474 VBOX_CHECK_ADDR(DstGCPhys);
3475 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3476 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3477#ifdef VBOX_DEBUG_PHYS
3478 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3479#endif
3480}
3481
3482
3483/**
3484 * Write guest RAM, unsigned 64-bit.
3485 *
3486 * @param DstGCPhys The destination address (guest physical).
3487 * @param val Value
3488 */
3489void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3490{
3491 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3492 VBOX_CHECK_ADDR(DstGCPhys);
3493 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3494 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3495#ifdef VBOX_DEBUG_PHYS
3496 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3497#endif
3498}
3499
3500#undef LOG_GROUP
3501#define LOG_GROUP LOG_GROUP_REM_MMIO
3502
3503/** Read MMIO memory. */
3504static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3505{
3506 uint32_t u32 = 0;
3507 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3508 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3509 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3510 return u32;
3511}
3512
3513/** Read MMIO memory. */
3514static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3515{
3516 uint32_t u32 = 0;
3517 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3518 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3519 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3520 return u32;
3521}
3522
3523/** Read MMIO memory. */
3524static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3525{
3526 uint32_t u32 = 0;
3527 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3528 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3529 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3530 return u32;
3531}
3532
/** Write to MMIO memory, 8-bit; dispatched to IOM. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3541
/** Write to MMIO memory, 16-bit; dispatched to IOM. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3550
/** Write to MMIO memory, 32-bit; dispatched to IOM. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3559
3560
3561#undef LOG_GROUP
3562#define LOG_GROUP LOG_GROUP_REM_HANDLER
3563
3564/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3565
3566static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3567{
3568 uint8_t u8;
3569 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3570 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3571 return u8;
3572}
3573
3574static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3575{
3576 uint16_t u16;
3577 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3578 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3579 return u16;
3580}
3581
3582static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3583{
3584 uint32_t u32;
3585 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3586 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3587 return u32;
3588}
3589
/** Handler-memory write, 8-bit: goes through PGM so access handlers fire.
 *  NOTE(review): writes the first byte of &u32 — this is the low-order byte
 *  only on little-endian hosts; confirm big-endian hosts aren't supported. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3595
/** Handler-memory write, 16-bit: goes through PGM so access handlers fire.
 *  NOTE(review): writes the first two bytes of &u32 — little-endian host
 *  layout assumed; confirm. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3601
/** Handler-memory write, 32-bit: goes through PGM so access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3607
3608/* -+- disassembly -+- */
3609
3610#undef LOG_GROUP
3611#define LOG_GROUP LOG_GROUP_REM_DISAS
3612
3613
3614/**
3615 * Enables or disables singled stepped disassembly.
3616 *
3617 * @returns VBox status code.
3618 * @param pVM VM handle.
3619 * @param fEnable To enable set this flag, to disable clear it.
3620 */
3621static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3622{
3623 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3624 VM_ASSERT_EMT(pVM);
3625
3626 if (fEnable)
3627 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3628 else
3629 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3630 return VINF_SUCCESS;
3631}
3632
3633
3634/**
3635 * Enables or disables singled stepped disassembly.
3636 *
3637 * @returns VBox status code.
3638 * @param pVM VM handle.
3639 * @param fEnable To enable set this flag, to disable clear it.
3640 */
3641REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3642{
3643 int rc;
3644
3645 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3646 if (VM_IS_EMT(pVM))
3647 return remR3DisasEnableStepping(pVM, fEnable);
3648
3649 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3650 AssertRC(rc);
3651 return rc;
3652}
3653
3654
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument it prints the current state; with one boolean argument it
 * enables or disables single-stepped disassembly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3679
3680
3681/**
3682 * Disassembles one instruction and prints it to the log.
3683 *
3684 * @returns Success indicator.
3685 * @param env Pointer to the recompiler CPU structure.
3686 * @param f32BitCode Indicates that whether or not the code should
3687 * be disassembled as 16 or 32 bit. If -1 the CS
3688 * selector will be inspected.
3689 * @param pszPrefix
3690 */
3691bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3692{
3693 PVM pVM = env->pVM;
3694 const bool fLog = LogIsEnabled();
3695 const bool fLog2 = LogIs2Enabled();
3696 int rc = VINF_SUCCESS;
3697
3698 /*
3699 * Don't bother if there ain't any log output to do.
3700 */
3701 if (!fLog && !fLog2)
3702 return true;
3703
3704 /*
3705 * Update the state so DBGF reads the correct register values.
3706 */
3707 remR3StateUpdate(pVM, env->pVCpu);
3708
3709 /*
3710 * Log registers if requested.
3711 */
3712 if (!fLog2)
3713 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3714
3715 /*
3716 * Disassemble to log.
3717 */
3718 if (fLog)
3719 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3720
3721 return RT_SUCCESS(rc);
3722}
3723
3724
3725/**
3726 * Disassemble recompiled code.
3727 *
3728 * @param phFileIgnored Ignored, logfile usually.
3729 * @param pvCode Pointer to the code block.
3730 * @param cb Size of the code block.
3731 */
3732void disas(FILE *phFile, void *pvCode, unsigned long cb)
3733{
3734#ifdef DEBUG_TMP_LOGGING
3735# define DISAS_PRINTF(x...) fprintf(phFile, x)
3736#else
3737# define DISAS_PRINTF(x...) RTLogPrintf(x)
3738 if (LogIs2Enabled())
3739#endif
3740 {
3741 unsigned off = 0;
3742 char szOutput[256];
3743 DISCPUSTATE Cpu;
3744
3745 memset(&Cpu, 0, sizeof(Cpu));
3746#ifdef RT_ARCH_X86
3747 Cpu.mode = CPUMODE_32BIT;
3748#else
3749 Cpu.mode = CPUMODE_64BIT;
3750#endif
3751
3752 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3753 while (off < cb)
3754 {
3755 uint32_t cbInstr;
3756 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3757 DISAS_PRINTF("%s", szOutput);
3758 else
3759 {
3760 DISAS_PRINTF("disas error\n");
3761 cbInstr = 1;
3762#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3763 break;
3764#endif
3765 }
3766 off += cbInstr;
3767 }
3768 }
3769
3770#undef DISAS_PRINTF
3771}
3772
3773
/**
 * Disassemble guest code.
 *
 * Resolves the flat address back to CS:EIP and disassembles via DBGF, one
 * instruction per line, until cb bytes have been covered.
 *
 * @param phFile    Ignored, logfile usually. (Only used with DEBUG_TMP_LOGGING.)
 * @param uCode     The guest address of the code to disassemble. (flat?)
 * @param cb        Number of bytes to disassemble.
 * @param fFlags    Flags, probably something which tells if this is 16, 32 or 64 bit code.
 */
void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    if (LogIs2Enabled())
#endif
    {
        PVM pVM = cpu_single_env->pVM;
        PVMCPU pVCpu = cpu_single_env->pVCpu;
        RTSEL cs;           /* code selector of the guest code */
        RTGCUINTPTR eip;    /* CS-relative instruction pointer */

        Assert(pVCpu);

        /*
         * Update the state so DBGF reads the correct register values (flags).
         */
        remR3StateUpdate(pVM, pVCpu);

        /*
         * Do the disassembling.
         */
        DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
        cs = cpu_single_env->segs[R_CS].selector;
        eip = uCode - cpu_single_env->segs[R_CS].base;
        for (;;)
        {
            char szBuf[256];
            uint32_t cbInstr;
            int rc = DBGFR3DisasInstrEx(pVM,
                                        pVCpu->idCpu,
                                        cs,
                                        eip,
                                        0,
                                        szBuf, sizeof(szBuf),
                                        &cbInstr);
            if (RT_SUCCESS(rc))
                DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
            else
            {
                /* NOTE(review): szBuf may be stale/uninitialized when DBGFR3DisasInstrEx
                   fails — confirm whether the API guarantees a terminated buffer on error. */
                DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
                cbInstr = 1;    /* skip one byte and keep going */
            }

            /* next */
            if (cb <= cbInstr)
                break;
            cb -= cbInstr;
            uCode += cbInstr;
            eip += cbInstr;
        }
    }
#undef DISAS_PRINTF
}
3838
3839
3840/**
3841 * Looks up a guest symbol.
3842 *
3843 * @returns Pointer to symbol name. This is a static buffer.
3844 * @param orig_addr The address in question.
3845 */
3846const char *lookup_symbol(target_ulong orig_addr)
3847{
3848 PVM pVM = cpu_single_env->pVM;
3849 RTGCINTPTR off = 0;
3850 RTDBGSYMBOL Sym;
3851 DBGFADDRESS Addr;
3852
3853 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3854 if (RT_SUCCESS(rc))
3855 {
3856 static char szSym[sizeof(Sym.szName) + 48];
3857 if (!off)
3858 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3859 else if (off > 0)
3860 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3861 else
3862 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3863 return szSym;
3864 }
3865 return "<N/A>";
3866}
3867
3868
3869#undef LOG_GROUP
3870#define LOG_GROUP LOG_GROUP_REM
3871
3872
3873/* -+- FF notifications -+- */
3874
3875
/**
 * Notification about a pending interrupt.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @param   u8Interrupt     The interrupt vector to record as pending.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    /* Only one interrupt may be pending at a time; the previous one must have
       been consumed (see cpu_get_pic_interrupt) before a new one is recorded. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3889
/**
 * Queries the currently pending interrupt, if any.
 *
 * @returns Pending interrupt vector, or REM_NO_PENDING_IRQ when none is recorded.
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3902
/**
 * Notification about the interrupt FF being set.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  The emulation thread.
 */
REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
             (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
    if (pVM->rem.s.fInREM)
    {
        /* Atomically flag the hard interrupt request so the recompiler
           execution loop picks it up. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_HARD);
    }
}
3920
3921
/**
 * Notification about the interrupt FF being cleared.
 *
 * @param   pVM             VM Handle.
 * @param   pVCpu           VMCPU Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
{
    LogFlow(("REMR3NotifyInterruptClear:\n"));
    /* Only relevant while the recompiler is actually executing guest code. */
    if (pVM->rem.s.fInREM)
        cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
}
3935
3936
3937/**
3938 * Notification about pending timer(s).
3939 *
3940 * @param pVM VM Handle.
3941 * @param pVCpuDst The target cpu for this notification.
3942 * TM will not broadcast pending timer events, but use
3943 * a decidated EMT for them. So, only interrupt REM
3944 * execution if the given CPU is executing in REM.
3945 * @thread Any.
3946 */
3947REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3948{
3949#ifndef DEBUG_bird
3950 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3951#endif
3952 if (pVM->rem.s.fInREM)
3953 {
3954 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3955 {
3956 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3957 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3958 CPU_INTERRUPT_EXTERNAL_TIMER);
3959 }
3960 else
3961 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3962 }
3963 else
3964 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3965}
3966
3967
/**
 * Notification about pending DMA transfers.
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
{
    LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Atomically flag the DMA request so the recompiler loop exits to service it. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_DMA);
    }
}
3983
3984
/**
 * Notification about pending queued items requiring a recompiler exit.
 * (Header previously said "pending timer(s)" — copy/paste leftover.)
 *
 * @param   pVM     VM Handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
{
    LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Force the recompiler loop to exit so the queues can be processed. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4000
4001
/**
 * Notification about pending FF set by an external thread.
 *
 * @param   pVM     VM handle.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyFF(PVM pVM)
{
    LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
    if (pVM->rem.s.fInREM)
    {
        /* Force the recompiler loop to exit so the forced action can be serviced. */
        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
                       CPU_INTERRUPT_EXTERNAL_EXIT);
    }
}
4017
4018
4019#ifdef VBOX_WITH_STATISTICS
4020void remR3ProfileStart(int statcode)
4021{
4022 STAMPROFILEADV *pStat;
4023 switch(statcode)
4024 {
4025 case STATS_EMULATE_SINGLE_INSTR:
4026 pStat = &gStatExecuteSingleInstr;
4027 break;
4028 case STATS_QEMU_COMPILATION:
4029 pStat = &gStatCompilationQEmu;
4030 break;
4031 case STATS_QEMU_RUN_EMULATED_CODE:
4032 pStat = &gStatRunCodeQEmu;
4033 break;
4034 case STATS_QEMU_TOTAL:
4035 pStat = &gStatTotalTimeQEmu;
4036 break;
4037 case STATS_QEMU_RUN_TIMERS:
4038 pStat = &gStatTimers;
4039 break;
4040 case STATS_TLB_LOOKUP:
4041 pStat= &gStatTBLookup;
4042 break;
4043 case STATS_IRQ_HANDLING:
4044 pStat= &gStatIRQ;
4045 break;
4046 case STATS_RAW_CHECK:
4047 pStat = &gStatRawCheck;
4048 break;
4049
4050 default:
4051 AssertMsgFailed(("unknown stat %d\n", statcode));
4052 return;
4053 }
4054 STAM_PROFILE_ADV_START(pStat, a);
4055}
4056
4057
4058void remR3ProfileStop(int statcode)
4059{
4060 STAMPROFILEADV *pStat;
4061 switch(statcode)
4062 {
4063 case STATS_EMULATE_SINGLE_INSTR:
4064 pStat = &gStatExecuteSingleInstr;
4065 break;
4066 case STATS_QEMU_COMPILATION:
4067 pStat = &gStatCompilationQEmu;
4068 break;
4069 case STATS_QEMU_RUN_EMULATED_CODE:
4070 pStat = &gStatRunCodeQEmu;
4071 break;
4072 case STATS_QEMU_TOTAL:
4073 pStat = &gStatTotalTimeQEmu;
4074 break;
4075 case STATS_QEMU_RUN_TIMERS:
4076 pStat = &gStatTimers;
4077 break;
4078 case STATS_TLB_LOOKUP:
4079 pStat= &gStatTBLookup;
4080 break;
4081 case STATS_IRQ_HANDLING:
4082 pStat= &gStatIRQ;
4083 break;
4084 case STATS_RAW_CHECK:
4085 pStat = &gStatRawCheck;
4086 break;
4087 default:
4088 AssertMsgFailed(("unknown stat %d\n", statcode));
4089 return;
4090 }
4091 STAM_PROFILE_ADV_STOP(pStat, a);
4092}
4093#endif
4094
/**
 * Raise an RC, force rem exit.
 *
 * @param   pVM     VM handle.
 * @param   rc      The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Park the status code where the REM exit path will find it, then
       interrupt the recompiler loop so it exits promptly. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4109
4110
4111/* -+- timers -+- */
4112
/**
 * Reads the guest TSC for the recompiler, delegating to TM.
 *
 * @returns The current virtual CPU tick count.
 * @param   env     Pointer to the recompiler CPU structure.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4118
4119
4120/* -+- interrupts -+- */
4121
/**
 * Asserts the legacy FPU error line by raising ISA IRQ 13.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4127
4128int cpu_get_pic_interrupt(CPUState *env)
4129{
4130 uint8_t u8Interrupt;
4131 int rc;
4132
4133 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4134 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4135 * with the (a)pic.
4136 */
4137 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4138 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4139 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4140 * remove this kludge. */
4141 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4142 {
4143 rc = VINF_SUCCESS;
4144 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4145 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4146 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4147 }
4148 else
4149 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4150
4151 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4152 if (RT_SUCCESS(rc))
4153 {
4154 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4155 env->interrupt_request |= CPU_INTERRUPT_HARD;
4156 return u8Interrupt;
4157 }
4158 return -1;
4159}
4160
4161
4162/* -+- local apic -+- */
4163
/**
 * Sets the local APIC base MSR on behalf of the recompiler.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4169
4170uint64_t cpu_get_apic_base(CPUX86State *env)
4171{
4172 uint64_t u64;
4173 int rc = PDMApicGetBase(env->pVM, &u64);
4174 if (RT_SUCCESS(rc))
4175 {
4176 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4177 return u64;
4178 }
4179 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4180 return 0;
4181}
4182
/**
 * Sets the task priority register (CR8) on behalf of the recompiler.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The CR8 value (low 4 bits significant).
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4188
4189uint8_t cpu_get_apic_tpr(CPUX86State *env)
4190{
4191 uint8_t u8;
4192 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4193 if (RT_SUCCESS(rc))
4194 {
4195 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4196 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4197 }
4198 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4199 return 0;
4200}
4201
4202
4203uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4204{
4205 uint64_t value;
4206 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4207 if (RT_SUCCESS(rc))
4208 {
4209 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4210 return value;
4211 }
4212 /** @todo: exception ? */
4213 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4214 return value;
4215}
4216
/**
 * Writes an APIC MSR on behalf of the recompiler.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4223
/**
 * Reads a non-APIC guest MSR via CPUM.
 *
 * @returns The MSR value.
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4229
/**
 * Writes a non-APIC guest MSR via CPUM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4235
4236/* -+- I/O Ports -+- */
4237
4238#undef LOG_GROUP
4239#define LOG_GROUP LOG_GROUP_REM_IOPORT
4240
4241void cpu_outb(CPUState *env, int addr, int val)
4242{
4243 int rc;
4244
4245 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4246 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4247
4248 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4249 if (RT_LIKELY(rc == VINF_SUCCESS))
4250 return;
4251 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4252 {
4253 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4254 remR3RaiseRC(env->pVM, rc);
4255 return;
4256 }
4257 remAbort(rc, __FUNCTION__);
4258}
4259
4260void cpu_outw(CPUState *env, int addr, int val)
4261{
4262 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4263 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4264 if (RT_LIKELY(rc == VINF_SUCCESS))
4265 return;
4266 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4267 {
4268 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4269 remR3RaiseRC(env->pVM, rc);
4270 return;
4271 }
4272 remAbort(rc, __FUNCTION__);
4273}
4274
4275void cpu_outl(CPUState *env, int addr, int val)
4276{
4277 int rc;
4278 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4279 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4280 if (RT_LIKELY(rc == VINF_SUCCESS))
4281 return;
4282 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4283 {
4284 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4285 remR3RaiseRC(env->pVM, rc);
4286 return;
4287 }
4288 remAbort(rc, __FUNCTION__);
4289}
4290
4291int cpu_inb(CPUState *env, int addr)
4292{
4293 uint32_t u32 = 0;
4294 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4295 if (RT_LIKELY(rc == VINF_SUCCESS))
4296 {
4297 if (/*addr != 0x61 && */addr != 0x71)
4298 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4299 return (int)u32;
4300 }
4301 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4302 {
4303 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4304 remR3RaiseRC(env->pVM, rc);
4305 return (int)u32;
4306 }
4307 remAbort(rc, __FUNCTION__);
4308 return 0xff;
4309}
4310
4311int cpu_inw(CPUState *env, int addr)
4312{
4313 uint32_t u32 = 0;
4314 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4315 if (RT_LIKELY(rc == VINF_SUCCESS))
4316 {
4317 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4318 return (int)u32;
4319 }
4320 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4321 {
4322 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4323 remR3RaiseRC(env->pVM, rc);
4324 return (int)u32;
4325 }
4326 remAbort(rc, __FUNCTION__);
4327 return 0xffff;
4328}
4329
4330int cpu_inl(CPUState *env, int addr)
4331{
4332 uint32_t u32 = 0;
4333 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4334 if (RT_LIKELY(rc == VINF_SUCCESS))
4335 {
4336//if (addr==0x01f0 && u32 == 0x6b6d)
4337// loglevel = ~0;
4338 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4339 return (int)u32;
4340 }
4341 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4342 {
4343 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4344 remR3RaiseRC(env->pVM, rc);
4345 return (int)u32;
4346 }
4347 remAbort(rc, __FUNCTION__);
4348 return 0xffffffff;
4349}
4350
4351#undef LOG_GROUP
4352#define LOG_GROUP LOG_GROUP_REM
4353
4354
4355/* -+- helpers and misc other interfaces -+- */
4356
/**
 * Perform the CPUID instruction.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax. (Points at a uint32_t.)
 * @param   pvEBX       Where to store ebx. (Points at a uint32_t.)
 * @param   pvECX       Where to store ecx. (Points at a uint32_t.)
 * @param   pvEDX       Where to store edx. (Points at a uint32_t.)
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    /* Delegate to CPUM so the guest sees the configured (possibly masked) CPUID leaves. */
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4374
4375
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the error and hands control to EM's fatal error handling (which does
 * not return).  Currently compiled out.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4405
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, forces it into the logs, syncs the REM state back if
 * necessary and hands control to EMR3FatalError (which does not return).
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    /* Force the loggers on and unbuffered so the message actually reaches the log. */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* NOTE(review): this assumes all varargs are pointer-sized and one '%' equals
       one argument — holds for the gcc-generated callers this services. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4466
4467
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the REM state back if necessary and hands control
 * to EMR3FatalError (which does not return).
 *
 * @param   rc      VBox error code.
 * @param   pszTip  Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4498
4499
/**
 * Dumps a linux system call.
 *
 * Looks up the syscall number in EAX against a static table of i386 Linux
 * syscall names and logs it together with the argument registers.
 *
 * @param   pVCpu   VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* i386 Linux syscall-number -> name table (indices match syscall numbers). */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",     /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",   /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",    /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",   /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",    /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",    /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",    /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",      /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",   /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",    /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",    /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",  /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",     /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",  /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",     /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",  /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",    /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",    /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",   /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",    /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",    /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",  /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",   /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",    /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",   /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",   /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",    /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",   /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    /* The switch only has a default case; kept for easy addition of
       per-syscall argument decoding later. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4798
4799
4800/**
4801 * Dumps an OpenBSD system call.
4802 * @param pVCpu VMCPU handle.
4803 */
4804void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4805{
4806 static const char *apsz[] =
4807 {
4808 "SYS_syscall", //0
4809 "SYS_exit", //1
4810 "SYS_fork", //2
4811 "SYS_read", //3
4812 "SYS_write", //4
4813 "SYS_open", //5
4814 "SYS_close", //6
4815 "SYS_wait4", //7
4816 "SYS_8",
4817 "SYS_link", //9
4818 "SYS_unlink", //10
4819 "SYS_11",
4820 "SYS_chdir", //12
4821 "SYS_fchdir", //13
4822 "SYS_mknod", //14
4823 "SYS_chmod", //15
4824 "SYS_chown", //16
4825 "SYS_break", //17
4826 "SYS_18",
4827 "SYS_19",
4828 "SYS_getpid", //20
4829 "SYS_mount", //21
4830 "SYS_unmount", //22
4831 "SYS_setuid", //23
4832 "SYS_getuid", //24
4833 "SYS_geteuid", //25
4834 "SYS_ptrace", //26
4835 "SYS_recvmsg", //27
4836 "SYS_sendmsg", //28
4837 "SYS_recvfrom", //29
4838 "SYS_accept", //30
4839 "SYS_getpeername", //31
4840 "SYS_getsockname", //32
4841 "SYS_access", //33
4842 "SYS_chflags", //34
4843 "SYS_fchflags", //35
4844 "SYS_sync", //36
4845 "SYS_kill", //37
4846 "SYS_38",
4847 "SYS_getppid", //39
4848 "SYS_40",
4849 "SYS_dup", //41
4850 "SYS_opipe", //42
4851 "SYS_getegid", //43
4852 "SYS_profil", //44
4853 "SYS_ktrace", //45
4854 "SYS_sigaction", //46
4855 "SYS_getgid", //47
4856 "SYS_sigprocmask", //48
4857 "SYS_getlogin", //49
4858 "SYS_setlogin", //50
4859 "SYS_acct", //51
4860 "SYS_sigpending", //52
4861 "SYS_osigaltstack", //53
4862 "SYS_ioctl", //54
4863 "SYS_reboot", //55
4864 "SYS_revoke", //56
4865 "SYS_symlink", //57
4866 "SYS_readlink", //58
4867 "SYS_execve", //59
4868 "SYS_umask", //60
4869 "SYS_chroot", //61
4870 "SYS_62",
4871 "SYS_63",
4872 "SYS_64",
4873 "SYS_65",
4874 "SYS_vfork", //66
4875 "SYS_67",
4876 "SYS_68",
4877 "SYS_sbrk", //69
4878 "SYS_sstk", //70
4879 "SYS_61",
4880 "SYS_vadvise", //72
4881 "SYS_munmap", //73
4882 "SYS_mprotect", //74
4883 "SYS_madvise", //75
4884 "SYS_76",
4885 "SYS_77",
4886 "SYS_mincore", //78
4887 "SYS_getgroups", //79
4888 "SYS_setgroups", //80
4889 "SYS_getpgrp", //81
4890 "SYS_setpgid", //82
4891 "SYS_setitimer", //83
4892 "SYS_84",
4893 "SYS_85",
4894 "SYS_getitimer", //86
4895 "SYS_87",
4896 "SYS_88",
4897 "SYS_89",
4898 "SYS_dup2", //90
4899 "SYS_91",
4900 "SYS_fcntl", //92
4901 "SYS_select", //93
4902 "SYS_94",
4903 "SYS_fsync", //95
4904 "SYS_setpriority", //96
4905 "SYS_socket", //97
4906 "SYS_connect", //98
4907 "SYS_99",
4908 "SYS_getpriority", //100
4909 "SYS_101",
4910 "SYS_102",
4911 "SYS_sigreturn", //103
4912 "SYS_bind", //104
4913 "SYS_setsockopt", //105
4914 "SYS_listen", //106
4915 "SYS_107",
4916 "SYS_108",
4917 "SYS_109",
4918 "SYS_110",
4919 "SYS_sigsuspend", //111
4920 "SYS_112",
4921 "SYS_113",
4922 "SYS_114",
4923 "SYS_115",
4924 "SYS_gettimeofday", //116
4925 "SYS_getrusage", //117
4926 "SYS_getsockopt", //118
4927 "SYS_119",
4928 "SYS_readv", //120
4929 "SYS_writev", //121
4930 "SYS_settimeofday", //122
4931 "SYS_fchown", //123
4932 "SYS_fchmod", //124
4933 "SYS_125",
4934 "SYS_setreuid", //126
4935 "SYS_setregid", //127
4936 "SYS_rename", //128
4937 "SYS_129",
4938 "SYS_130",
4939 "SYS_flock", //131
4940 "SYS_mkfifo", //132
4941 "SYS_sendto", //133
4942 "SYS_shutdown", //134
4943 "SYS_socketpair", //135
4944 "SYS_mkdir", //136
4945 "SYS_rmdir", //137
4946 "SYS_utimes", //138
4947 "SYS_139",
4948 "SYS_adjtime", //140
4949 "SYS_141",
4950 "SYS_142",
4951 "SYS_143",
4952 "SYS_144",
4953 "SYS_145",
4954 "SYS_146",
4955 "SYS_setsid", //147
4956 "SYS_quotactl", //148
4957 "SYS_149",
4958 "SYS_150",
4959 "SYS_151",
4960 "SYS_152",
4961 "SYS_153",
4962 "SYS_154",
4963 "SYS_nfssvc", //155
4964 "SYS_156",
4965 "SYS_157",
4966 "SYS_158",
4967 "SYS_159",
4968 "SYS_160",
4969 "SYS_getfh", //161
4970 "SYS_162",
4971 "SYS_163",
4972 "SYS_164",
4973 "SYS_sysarch", //165
4974 "SYS_166",
4975 "SYS_167",
4976 "SYS_168",
4977 "SYS_169",
4978 "SYS_170",
4979 "SYS_171",
4980 "SYS_172",
4981 "SYS_pread", //173
4982 "SYS_pwrite", //174
4983 "SYS_175",
4984 "SYS_176",
4985 "SYS_177",
4986 "SYS_178",
4987 "SYS_179",
4988 "SYS_180",
4989 "SYS_setgid", //181
4990 "SYS_setegid", //182
4991 "SYS_seteuid", //183
4992 "SYS_lfs_bmapv", //184
4993 "SYS_lfs_markv", //185
4994 "SYS_lfs_segclean", //186
4995 "SYS_lfs_segwait", //187
4996 "SYS_188",
4997 "SYS_189",
4998 "SYS_190",
4999 "SYS_pathconf", //191
5000 "SYS_fpathconf", //192
5001 "SYS_swapctl", //193
5002 "SYS_getrlimit", //194
5003 "SYS_setrlimit", //195
5004 "SYS_getdirentries", //196
5005 "SYS_mmap", //197
5006 "SYS___syscall", //198
5007 "SYS_lseek", //199
5008 "SYS_truncate", //200
5009 "SYS_ftruncate", //201
5010 "SYS___sysctl", //202
5011 "SYS_mlock", //203
5012 "SYS_munlock", //204
5013 "SYS_205",
5014 "SYS_futimes", //206
5015 "SYS_getpgid", //207
5016 "SYS_xfspioctl", //208
5017 "SYS_209",
5018 "SYS_210",
5019 "SYS_211",
5020 "SYS_212",
5021 "SYS_213",
5022 "SYS_214",
5023 "SYS_215",
5024 "SYS_216",
5025 "SYS_217",
5026 "SYS_218",
5027 "SYS_219",
5028 "SYS_220",
5029 "SYS_semget", //221
5030 "SYS_222",
5031 "SYS_223",
5032 "SYS_224",
5033 "SYS_msgget", //225
5034 "SYS_msgsnd", //226
5035 "SYS_msgrcv", //227
5036 "SYS_shmat", //228
5037 "SYS_229",
5038 "SYS_shmdt", //230
5039 "SYS_231",
5040 "SYS_clock_gettime", //232
5041 "SYS_clock_settime", //233
5042 "SYS_clock_getres", //234
5043 "SYS_235",
5044 "SYS_236",
5045 "SYS_237",
5046 "SYS_238",
5047 "SYS_239",
5048 "SYS_nanosleep", //240
5049 "SYS_241",
5050 "SYS_242",
5051 "SYS_243",
5052 "SYS_244",
5053 "SYS_245",
5054 "SYS_246",
5055 "SYS_247",
5056 "SYS_248",
5057 "SYS_249",
5058 "SYS_minherit", //250
5059 "SYS_rfork", //251
5060 "SYS_poll", //252
5061 "SYS_issetugid", //253
5062 "SYS_lchown", //254
5063 "SYS_getsid", //255
5064 "SYS_msync", //256
5065 "SYS_257",
5066 "SYS_258",
5067 "SYS_259",
5068 "SYS_getfsstat", //260
5069 "SYS_statfs", //261
5070 "SYS_fstatfs", //262
5071 "SYS_pipe", //263
5072 "SYS_fhopen", //264
5073 "SYS_265",
5074 "SYS_fhstatfs", //266
5075 "SYS_preadv", //267
5076 "SYS_pwritev", //268
5077 "SYS_kqueue", //269
5078 "SYS_kevent", //270
5079 "SYS_mlockall", //271
5080 "SYS_munlockall", //272
5081 "SYS_getpeereid", //273
5082 "SYS_274",
5083 "SYS_275",
5084 "SYS_276",
5085 "SYS_277",
5086 "SYS_278",
5087 "SYS_279",
5088 "SYS_280",
5089 "SYS_getresuid", //281
5090 "SYS_setresuid", //282
5091 "SYS_getresgid", //283
5092 "SYS_setresgid", //284
5093 "SYS_285",
5094 "SYS_mquery", //286
5095 "SYS_closefrom", //287
5096 "SYS_sigaltstack", //288
5097 "SYS_shmget", //289
5098 "SYS_semop", //290
5099 "SYS_stat", //291
5100 "SYS_fstat", //292
5101 "SYS_lstat", //293
5102 "SYS_fhstat", //294
5103 "SYS___semctl", //295
5104 "SYS_shmctl", //296
5105 "SYS_msgctl", //297
5106 "SYS_MAXSYSCALL", //298
5107 //299
5108 //300
5109 };
5110 uint32_t uEAX;
5111 if (!LogIsEnabled())
5112 return;
5113 uEAX = CPUMGetGuestEAX(pVCpu);
5114 switch (uEAX)
5115 {
5116 default:
5117 if (uEAX < RT_ELEMENTS(apsz))
5118 {
5119 uint32_t au32Args[8] = {0};
5120 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5121 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5122 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5123 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5124 }
5125 else
5126 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5127 break;
5128 }
5129}
5130
5131
5132#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5133/**
5134 * The Dll main entry point (stub).
5135 */
5136bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5137{
5138 return true;
5139}
5140
/**
 * Minimal memcpy replacement for the no-CRT windows/x86 build.
 *
 * Plain byte-by-byte copy; per the standard memcpy contract the source and
 * destination areas must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     What to copy from.
 * @param   size    The number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    /* Fix: the source pointer must be const-qualified - the original
       initialized a plain uint8_t * from 'const void *src', discarding
       the const qualifier (a C constraint violation / compiler warning). */
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5148
5149#endif
5150
/**
 * QEMU callback invoked when the CPU's SMM state is updated.
 *
 * Intentionally a no-op in the VBox recompiler build - the notification
 * is ignored; @a env is not touched.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette