VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 22480

Last change on this file since 22480 was 22480, checked in by vboxsync, 15 years ago

SSM,VMM,Devices,Main,VBoxBFE: Live snapshot/migration SSM API adjustments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.5 KB
Line 
1/* $Id: VBoxRecompiler.c 22480 2009-08-26 17:14:13Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPhase);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the main REM execution paths (registered in REMR3Init,
   deregistered in REMR3Term). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Address-space conversion profiling. */
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution was refused. */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change counters. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
/* Per-selector out-of-sync counters; index order is ES,CS,SS,DS,FS,GS
   (see the STAM_REG calls in REMR3Init). */
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword.
 * Registered with the recompiler via cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages covered by PGM access handlers).
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for pages covered by PGM access handlers).
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * The single optional argument toggles logged single stepping on/off. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,        fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER, 0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once with DBGCRegisterCommands() in REMR3Init. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc    = NULL,
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated from executable memory (RTMemExecAlloc) in REMR3Init. */
uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers the MMIO and handler
 * memory types, the saved state data unit, debugger commands and
 * statistics, and initializes the handler notification free list.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications (re-enabled below once the recompiler core is set up) */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Must be executable memory; see the comment at the code_gen_prologue definition. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Mirror the guest CPUID feature bits into the recompiler environment. */
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* Registration failure is ignored; we simply retry on the next init. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",          STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",    STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",        STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",      STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",         STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",    STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",         STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",      STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",         STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",    STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",            STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",     STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",        STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",     STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",          STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* These three live in exec.c (see the extern declarations above). */
    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists.
     * All entries start out on the free list; idxNext of the last one is UINT32_MAX.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page. When fGuarded, the map is
 * page-aligned and followed by an inaccessible guard region so that
 * out-of-bounds writes fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* One byte per page; overflow check guards against GCPhysLastRam == ~0. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail so overruns of the bitmap trap. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the bitmap's END abuts the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it (all pages dirty). */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
526
527
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently this only deregisters the statistics registered in REMR3Init;
 * the memory allocations are left to die with the process.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics. (Mirrors the STAM_REG list in REMR3Init.)
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
598
599
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Notifications are suppressed for the duration of cpu_reset();
     * the asserts verify the ignore count is balanced around it.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
626
627
/**
 * Execute state save operation.
 *
 * Stream layout (must stay in sync with remR3Load): hflags, ~0 separator,
 * raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the final put's status is returned; earlier failures surface here
       too since SSM sticks on the first error. */
    return SSMR3PutU32(pSSM, ~0);     /* terminator */
}
654
655
656/**
657 * Execute state load operation.
658 *
659 * @returns VBox status code.
660 * @param pVM VM Handle.
661 * @param pSSM SSM operation handle.
662 * @param uVersion Data layout version.
663 * @param uPhase The data phase.
664 */
665static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPhase)
666{
667 uint32_t u32Dummy;
668 uint32_t fRawRing0 = false;
669 uint32_t u32Sep;
670 unsigned i;
671 int rc;
672 PREM pRem;
673
674 LogFlow(("remR3Load:\n"));
675 Assert(uPhase == SSM_PHASE_FINAL); NOREF(uPhase);
676
677 /*
678 * Validate version.
679 */
680 if ( uVersion != REM_SAVED_STATE_VERSION
681 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
682 {
683 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
684 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
685 }
686
687 /*
688 * Do a reset to be on the safe side...
689 */
690 REMR3Reset(pVM);
691
692 /*
693 * Ignore all ignorable notifications.
694 * (Not doing this will cause serious trouble.)
695 */
696 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
697
698 /*
699 * Load the required CPU Env bits.
700 * (Not much because we're never in REM when doing the save.)
701 */
702 pRem = &pVM->rem.s;
703 Assert(!pRem->fInREM);
704 SSMR3GetU32(pSSM, &pRem->Env.hflags);
705 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
706 {
707 /* Redundant REM CPU state has to be loaded, but can be ignored. */
708 CPUX86State_Ver16 temp;
709 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
710 }
711
712 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
713 if (RT_FAILURE(rc))
714 return rc;
715 if (u32Sep != ~0U)
716 {
717 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
718 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
719 }
720
721 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
722 SSMR3GetUInt(pSSM, &fRawRing0);
723 if (fRawRing0)
724 pRem->Env.state |= CPU_RAW_RING0;
725
726 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
727 {
728 /*
729 * Load the REM stuff.
730 */
731 /** @todo r=bird: We should just drop all these items, restoring doesn't make
732 * sense. */
733 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
734 if (RT_FAILURE(rc))
735 return rc;
736 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
737 {
738 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
739 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
740 }
741 for (i = 0; i < pRem->cInvalidatedPages; i++)
742 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
743 }
744
745 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
746 if (RT_FAILURE(rc))
747 return rc;
748
749 /* check the terminator. */
750 rc = SSMR3GetU32(pSSM, &u32Sep);
751 if (RT_FAILURE(rc))
752 return rc;
753 if (u32Sep != ~0U)
754 {
755 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
756 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
757 }
758
759 /*
760 * Get the CPUID features.
761 */
762 PVMCPU pVCpu = VMMGetCpu(pVM);
763 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
764 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
765
766 /*
767 * Sync the Load Flush the TLB
768 */
769 tlb_flush(&pRem->Env, 1);
770
771 /*
772 * Stop ignoring ignornable notifications.
773 */
774 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
775
776 /*
777 * Sync the whole CPU state when executing code in the recompiler.
778 */
779 for (i=0;i<pVM->cCPUs;i++)
780 {
781 PVMCPU pVCpu = &pVM->aCpus[i];
782
783 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
784 }
785 return VINF_SUCCESS;
786}
787
788
789
790#undef LOG_GROUP
791#define LOG_GROUP LOG_GROUP_REM_RUN
792
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved and restored at the bottom.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * fBp records whether one was removed so it can be re-inserted afterwards.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Stepped one instruction: nudge the clock (resume+suspend) and report it. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in pVM->rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
877
878
879/**
880 * Set a breakpoint using the REM facilities.
881 *
882 * @returns VBox status code.
883 * @param pVM The VM handle.
884 * @param Address The breakpoint address.
885 * @thread The emulation thread.
886 */
887REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
888{
889 VM_ASSERT_EMT(pVM);
890 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
891 {
892 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
893 return VINF_SUCCESS;
894 }
895 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
896 return VERR_REM_NO_MORE_BP_SLOTS;
897}
898
899
900/**
901 * Clears a breakpoint set by REMR3BreakpointSet().
902 *
903 * @returns VBox status code.
904 * @param pVM The VM handle.
905 * @param Address The breakpoint address.
906 * @thread The emulation thread.
907 */
908REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
909{
910 VM_ASSERT_EMT(pVM);
911 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
912 {
913 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
914 return VINF_SUCCESS;
915 }
916 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
917 return VERR_REM_BP_NOT_FOUND;
918}
919
920
921/**
922 * Emulate an instruction.
923 *
924 * This function executes one instruction without letting anyone
925 * interrupt it. This is intended for being called while being in
926 * raw mode and thus will take care of all the state syncing between
927 * REM and the rest.
928 *
929 * @returns VBox status code.
930 * @param pVM VM handle.
931 * @param pVCpu VMCPU Handle.
932 */
933REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
934{
935 bool fFlushTBs;
936
937 int rc, rc2;
938 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
939
940 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
941 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
942 */
943 if (HWACCMIsEnabled(pVM))
944 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
945
946 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
947 fFlushTBs = pVM->rem.s.fFlushTBs;
948 pVM->rem.s.fFlushTBs = false;
949
950 /*
951 * Sync the state and enable single instruction / single stepping.
952 */
953 rc = REMR3State(pVM, pVCpu);
954 pVM->rem.s.fFlushTBs = fFlushTBs;
955 if (RT_SUCCESS(rc))
956 {
957 int interrupt_request = pVM->rem.s.Env.interrupt_request;
958 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
959 Assert(!pVM->rem.s.Env.singlestep_enabled);
960 /*
961 * Now we set the execute single instruction flag and enter the cpu_exec loop.
962 */
963 TMNotifyStartOfExecution(pVCpu);
964 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
965 rc = cpu_exec(&pVM->rem.s.Env);
966 TMNotifyEndOfExecution(pVCpu);
967 switch (rc)
968 {
969 /*
970 * Executed without anything out of the way happening.
971 */
972 case EXCP_SINGLE_INSTR:
973 rc = VINF_EM_RESCHEDULE;
974 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
975 break;
976
977 /*
978 * If we take a trap or start servicing a pending interrupt, we might end up here.
979 * (Timer thread or some other thread wishing EMT's attention.)
980 */
981 case EXCP_INTERRUPT:
982 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
983 rc = VINF_EM_RESCHEDULE;
984 break;
985
986 /*
987 * Single step, we assume!
988 * If there was a breakpoint there we're fucked now.
989 */
990 case EXCP_DEBUG:
991 {
992 /* breakpoint or single step? */
993 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
994 int iBP;
995 rc = VINF_EM_DBG_STEPPED;
996 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
997 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
998 {
999 rc = VINF_EM_DBG_BREAKPOINT;
1000 break;
1001 }
1002 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1003 break;
1004 }
1005
1006 /*
1007 * hlt instruction.
1008 */
1009 case EXCP_HLT:
1010 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1011 rc = VINF_EM_HALT;
1012 break;
1013
1014 /*
1015 * The VM has halted.
1016 */
1017 case EXCP_HALTED:
1018 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1019 rc = VINF_EM_HALT;
1020 break;
1021
1022 /*
1023 * Switch to RAW-mode.
1024 */
1025 case EXCP_EXECUTE_RAW:
1026 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1027 rc = VINF_EM_RESCHEDULE_RAW;
1028 break;
1029
1030 /*
1031 * Switch to hardware accelerated RAW-mode.
1032 */
1033 case EXCP_EXECUTE_HWACC:
1034 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1035 rc = VINF_EM_RESCHEDULE_HWACC;
1036 break;
1037
1038 /*
1039 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1040 */
1041 case EXCP_RC:
1042 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1043 rc = pVM->rem.s.rc;
1044 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1045 break;
1046
1047 /*
1048 * Figure out the rest when they arrive....
1049 */
1050 default:
1051 AssertMsgFailed(("rc=%d\n", rc));
1052 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1053 rc = VINF_EM_RESCHEDULE;
1054 break;
1055 }
1056
1057 /*
1058 * Switch back the state.
1059 */
1060 pVM->rem.s.Env.interrupt_request = interrupt_request;
1061 rc2 = REMR3StateBack(pVM, pVCpu);
1062 AssertRC(rc2);
1063 }
1064
1065 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1066 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1067 return rc;
1068}
1069
1070
1071/**
1072 * Runs code in recompiled mode.
1073 *
1074 * Before calling this function the REM state needs to be in sync with
1075 * the VM. Call REMR3State() to perform the sync. It's only necessary
1076 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1077 * and after calling REMR3StateBack().
1078 *
1079 * @returns VBox status code.
1080 *
1081 * @param pVM VM Handle.
1082 * @param pVCpu VMCPU Handle.
1083 */
1084REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1085{
1086 int rc;
1087 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1088 Assert(pVM->rem.s.fInREM);
1089
1090 TMNotifyStartOfExecution(pVCpu);
1091 rc = cpu_exec(&pVM->rem.s.Env);
1092 TMNotifyEndOfExecution(pVCpu);
1093 switch (rc)
1094 {
1095 /*
1096 * This happens when the execution was interrupted
1097 * by an external event, like pending timers.
1098 */
1099 case EXCP_INTERRUPT:
1100 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1101 rc = VINF_SUCCESS;
1102 break;
1103
1104 /*
1105 * hlt instruction.
1106 */
1107 case EXCP_HLT:
1108 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1109 rc = VINF_EM_HALT;
1110 break;
1111
1112 /*
1113 * The VM has halted.
1114 */
1115 case EXCP_HALTED:
1116 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1117 rc = VINF_EM_HALT;
1118 break;
1119
1120 /*
1121 * Breakpoint/single step.
1122 */
1123 case EXCP_DEBUG:
1124 {
1125#if 0//def DEBUG_bird
1126 static int iBP = 0;
1127 printf("howdy, breakpoint! iBP=%d\n", iBP);
1128 switch (iBP)
1129 {
1130 case 0:
1131 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1132 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1133 //pVM->rem.s.Env.interrupt_request = 0;
1134 //pVM->rem.s.Env.exception_index = -1;
1135 //g_fInterruptDisabled = 1;
1136 rc = VINF_SUCCESS;
1137 asm("int3");
1138 break;
1139 default:
1140 asm("int3");
1141 break;
1142 }
1143 iBP++;
1144#else
1145 /* breakpoint or single step? */
1146 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1147 int iBP;
1148 rc = VINF_EM_DBG_STEPPED;
1149 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1150 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1151 {
1152 rc = VINF_EM_DBG_BREAKPOINT;
1153 break;
1154 }
1155 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1156#endif
1157 break;
1158 }
1159
1160 /*
1161 * Switch to RAW-mode.
1162 */
1163 case EXCP_EXECUTE_RAW:
1164 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1165 rc = VINF_EM_RESCHEDULE_RAW;
1166 break;
1167
1168 /*
1169 * Switch to hardware accelerated RAW-mode.
1170 */
1171 case EXCP_EXECUTE_HWACC:
1172 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1173 rc = VINF_EM_RESCHEDULE_HWACC;
1174 break;
1175
1176 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1177 /*
1178 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1179 */
1180 case EXCP_RC:
1181 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1182 rc = pVM->rem.s.rc;
1183 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1184 break;
1185
1186 /*
1187 * Figure out the rest when they arrive....
1188 */
1189 default:
1190 AssertMsgFailed(("rc=%d\n", rc));
1191 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1192 rc = VINF_SUCCESS;
1193 break;
1194 }
1195
1196 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1197 return rc;
1198}
1199
1200
1201/**
1202 * Check if the cpu state is suitable for Raw execution.
1203 *
1204 * @returns boolean
1205 * @param env The CPU env struct.
1206 * @param eip The EIP to check this for (might differ from env->eip).
1207 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1208 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1209 *
1210 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1211 */
1212bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1213{
1214 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1215 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1216 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1217 uint32_t u32CR0;
1218
1219 /* Update counter. */
1220 env->pVM->rem.s.cCanExecuteRaw++;
1221
1222 if (HWACCMIsEnabled(env->pVM))
1223 {
1224 CPUMCTX Ctx;
1225
1226 env->state |= CPU_RAW_HWACC;
1227
1228 /*
1229 * Create partial context for HWACCMR3CanExecuteGuest
1230 */
1231 Ctx.cr0 = env->cr[0];
1232 Ctx.cr3 = env->cr[3];
1233 Ctx.cr4 = env->cr[4];
1234
1235 Ctx.tr = env->tr.selector;
1236 Ctx.trHid.u64Base = env->tr.base;
1237 Ctx.trHid.u32Limit = env->tr.limit;
1238 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1239
1240 Ctx.idtr.cbIdt = env->idt.limit;
1241 Ctx.idtr.pIdt = env->idt.base;
1242
1243 Ctx.gdtr.cbGdt = env->gdt.limit;
1244 Ctx.gdtr.pGdt = env->gdt.base;
1245
1246 Ctx.rsp = env->regs[R_ESP];
1247 Ctx.rip = env->eip;
1248
1249 Ctx.eflags.u32 = env->eflags;
1250
1251 Ctx.cs = env->segs[R_CS].selector;
1252 Ctx.csHid.u64Base = env->segs[R_CS].base;
1253 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1254 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1255
1256 Ctx.ds = env->segs[R_DS].selector;
1257 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1258 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1259 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1260
1261 Ctx.es = env->segs[R_ES].selector;
1262 Ctx.esHid.u64Base = env->segs[R_ES].base;
1263 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1264 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1265
1266 Ctx.fs = env->segs[R_FS].selector;
1267 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1268 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1269 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1270
1271 Ctx.gs = env->segs[R_GS].selector;
1272 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1273 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1274 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1275
1276 Ctx.ss = env->segs[R_SS].selector;
1277 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1278 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1279 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1280
1281 Ctx.msrEFER = env->efer;
1282
1283 /* Hardware accelerated raw-mode:
1284 *
1285 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1286 */
1287 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1288 {
1289 *piException = EXCP_EXECUTE_HWACC;
1290 return true;
1291 }
1292 return false;
1293 }
1294
1295 /*
1296 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1297 * or 32 bits protected mode ring 0 code
1298 *
1299 * The tests are ordered by the likelyhood of being true during normal execution.
1300 */
1301 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1302 {
1303 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1304 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1305 return false;
1306 }
1307
1308#ifndef VBOX_RAW_V86
1309 if (fFlags & VM_MASK) {
1310 STAM_COUNTER_INC(&gStatRefuseVM86);
1311 Log2(("raw mode refused: VM_MASK\n"));
1312 return false;
1313 }
1314#endif
1315
1316 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1317 {
1318#ifndef DEBUG_bird
1319 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1320#endif
1321 return false;
1322 }
1323
1324 if (env->singlestep_enabled)
1325 {
1326 //Log2(("raw mode refused: Single step\n"));
1327 return false;
1328 }
1329
1330 if (env->nb_breakpoints > 0)
1331 {
1332 //Log2(("raw mode refused: Breakpoints\n"));
1333 return false;
1334 }
1335
1336 u32CR0 = env->cr[0];
1337 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1338 {
1339 STAM_COUNTER_INC(&gStatRefusePaging);
1340 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1341 return false;
1342 }
1343
1344 if (env->cr[4] & CR4_PAE_MASK)
1345 {
1346 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1347 {
1348 STAM_COUNTER_INC(&gStatRefusePAE);
1349 return false;
1350 }
1351 }
1352
1353 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1354 {
1355 if (!EMIsRawRing3Enabled(env->pVM))
1356 return false;
1357
1358 if (!(env->eflags & IF_MASK))
1359 {
1360 STAM_COUNTER_INC(&gStatRefuseIF0);
1361 Log2(("raw mode refused: IF (RawR3)\n"));
1362 return false;
1363 }
1364
1365 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1366 {
1367 STAM_COUNTER_INC(&gStatRefuseWP0);
1368 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1369 return false;
1370 }
1371 }
1372 else
1373 {
1374 if (!EMIsRawRing0Enabled(env->pVM))
1375 return false;
1376
1377 // Let's start with pure 32 bits ring 0 code first
1378 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1379 {
1380 STAM_COUNTER_INC(&gStatRefuseCode16);
1381 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1382 return false;
1383 }
1384
1385 // Only R0
1386 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1387 {
1388 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1389 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1390 return false;
1391 }
1392
1393 if (!(u32CR0 & CR0_WP_MASK))
1394 {
1395 STAM_COUNTER_INC(&gStatRefuseWP0);
1396 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1397 return false;
1398 }
1399
1400 if (PATMIsPatchGCAddr(env->pVM, eip))
1401 {
1402 Log2(("raw r0 mode forced: patch code\n"));
1403 *piException = EXCP_EXECUTE_RAW;
1404 return true;
1405 }
1406
1407#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1408 if (!(env->eflags & IF_MASK))
1409 {
1410 STAM_COUNTER_INC(&gStatRefuseIF0);
1411 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1412 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1413 return false;
1414 }
1415#endif
1416
1417 env->state |= CPU_RAW_RING0;
1418 }
1419
1420 /*
1421 * Don't reschedule the first time we're called, because there might be
1422 * special reasons why we're here that is not covered by the above checks.
1423 */
1424 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1425 {
1426 Log2(("raw mode refused: first scheduling\n"));
1427 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1428 return false;
1429 }
1430
1431 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1432 *piException = EXCP_EXECUTE_RAW;
1433 return true;
1434}
1435
1436
1437/**
1438 * Fetches a code byte.
1439 *
1440 * @returns Success indicator (bool) for ease of use.
1441 * @param env The CPU environment structure.
1442 * @param GCPtrInstr Where to fetch code.
1443 * @param pu8Byte Where to store the byte on success
1444 */
1445bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1446{
1447 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1448 if (RT_SUCCESS(rc))
1449 return true;
1450 return false;
1451}
1452
1453
1454/**
1455 * Flush (or invalidate if you like) page table/dir entry.
1456 *
1457 * (invlpg instruction; tlb_flush_page)
1458 *
1459 * @param env Pointer to cpu environment.
1460 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1461 */
1462void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1463{
1464 PVM pVM = env->pVM;
1465 PCPUMCTX pCtx;
1466 int rc;
1467
1468 /*
1469 * When we're replaying invlpg instructions or restoring a saved
1470 * state we disable this path.
1471 */
1472 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1473 return;
1474 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1475 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1476
1477 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1478
1479 /*
1480 * Update the control registers before calling PGMFlushPage.
1481 */
1482 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1483 Assert(pCtx);
1484 pCtx->cr0 = env->cr[0];
1485 pCtx->cr3 = env->cr[3];
1486 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1487 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1488 pCtx->cr4 = env->cr[4];
1489
1490 /*
1491 * Let PGM do the rest.
1492 */
1493 Assert(env->pVCpu);
1494 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1495 if (RT_FAILURE(rc))
1496 {
1497 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1498 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1499 }
1500 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1501}
1502
1503
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer, encoding the
 * access status in the two low bits of the returned pointer:
 *   - 1: access must go through the handler path (catch-all/unassigned),
 *   - 2: writes must be caught (read-only mapping).
 *
 * NOTE(review): the fWritable parameter is not forwarded to
 * PGMR3PhysTlbGCPhys2Ptr (true is passed unconditionally) — presumably
 * deliberate since write accesses are caught via the bit-2 tagging; confirm.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1526
1527
1528/**
1529 * Called from tlb_protect_code in order to write monitor a code page.
1530 *
1531 * @param env Pointer to the CPU environment.
1532 * @param GCPtr Code page to monitor
1533 */
1534void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1535{
1536#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1537 Assert(env->pVM->rem.s.fInREM);
1538 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1539 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1540 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1541 && !(env->eflags & VM_MASK) /* no V86 mode */
1542 && !HWACCMIsEnabled(env->pVM))
1543 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1544#endif
1545}
1546
1547
1548/**
1549 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1550 *
1551 * @param env Pointer to the CPU environment.
1552 * @param GCPtr Code page to monitor
1553 */
1554void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1555{
1556 Assert(env->pVM->rem.s.fInREM);
1557#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1558 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1559 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1560 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1561 && !(env->eflags & VM_MASK) /* no V86 mode */
1562 && !HWACCMIsEnabled(env->pVM))
1563 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1564#endif
1565}
1566
1567
1568/**
1569 * Called when the CPU is initialized, any of the CRx registers are changed or
1570 * when the A20 line is modified.
1571 *
1572 * @param env Pointer to the CPU environment.
1573 * @param fGlobal Set if the flush is global.
1574 */
1575void remR3FlushTLB(CPUState *env, bool fGlobal)
1576{
1577 PVM pVM = env->pVM;
1578 PCPUMCTX pCtx;
1579
1580 /*
1581 * When we're replaying invlpg instructions or restoring a saved
1582 * state we disable this path.
1583 */
1584 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1585 return;
1586 Assert(pVM->rem.s.fInREM);
1587
1588 /*
1589 * The caller doesn't check cr4, so we have to do that for ourselves.
1590 */
1591 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1592 fGlobal = true;
1593 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1594
1595 /*
1596 * Update the control registers before calling PGMR3FlushTLB.
1597 */
1598 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1599 Assert(pCtx);
1600 pCtx->cr0 = env->cr[0];
1601 pCtx->cr3 = env->cr[3];
1602 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1603 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1604 pCtx->cr4 = env->cr[4];
1605
1606 /*
1607 * Let PGM do the rest.
1608 */
1609 Assert(env->pVCpu);
1610 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1611}
1612
1613
1614/**
1615 * Called when any of the cr0, cr4 or efer registers is updated.
1616 *
1617 * @param env Pointer to the CPU environment.
1618 */
1619void remR3ChangeCpuMode(CPUState *env)
1620{
1621 PVM pVM = env->pVM;
1622 uint64_t efer;
1623 PCPUMCTX pCtx;
1624 int rc;
1625
1626 /*
1627 * When we're replaying loads or restoring a saved
1628 * state this path is disabled.
1629 */
1630 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1631 return;
1632 Assert(pVM->rem.s.fInREM);
1633
1634 /*
1635 * Update the control registers before calling PGMChangeMode()
1636 * as it may need to map whatever cr3 is pointing to.
1637 */
1638 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1639 Assert(pCtx);
1640 pCtx->cr0 = env->cr[0];
1641 pCtx->cr3 = env->cr[3];
1642 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1643 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1644 pCtx->cr4 = env->cr[4];
1645
1646#ifdef TARGET_X86_64
1647 efer = env->efer;
1648#else
1649 efer = 0;
1650#endif
1651 Assert(env->pVCpu);
1652 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1653 if (rc != VINF_SUCCESS)
1654 {
1655 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1656 {
1657 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1658 remR3RaiseRC(env->pVM, rc);
1659 }
1660 else
1661 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1662 }
1663}
1664
1665
1666/**
1667 * Called from compiled code to run dma.
1668 *
1669 * @param env Pointer to the CPU environment.
1670 */
1671void remR3DmaRun(CPUState *env)
1672{
1673 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1674 PDMR3DmaRun(env->pVM);
1675 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1676}
1677
1678
1679/**
1680 * Called from compiled code to schedule pending timers in VMM
1681 *
1682 * @param env Pointer to the CPU environment.
1683 */
1684void remR3TimersRun(CPUState *env)
1685{
1686 LogFlow(("remR3TimersRun:\n"));
1687 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1688 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1689 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1690 TMR3TimerQueuesDo(env->pVM);
1691 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1692 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1693}
1694
1695
1696/**
1697 * Record trap occurance
1698 *
1699 * @returns VBox status code
1700 * @param env Pointer to the CPU environment.
1701 * @param uTrap Trap nr
1702 * @param uErrorCode Error code
1703 * @param pvNextEIP Next EIP
1704 */
1705int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1706{
1707 PVM pVM = env->pVM;
1708#ifdef VBOX_WITH_STATISTICS
1709 static STAMCOUNTER s_aStatTrap[255];
1710 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1711#endif
1712
1713#ifdef VBOX_WITH_STATISTICS
1714 if (uTrap < 255)
1715 {
1716 if (!s_aRegisters[uTrap])
1717 {
1718 char szStatName[64];
1719 s_aRegisters[uTrap] = true;
1720 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1721 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1722 }
1723 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1724 }
1725#endif
1726 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1727 if( uTrap < 0x20
1728 && (env->cr[0] & X86_CR0_PE)
1729 && !(env->eflags & X86_EFL_VM))
1730 {
1731#ifdef DEBUG
1732 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1733#endif
1734 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1735 {
1736 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1737 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1738 return VERR_REM_TOO_MANY_TRAPS;
1739 }
1740 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1741 pVM->rem.s.cPendingExceptions = 1;
1742 pVM->rem.s.uPendingException = uTrap;
1743 pVM->rem.s.uPendingExcptEIP = env->eip;
1744 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1745 }
1746 else
1747 {
1748 pVM->rem.s.cPendingExceptions = 0;
1749 pVM->rem.s.uPendingException = uTrap;
1750 pVM->rem.s.uPendingExcptEIP = env->eip;
1751 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1752 }
1753 return VINF_SUCCESS;
1754}
1755
1756
1757/*
1758 * Clear current active trap
1759 *
1760 * @param pVM VM Handle.
1761 */
1762void remR3TrapClear(PVM pVM)
1763{
1764 pVM->rem.s.cPendingExceptions = 0;
1765 pVM->rem.s.uPendingException = 0;
1766 pVM->rem.s.uPendingExcptEIP = 0;
1767 pVM->rem.s.uPendingExcptCR2 = 0;
1768}
1769
1770
1771/*
1772 * Record previous call instruction addresses
1773 *
1774 * @param env Pointer to the CPU environment.
1775 */
1776void remR3RecordCall(CPUState *env)
1777{
1778 CSAMR3RecordCallAddress(env->pVM, env->eip);
1779}
1780
1781
1782/**
1783 * Syncs the internal REM state with the VM.
1784 *
1785 * This must be called before REMR3Run() is invoked whenever when the REM
1786 * state is not up to date. Calling it several times in a row is not
1787 * permitted.
1788 *
1789 * @returns VBox status code.
1790 *
1791 * @param pVM VM Handle.
1792 * @param pVCpu VMCPU Handle.
1793 *
1794 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1795 * no do this since the majority of the callers don't want any unnecessary of events
1796 * pending that would immediatly interrupt execution.
1797 */
1798REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1799{
1800 register const CPUMCTX *pCtx;
1801 register unsigned fFlags;
1802 bool fHiddenSelRegsValid;
1803 unsigned i;
1804 TRPMEVENT enmType;
1805 uint8_t u8TrapNo;
1806 int rc;
1807
1808 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1809 Log2(("REMR3State:\n"));
1810
1811 pVM->rem.s.Env.pVCpu = pVCpu;
1812 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1813 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1814
1815 Assert(!pVM->rem.s.fInREM);
1816 pVM->rem.s.fInStateSync = true;
1817
1818 /*
1819 * If we have to flush TBs, do that immediately.
1820 */
1821 if (pVM->rem.s.fFlushTBs)
1822 {
1823 STAM_COUNTER_INC(&gStatFlushTBs);
1824 tb_flush(&pVM->rem.s.Env);
1825 pVM->rem.s.fFlushTBs = false;
1826 }
1827
1828 /*
1829 * Copy the registers which require no special handling.
1830 */
1831#ifdef TARGET_X86_64
1832 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1833 Assert(R_EAX == 0);
1834 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1835 Assert(R_ECX == 1);
1836 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1837 Assert(R_EDX == 2);
1838 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1839 Assert(R_EBX == 3);
1840 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1841 Assert(R_ESP == 4);
1842 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1843 Assert(R_EBP == 5);
1844 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1845 Assert(R_ESI == 6);
1846 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1847 Assert(R_EDI == 7);
1848 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1849 pVM->rem.s.Env.regs[8] = pCtx->r8;
1850 pVM->rem.s.Env.regs[9] = pCtx->r9;
1851 pVM->rem.s.Env.regs[10] = pCtx->r10;
1852 pVM->rem.s.Env.regs[11] = pCtx->r11;
1853 pVM->rem.s.Env.regs[12] = pCtx->r12;
1854 pVM->rem.s.Env.regs[13] = pCtx->r13;
1855 pVM->rem.s.Env.regs[14] = pCtx->r14;
1856 pVM->rem.s.Env.regs[15] = pCtx->r15;
1857
1858 pVM->rem.s.Env.eip = pCtx->rip;
1859
1860 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1861#else
1862 Assert(R_EAX == 0);
1863 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1864 Assert(R_ECX == 1);
1865 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1866 Assert(R_EDX == 2);
1867 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1868 Assert(R_EBX == 3);
1869 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1870 Assert(R_ESP == 4);
1871 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1872 Assert(R_EBP == 5);
1873 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1874 Assert(R_ESI == 6);
1875 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1876 Assert(R_EDI == 7);
1877 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1878 pVM->rem.s.Env.eip = pCtx->eip;
1879
1880 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1881#endif
1882
1883 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1884
1885 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1886 for (i=0;i<8;i++)
1887 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1888
1889 /*
1890 * Clear the halted hidden flag (the interrupt waking up the CPU can
1891 * have been dispatched in raw mode).
1892 */
1893 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1894
1895 /*
1896 * Replay invlpg?
1897 */
1898 if (pVM->rem.s.cInvalidatedPages)
1899 {
1900 RTUINT i;
1901
1902 pVM->rem.s.fIgnoreInvlPg = true;
1903 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1904 {
1905 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1906 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1907 }
1908 pVM->rem.s.fIgnoreInvlPg = false;
1909 pVM->rem.s.cInvalidatedPages = 0;
1910 }
1911
1912 /* Replay notification changes. */
1913 REMR3ReplayHandlerNotifications(pVM);
1914
1915 /* Update MSRs; before CRx registers! */
1916 pVM->rem.s.Env.efer = pCtx->msrEFER;
1917 pVM->rem.s.Env.star = pCtx->msrSTAR;
1918 pVM->rem.s.Env.pat = pCtx->msrPAT;
1919#ifdef TARGET_X86_64
1920 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1921 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1922 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1923 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1924
1925 /* Update the internal long mode activate flag according to the new EFER value. */
1926 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1927 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1928 else
1929 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1930#endif
1931
1932 /*
1933 * Registers which are rarely changed and require special handling / order when changed.
1934 */
1935 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1936 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1937 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1938 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1939 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1940 {
1941 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1942 {
1943 pVM->rem.s.fIgnoreCR3Load = true;
1944 tlb_flush(&pVM->rem.s.Env, true);
1945 pVM->rem.s.fIgnoreCR3Load = false;
1946 }
1947
1948 /* CR4 before CR0! */
1949 if (fFlags & CPUM_CHANGED_CR4)
1950 {
1951 pVM->rem.s.fIgnoreCR3Load = true;
1952 pVM->rem.s.fIgnoreCpuMode = true;
1953 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1954 pVM->rem.s.fIgnoreCpuMode = false;
1955 pVM->rem.s.fIgnoreCR3Load = false;
1956 }
1957
1958 if (fFlags & CPUM_CHANGED_CR0)
1959 {
1960 pVM->rem.s.fIgnoreCR3Load = true;
1961 pVM->rem.s.fIgnoreCpuMode = true;
1962 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1963 pVM->rem.s.fIgnoreCpuMode = false;
1964 pVM->rem.s.fIgnoreCR3Load = false;
1965 }
1966
1967 if (fFlags & CPUM_CHANGED_CR3)
1968 {
1969 pVM->rem.s.fIgnoreCR3Load = true;
1970 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1971 pVM->rem.s.fIgnoreCR3Load = false;
1972 }
1973
1974 if (fFlags & CPUM_CHANGED_GDTR)
1975 {
1976 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1977 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1978 }
1979
1980 if (fFlags & CPUM_CHANGED_IDTR)
1981 {
1982 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1983 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1984 }
1985
1986 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1987 {
1988 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1989 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1990 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1991 }
1992
1993 if (fFlags & CPUM_CHANGED_LDTR)
1994 {
1995 if (fHiddenSelRegsValid)
1996 {
1997 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1998 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1999 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2000 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2001 }
2002 else
2003 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2004 }
2005
2006 if (fFlags & CPUM_CHANGED_CPUID)
2007 {
2008 uint32_t u32Dummy;
2009
2010 /*
2011 * Get the CPUID features.
2012 */
2013 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2014 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2015 }
2016
2017 /* Sync FPU state after CR4, CPUID and EFER (!). */
2018 if (fFlags & CPUM_CHANGED_FPU_REM)
2019 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2020 }
2021
2022 /*
2023 * Sync TR unconditionally to make life simpler.
2024 */
2025 pVM->rem.s.Env.tr.selector = pCtx->tr;
2026 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2027 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2028 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2029 /* Note! do_interrupt will fault if the busy flag is still set... */
2030 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2031
2032 /*
2033 * Update selector registers.
2034 * This must be done *after* we've synced gdt, ldt and crX registers
2035 * since we're reading the GDT/LDT om sync_seg. This will happen with
2036 * saved state which takes a quick dip into rawmode for instance.
2037 */
2038 /*
2039 * Stack; Note first check this one as the CPL might have changed. The
2040 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2041 */
2042
2043 if (fHiddenSelRegsValid)
2044 {
2045 /* The hidden selector registers are valid in the CPU context. */
2046 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2047
2048 /* Set current CPL */
2049 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2050
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2054 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2055 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2056 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2057 }
2058 else
2059 {
2060 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2061 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2062 {
2063 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2064
2065 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2066 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2067#ifdef VBOX_WITH_STATISTICS
2068 if (pVM->rem.s.Env.segs[R_SS].newselector)
2069 {
2070 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2071 }
2072#endif
2073 }
2074 else
2075 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2076
2077 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2078 {
2079 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2080 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2081#ifdef VBOX_WITH_STATISTICS
2082 if (pVM->rem.s.Env.segs[R_ES].newselector)
2083 {
2084 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2085 }
2086#endif
2087 }
2088 else
2089 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2090
2091 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2092 {
2093 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2094 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2095#ifdef VBOX_WITH_STATISTICS
2096 if (pVM->rem.s.Env.segs[R_CS].newselector)
2097 {
2098 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2099 }
2100#endif
2101 }
2102 else
2103 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2104
2105 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2106 {
2107 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2108 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2109#ifdef VBOX_WITH_STATISTICS
2110 if (pVM->rem.s.Env.segs[R_DS].newselector)
2111 {
2112 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2113 }
2114#endif
2115 }
2116 else
2117 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2118
2119 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2120 * be the same but not the base/limit. */
2121 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2122 {
2123 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2124 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2125#ifdef VBOX_WITH_STATISTICS
2126 if (pVM->rem.s.Env.segs[R_FS].newselector)
2127 {
2128 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2129 }
2130#endif
2131 }
2132 else
2133 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2134
2135 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2136 {
2137 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2138 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2139#ifdef VBOX_WITH_STATISTICS
2140 if (pVM->rem.s.Env.segs[R_GS].newselector)
2141 {
2142 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2143 }
2144#endif
2145 }
2146 else
2147 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2148 }
2149
2150 /*
2151 * Check for traps.
2152 */
2153 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2154 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2155 if (RT_SUCCESS(rc))
2156 {
2157#ifdef DEBUG
2158 if (u8TrapNo == 0x80)
2159 {
2160 remR3DumpLnxSyscall(pVCpu);
2161 remR3DumpOBsdSyscall(pVCpu);
2162 }
2163#endif
2164
2165 pVM->rem.s.Env.exception_index = u8TrapNo;
2166 if (enmType != TRPM_SOFTWARE_INT)
2167 {
2168 pVM->rem.s.Env.exception_is_int = 0;
2169 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2170 }
2171 else
2172 {
2173 /*
2174 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2175 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2176 * for int03 and into.
2177 */
2178 pVM->rem.s.Env.exception_is_int = 1;
2179 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2180 /* int 3 may be generated by one-byte 0xcc */
2181 if (u8TrapNo == 3)
2182 {
2183 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2184 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2185 }
2186 /* int 4 may be generated by one-byte 0xce */
2187 else if (u8TrapNo == 4)
2188 {
2189 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2190 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2191 }
2192 }
2193
2194 /* get error code and cr2 if needed. */
2195 switch (u8TrapNo)
2196 {
2197 case 0x0e:
2198 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2199 /* fallthru */
2200 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2201 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2202 break;
2203
2204 case 0x11: case 0x08:
2205 default:
2206 pVM->rem.s.Env.error_code = 0;
2207 break;
2208 }
2209
2210 /*
2211 * We can now reset the active trap since the recompiler is gonna have a go at it.
2212 */
2213 rc = TRPMResetTrap(pVCpu);
2214 AssertRC(rc);
2215 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2216 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2217 }
2218
2219 /*
2220 * Clear old interrupt request flags; Check for pending hardware interrupts.
2221 * (See @remark for why we don't check for other FFs.)
2222 */
2223 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2224 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2225 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2226 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2227
2228 /*
2229 * We're now in REM mode.
2230 */
2231 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2232 pVM->rem.s.fInREM = true;
2233 pVM->rem.s.fInStateSync = false;
2234 pVM->rem.s.cCanExecuteRaw = 0;
2235 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2236 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2237 return VINF_SUCCESS;
2238}
2239
2240
2241/**
2242 * Syncs back changes in the REM state to the VM state.
2243 *
2244 * This must be called after invoking REMR3Run().
2245 * Calling it several times in a row is not permitted.
2246 *
2247 * @returns VBox status code.
2248 *
2249 * @param pVM VM Handle.
2250 * @param pVCpu VMCPU Handle.
2251 */
2252REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2253{
2254    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2255    Assert(pCtx);
2256    unsigned i;
2257
2258    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2259    Log2(("REMR3StateBack:\n"));
2260    Assert(pVM->rem.s.fInREM);
2261
2262    /*
2263     * Copy back the registers.
2264     * This is done in the order they are declared in the CPUMCTX structure.
2265     */
2266
2267    /** @todo FOP */
2268    /** @todo FPUIP */
2269    /** @todo CS */
2270    /** @todo FPUDP */
2271    /** @todo DS */
2272
2273    /** @todo check if FPU/XMM was actually used in the recompiler */
2274    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2275//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2276
2277#ifdef TARGET_X86_64
2278    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2279    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
2280    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
2281    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
2282    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
2283    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
2284    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
2285    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
2286    pCtx->r8            = pVM->rem.s.Env.regs[8];
2287    pCtx->r9            = pVM->rem.s.Env.regs[9];
2288    pCtx->r10           = pVM->rem.s.Env.regs[10];
2289    pCtx->r11           = pVM->rem.s.Env.regs[11];
2290    pCtx->r12           = pVM->rem.s.Env.regs[12];
2291    pCtx->r13           = pVM->rem.s.Env.regs[13];
2292    pCtx->r14           = pVM->rem.s.Env.regs[14];
2293    pCtx->r15           = pVM->rem.s.Env.regs[15];
2294
2295    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];
2296
2297#else
2298    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
2299    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
2300    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
2301    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
2302    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
2303    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
2304    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];
2305
2306    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
2307#endif
2308
2309    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;
2310
2311#ifdef VBOX_WITH_STATISTICS
    /* NOTE(review): a non-zero 'newselector' appears to mean the recompiler
       deferred (lazily skipped) the full selector load — confirm against
       sync_seg in the recompiler core. These counters just track how often
       we copy back such not-fully-loaded selectors. */
2312    if (pVM->rem.s.Env.segs[R_SS].newselector)
2313    {
2314        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2315    }
2316    if (pVM->rem.s.Env.segs[R_GS].newselector)
2317    {
2318        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2319    }
2320    if (pVM->rem.s.Env.segs[R_FS].newselector)
2321    {
2322        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2323    }
2324    if (pVM->rem.s.Env.segs[R_ES].newselector)
2325    {
2326        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2327    }
2328    if (pVM->rem.s.Env.segs[R_DS].newselector)
2329    {
2330        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2331    }
2332    if (pVM->rem.s.Env.segs[R_CS].newselector)
2333    {
2334        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2335    }
2336#endif
2337    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
2338    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
2339    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
2340    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
2341    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;
2342
2343#ifdef TARGET_X86_64
2344    pCtx->rip           = pVM->rem.s.Env.eip;
2345    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
2346#else
2347    pCtx->eip           = pVM->rem.s.Env.eip;
2348    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
2349#endif
2350
2351    pCtx->cr0           = pVM->rem.s.Env.cr[0];
2352    pCtx->cr2           = pVM->rem.s.Env.cr[2];
2353    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires a TSS resync (flag it for SELM). */
2354    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2355        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2356    pCtx->cr4           = pVM->rem.s.Env.cr[4];
2357
2358    for (i = 0; i < 8; i++)
2359        pCtx->dr[i]     = pVM->rem.s.Env.dr[i];
2360
    /* GDT/IDT: only flag a resync when the base actually moved. */
2361    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
2362    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2363    {
2364        pCtx->gdtr.pGdt     = pVM->rem.s.Env.gdt.base;
2365        STAM_COUNTER_INC(&gStatREMGDTChange);
2366        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2367    }
2368
2369    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
2370    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2371    {
2372        pCtx->idtr.pIdt     = pVM->rem.s.Env.idt.base;
2373        STAM_COUNTER_INC(&gStatREMIDTChange);
2374        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2375    }
2376
    /* LDTR: the 0xF0FF mask keeps the attribute bits of the descriptor's
       2nd dword (access byte + AVL/L/D/G) and drops the limit 19:16 bits. */
2377    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2378        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2379        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2380        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2381    {
2382        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
2383        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
2384        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
2385        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2386        STAM_COUNTER_INC(&gStatREMLDTRChange);
2387        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2388    }
2389
2390    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
2391        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2392        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2393            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2394        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2395                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2396                                   : 0) )
2397    {
2398        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2399             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2400             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2401             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2402        pCtx->tr                = pVM->rem.s.Env.tr.selector;
2403        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
2404        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
2405        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit that REMR3State cleared before entering REM. */
2406        if (pCtx->trHid.Attr.u)
2407            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2408        STAM_COUNTER_INC(&gStatREMTRChange);
2409        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2410    }
2411
2412    /** @todo These values could still be out of sync! */
2413    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
2414    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
2415    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2416    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2417
2418    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
2419    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
2420    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2421
2422    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
2423    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
2424    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2425
2426    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
2427    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
2428    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2429
2430    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
2431    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
2432    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2433
2434    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
2435    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
2436    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2437
2438    /* Sysenter MSR */
2439    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
2440    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
2441    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;
2442
2443    /* System MSRs. */
2444    pCtx->msrEFER          = pVM->rem.s.Env.efer;
2445    pCtx->msrSTAR          = pVM->rem.s.Env.star;
2446    pCtx->msrPAT           = pVM->rem.s.Env.pat;
2447#ifdef TARGET_X86_64
2448    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
2449    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
2450    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
2451    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
2452#endif
2453
2454    remR3TrapClear(pVM);
2455
2456    /*
2457     * Check for traps.
     * Forward any exception the recompiler left pending (valid vectors are
     * 0..255) to TRPM so the rest of the VMM can dispatch it.
2458     */
2459    if (    pVM->rem.s.Env.exception_index >= 0
2460        &&  pVM->rem.s.Env.exception_index < 256)
2461    {
2462        int rc;
2463
2464        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2465        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2466        AssertRC(rc);
        /* #PF additionally carries the fault address (from CR2); #TS/#NP/#SS/
           #GP/#AC/#DF carry an error code. */
2467        switch (pVM->rem.s.Env.exception_index)
2468        {
2469            case 0x0e:
2470                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2471                /* fallthru */
2472            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2473            case 0x11: case 0x08: /* 0 */
2474                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2475                break;
2476        }
2477
2478    }
2479
2480    /*
2481     * We're no longer in REM mode.
2482     */
2483    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2484    pVM->rem.s.fInREM    = false;
2485    pVM->rem.s.pCtx      = NULL;
2486    pVM->rem.s.Env.pVCpu = NULL;
2487    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2488    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2489    return VINF_SUCCESS;
2490}
2491
2492
2493/**
2494 * This is called by the disassembler when it wants to update the cpu state
2495 * before for instance doing a register dump.
2496 */
2497static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2498{
2499 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2500 unsigned i;
2501
2502 Assert(pVM->rem.s.fInREM);
2503
2504 /*
2505 * Copy back the registers.
2506 * This is done in the order they are declared in the CPUMCTX structure.
2507 */
2508
2509 /** @todo FOP */
2510 /** @todo FPUIP */
2511 /** @todo CS */
2512 /** @todo FPUDP */
2513 /** @todo DS */
2514 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2515 pCtx->fpu.MXCSR = 0;
2516 pCtx->fpu.MXCSR_MASK = 0;
2517
2518 /** @todo check if FPU/XMM was actually used in the recompiler */
2519 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2520//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2521
2522#ifdef TARGET_X86_64
2523 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2524 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2525 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2526 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2527 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2528 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2529 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2530 pCtx->r8 = pVM->rem.s.Env.regs[8];
2531 pCtx->r9 = pVM->rem.s.Env.regs[9];
2532 pCtx->r10 = pVM->rem.s.Env.regs[10];
2533 pCtx->r11 = pVM->rem.s.Env.regs[11];
2534 pCtx->r12 = pVM->rem.s.Env.regs[12];
2535 pCtx->r13 = pVM->rem.s.Env.regs[13];
2536 pCtx->r14 = pVM->rem.s.Env.regs[14];
2537 pCtx->r15 = pVM->rem.s.Env.regs[15];
2538
2539 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2540#else
2541 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2542 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2543 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2544 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2545 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2546 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2547 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2548
2549 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2550#endif
2551
2552 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2553
2554 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2555 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2556 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2557 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2558 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2559
2560#ifdef TARGET_X86_64
2561 pCtx->rip = pVM->rem.s.Env.eip;
2562 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2563#else
2564 pCtx->eip = pVM->rem.s.Env.eip;
2565 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2566#endif
2567
2568 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2569 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2570 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2571 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2572 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2573 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2574
2575 for (i = 0; i < 8; i++)
2576 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2577
2578 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2579 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2580 {
2581 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2582 STAM_COUNTER_INC(&gStatREMGDTChange);
2583 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2584 }
2585
2586 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2587 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2588 {
2589 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2590 STAM_COUNTER_INC(&gStatREMIDTChange);
2591 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2592 }
2593
2594 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2595 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2596 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2597 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2598 {
2599 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2600 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2601 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2602 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2603 STAM_COUNTER_INC(&gStatREMLDTRChange);
2604 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2605 }
2606
2607 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2608 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2609 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2610 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2611 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2612 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2613 : 0) )
2614 {
2615 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2616 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2617 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2618 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2619 pCtx->tr = pVM->rem.s.Env.tr.selector;
2620 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2621 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2622 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2623 if (pCtx->trHid.Attr.u)
2624 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2625 STAM_COUNTER_INC(&gStatREMTRChange);
2626 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2627 }
2628
2629 /** @todo These values could still be out of sync! */
2630 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2631 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2632 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2633 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2634
2635 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2636 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2637 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2638
2639 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2640 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2641 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2642
2643 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2644 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2645 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2646
2647 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2648 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2649 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2650
2651 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2652 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2653 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2654
2655 /* Sysenter MSR */
2656 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2657 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2658 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2659
2660 /* System MSRs. */
2661 pCtx->msrEFER = pVM->rem.s.Env.efer;
2662 pCtx->msrSTAR = pVM->rem.s.Env.star;
2663 pCtx->msrPAT = pVM->rem.s.Env.pat;
2664#ifdef TARGET_X86_64
2665 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2666 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2667 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2668 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2669#endif
2670
2671}
2672
2673
2674/**
2675 * Update the VMM state information if we're currently in REM.
2676 *
2677 * This method is used by the DBGF and PDMDevice when there is any uncertainty about whether
2678 * we're currently executing in REM and the VMM state is invalid. This method will of
2679 * course check that we're executing in REM before syncing any data over to the VMM.
2680 *
2681 * @param pVM The VM handle.
2682 * @param pVCpu The VMCPU handle.
2683 */
2684REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2685{
2686 if (pVM->rem.s.fInREM)
2687 remR3StateUpdate(pVM, pVCpu);
2688}
2689
2690
2691#undef LOG_GROUP
2692#define LOG_GROUP LOG_GROUP_REM
2693
2694
2695/**
2696 * Notify the recompiler about Address Gate 20 state change.
2697 *
2698 * This notification is required since A20 gate changes are
2699 * initialized from a device driver and the VM might just as
2700 * well be in REM mode as in RAW mode.
2701 *
2702 * @param pVM VM handle.
2703 * @param pVCpu VMCPU handle.
2704 * @param fEnable True if the gate should be enabled.
2705 * False if the gate should be disabled.
2706 */
2707REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2708{
2709    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2710    VM_ASSERT_EMT(pVM);
2711
    /* Raise the ignore-all counter around the update so REM callbacks
       triggered by the A20 change are discarded.  NOTE(review):
       cpu_x86_set_a20 presumably flushes the recompiler TLB, which would
       otherwise fire notifications back into the VMM — confirm. */
2712    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2713    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2714    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2715}
2716
2717
2718/**
2719 * Replays the handler notification changes
2720 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2721 *
2722 * @param pVM VM handle.
2723 */
2724REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2725{
2726    /*
2727     * Replay the flushes.
2728     */
2729    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2730    VM_ASSERT_EMT(pVM);
2731
2732    /** @todo this isn't ensuring correct replay order. */
2733    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
2734    {
2735        uint32_t    idxNext;
2736        uint32_t    idxRevHead;
2737        uint32_t    idxHead;
2738#ifdef VBOX_STRICT
2739        int32_t     c = 0;
2740#endif
2741
2742        /* Lockless purging of pending notifications. */
        /* Atomically detach the whole pending chain; other EMTs keep pushing
           onto a now-empty list while we work on our private copy. */
2743        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2744        if (idxHead == UINT32_MAX)
2745            return;
2746        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2747
2748        /*
2749         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the detached list is LIFO.)
2750         */
2751        idxRevHead = UINT32_MAX;
2752        do
2753        {
2754            /* Save the index of the next rec. */
2755            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2756            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2757            /* Push the record onto the reversed list. */
2758            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2759            idxRevHead = idxHead;
            /* 'c' bounds the walk (cycle guard); it only exists in strict
               builds, where Assert presumably evaluates its argument. */
2760            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2761            /* Advance. */
2762            idxHead = idxNext;
2763        } while (idxHead != UINT32_MAX);
2764
2765        /*
2766         * Loop thru the list, reinserting the record into the free list as they are
2767         * processed to avoid having other EMTs running out of entries while we're flushing.
2768         */
2769        idxHead = idxRevHead;
2770        do
2771        {
2772            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
2773            uint32_t                idxCur;
2774            Assert(--c >= 0);
2775
            /* Dispatch to the matching remR3NotifyHandlerPhysical* worker. */
2776            switch (pCur->enmKind)
2777            {
2778                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2779                    remR3NotifyHandlerPhysicalRegister(pVM,
2780                                                       pCur->u.PhysicalRegister.enmType,
2781                                                       pCur->u.PhysicalRegister.GCPhys,
2782                                                       pCur->u.PhysicalRegister.cb,
2783                                                       pCur->u.PhysicalRegister.fHasHCHandler);
2784                    break;
2785
2786                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2787                    remR3NotifyHandlerPhysicalDeregister(pVM,
2788                                                         pCur->u.PhysicalDeregister.enmType,
2789                                                         pCur->u.PhysicalDeregister.GCPhys,
2790                                                         pCur->u.PhysicalDeregister.cb,
2791                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
2792                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
2793                    break;
2794
2795                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2796                    remR3NotifyHandlerPhysicalModify(pVM,
2797                                                     pCur->u.PhysicalModify.enmType,
2798                                                     pCur->u.PhysicalModify.GCPhysOld,
2799                                                     pCur->u.PhysicalModify.GCPhysNew,
2800                                                     pCur->u.PhysicalModify.cb,
2801                                                     pCur->u.PhysicalModify.fHasHCHandler,
2802                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
2803                    break;
2804
2805                default:
2806                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2807                    break;
2808            }
2809
2810            /*
2811             * Advance idxHead.
2812             */
2813            idxCur = idxHead;
2814            idxHead = pCur->idxNext;
2815            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
2816
2817            /*
2818             * Put the record back into the free list.
             * Lock-free push: point our record at the current free head and
             * CAS it in; retry if another EMT changed the head meanwhile.
2819             */
2820            do
2821            {
2822                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2823                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2824                ASMCompilerBarrier();
2825            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
2826        } while (idxHead != UINT32_MAX);
2827
2828#ifdef VBOX_STRICT
        /* With a single EMT nobody can be concurrently allocating entries,
           so the free list must contain every record again. */
2829        if (pVM->cCPUs == 1)
2830        {
2831            /* Check that all records are now on the free list. */
2832            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
2833                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
2834                c++;
2835            AssertMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
2836        }
2837#endif
2838    }
2839}
2840
2841
/**
 * Notify REM about changed code page.
 *
 * When VBOX_REM_PROTECT_PAGES_FROM_SMC is defined, this syncs the control
 * registers into the recompiler CPU state and invalidates all translation
 * blocks covering the page; otherwise it is a no-op.
 *
 * @returns VBox status code. Always VINF_SUCCESS (a PGMGstGetPage failure is
 *          silently ignored - the page then simply isn't flushed).
 * @param   pVM             VM handle.
 * @param   pVCpu           VMCPU handle (unused in the non-SMC build).
 * @param   pvCodePage      Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int      rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2880
2881
2882/**
2883 * Notification about a successful MMR3PhysRegister() call.
2884 *
2885 * @param pVM VM handle.
2886 * @param GCPhys The physical address the RAM.
2887 * @param cb Size of the memory.
2888 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2889 */
2890REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2891{
2892 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2893 VM_ASSERT_EMT(pVM);
2894
2895 /*
2896 * Validate input - we trust the caller.
2897 */
2898 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2899 Assert(cb);
2900 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2901 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2902
2903 /*
2904 * Base ram? Update GCPhysLastRam.
2905 */
2906 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2907 {
2908 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2909 {
2910 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2911 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2912 }
2913 }
2914
2915 /*
2916 * Register the ram.
2917 */
2918 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2919
2920 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2921 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2922 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2923
2924 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2925}
2926
2927
2928/**
2929 * Notification about a successful MMR3PhysRomRegister() call.
2930 *
2931 * @param pVM VM handle.
2932 * @param GCPhys The physical address of the ROM.
2933 * @param cb The size of the ROM.
2934 * @param pvCopy Pointer to the ROM copy.
2935 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2936 * This function will be called when ever the protection of the
2937 * shadow ROM changes (at reset and end of POST).
2938 */
2939REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2940{
2941 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2942 VM_ASSERT_EMT(pVM);
2943
2944 /*
2945 * Validate input - we trust the caller.
2946 */
2947 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2948 Assert(cb);
2949 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2950
2951 /*
2952 * Register the rom.
2953 */
2954 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2955
2956 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2957 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2958 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2959
2960 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2961}
2962
2963
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * Marks the range as unassigned in the recompiler's physical memory map.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address (page aligned).
 * @param   cb          The size of the range (page aligned, non-zero).
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory. The cIgnoreAll increment and the registration
     * critsect bracket the actual update, same as in the register paths.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2994
2995
2996/**
2997 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2998 *
2999 * @param pVM VM Handle.
3000 * @param enmType Handler type.
3001 * @param GCPhys Handler range address.
3002 * @param cb Size of the handler range.
3003 * @param fHasHCHandler Set if the handler has a HC callback function.
3004 *
3005 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3006 * Handler memory type to memory which has no HC handler.
3007 */
3008static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3009{
3010 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3011 enmType, GCPhys, cb, fHasHCHandler));
3012
3013 VM_ASSERT_EMT(pVM);
3014 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3015 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3016
3017
3018 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3019
3020 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3021 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3022 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3023 else if (fHasHCHandler)
3024 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3025 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3026
3027 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3028}
3029
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * Public wrapper: first replays any queued (lockless) handler notifications
 * so ordering is preserved, then performs the registration directly.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3048
3049/**
3050 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3051 *
3052 * @param pVM VM Handle.
3053 * @param enmType Handler type.
3054 * @param GCPhys Handler range address.
3055 * @param cb Size of the handler range.
3056 * @param fHasHCHandler Set if the handler has a HC callback function.
3057 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3058 */
3059static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3060{
3061 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3062 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3063 VM_ASSERT_EMT(pVM);
3064
3065
3066 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3067
3068 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3069 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3070 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3071 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3072 else if (fHasHCHandler)
3073 {
3074 if (!fRestoreAsRAM)
3075 {
3076 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3077 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3078 }
3079 else
3080 {
3081 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3082 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3083 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3084 }
3085 }
3086 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3087
3088 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3089}
3090
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * Public wrapper: first replays any queued (lockless) handler notifications
 * so ordering is preserved, then performs the deregistration directly.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3106
3107
3108/**
3109 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3110 *
3111 * @param pVM VM Handle.
3112 * @param enmType Handler type.
3113 * @param GCPhysOld Old handler range address.
3114 * @param GCPhysNew New handler range address.
3115 * @param cb Size of the handler range.
3116 * @param fHasHCHandler Set if the handler has a HC callback function.
3117 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3118 */
3119static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3120{
3121 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3122 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3123 VM_ASSERT_EMT(pVM);
3124 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3125
3126 if (fHasHCHandler)
3127 {
3128 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3129
3130 /*
3131 * Reset the old page.
3132 */
3133 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3134 if (!fRestoreAsRAM)
3135 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3136 else
3137 {
3138 /* This is not perfect, but it'll do for PD monitoring... */
3139 Assert(cb == PAGE_SIZE);
3140 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3141 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3142 }
3143
3144 /*
3145 * Update the new page.
3146 */
3147 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3148 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3149 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3150 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3151
3152 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3153 }
3154}
3155
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * Public wrapper: first replays any queued (lockless) handler notifications
 * so ordering is preserved, then performs the modification directly.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether to restore the old range as normal RAM or
 *                          as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3173
3174/**
3175 * Checks if we're handling access to this page or not.
3176 *
3177 * @returns true if we're trapping access.
3178 * @returns false if we aren't.
3179 * @param pVM The VM handle.
3180 * @param GCPhys The physical address.
3181 *
3182 * @remark This function will only work correctly in VBOX_STRICT builds!
3183 */
3184REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3185{
3186#ifdef VBOX_STRICT
3187 unsigned long off;
3188 REMR3ReplayHandlerNotifications(pVM);
3189
3190 off = get_phys_page_offset(GCPhys);
3191 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3192 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3193 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3194#else
3195 return false;
3196#endif
3197}
3198
3199
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The IOTLB entry for the page (memory type in the low
 *                      bits, physical page address in the upper bits).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored page: executing from MMIO or similar is fatal.
       Dump the handler/mmio/phys info to the release log before aborting. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3238
3239/**
3240 * Read guest RAM and ROM.
3241 *
3242 * @param SrcGCPhys The source address (guest physical).
3243 * @param pvDst The destination address.
3244 * @param cb Number of bytes
3245 */
3246void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3247{
3248 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3249 VBOX_CHECK_ADDR(SrcGCPhys);
3250 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3251#ifdef VBOX_DEBUG_PHYS
3252 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3253#endif
3254 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3255}
3256
3257
3258/**
3259 * Read guest RAM and ROM, unsigned 8-bit.
3260 *
3261 * @param SrcGCPhys The source address (guest physical).
3262 */
3263RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3264{
3265 uint8_t val;
3266 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3267 VBOX_CHECK_ADDR(SrcGCPhys);
3268 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3269 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3270#ifdef VBOX_DEBUG_PHYS
3271 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3272#endif
3273 return val;
3274}
3275
3276
3277/**
3278 * Read guest RAM and ROM, signed 8-bit.
3279 *
3280 * @param SrcGCPhys The source address (guest physical).
3281 */
3282RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3283{
3284 int8_t val;
3285 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3286 VBOX_CHECK_ADDR(SrcGCPhys);
3287 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3288 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3289#ifdef VBOX_DEBUG_PHYS
3290 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3291#endif
3292 return val;
3293}
3294
3295
3296/**
3297 * Read guest RAM and ROM, unsigned 16-bit.
3298 *
3299 * @param SrcGCPhys The source address (guest physical).
3300 */
3301RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3302{
3303 uint16_t val;
3304 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3305 VBOX_CHECK_ADDR(SrcGCPhys);
3306 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3307 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3308#ifdef VBOX_DEBUG_PHYS
3309 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3310#endif
3311 return val;
3312}
3313
3314
3315/**
3316 * Read guest RAM and ROM, signed 16-bit.
3317 *
3318 * @param SrcGCPhys The source address (guest physical).
3319 */
3320RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3321{
3322 int16_t val;
3323 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3324 VBOX_CHECK_ADDR(SrcGCPhys);
3325 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3326 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3327#ifdef VBOX_DEBUG_PHYS
3328 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3329#endif
3330 return val;
3331}
3332
3333
3334/**
3335 * Read guest RAM and ROM, unsigned 32-bit.
3336 *
3337 * @param SrcGCPhys The source address (guest physical).
3338 */
3339RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3340{
3341 uint32_t val;
3342 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3343 VBOX_CHECK_ADDR(SrcGCPhys);
3344 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3345 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3346#ifdef VBOX_DEBUG_PHYS
3347 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3348#endif
3349 return val;
3350}
3351
3352
3353/**
3354 * Read guest RAM and ROM, signed 32-bit.
3355 *
3356 * @param SrcGCPhys The source address (guest physical).
3357 */
3358RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3359{
3360 int32_t val;
3361 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3362 VBOX_CHECK_ADDR(SrcGCPhys);
3363 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3364 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3365#ifdef VBOX_DEBUG_PHYS
3366 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3367#endif
3368 return val;
3369}
3370
3371
3372/**
3373 * Read guest RAM and ROM, unsigned 64-bit.
3374 *
3375 * @param SrcGCPhys The source address (guest physical).
3376 */
3377uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3378{
3379 uint64_t val;
3380 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3381 VBOX_CHECK_ADDR(SrcGCPhys);
3382 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3383 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3384#ifdef VBOX_DEBUG_PHYS
3385 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3386#endif
3387 return val;
3388}
3389
3390
3391/**
3392 * Read guest RAM and ROM, signed 64-bit.
3393 *
3394 * @param SrcGCPhys The source address (guest physical).
3395 */
3396int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3397{
3398 int64_t val;
3399 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3400 VBOX_CHECK_ADDR(SrcGCPhys);
3401 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3402 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3403#ifdef VBOX_DEBUG_PHYS
3404 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3405#endif
3406 return val;
3407}
3408
3409
3410/**
3411 * Write guest RAM.
3412 *
3413 * @param DstGCPhys The destination address (guest physical).
3414 * @param pvSrc The source address.
3415 * @param cb Number of bytes to write
3416 */
3417void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3418{
3419 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3420 VBOX_CHECK_ADDR(DstGCPhys);
3421 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3422 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3423#ifdef VBOX_DEBUG_PHYS
3424 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3425#endif
3426}
3427
3428
3429/**
3430 * Write guest RAM, unsigned 8-bit.
3431 *
3432 * @param DstGCPhys The destination address (guest physical).
3433 * @param val Value
3434 */
3435void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3436{
3437 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3438 VBOX_CHECK_ADDR(DstGCPhys);
3439 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3440 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3441#ifdef VBOX_DEBUG_PHYS
3442 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3443#endif
3444}
3445
3446
/**
 * Write guest RAM, unsigned 16-bit.
 *
 * (Header previously said "8-bit" - this is the 16-bit variant.)
 *
 * @param   DstGCPhys   The destination address (guest physical).
 * @param   val         Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3463
3464
3465/**
3466 * Write guest RAM, unsigned 32-bit.
3467 *
3468 * @param DstGCPhys The destination address (guest physical).
3469 * @param val Value
3470 */
3471void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3472{
3473 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3474 VBOX_CHECK_ADDR(DstGCPhys);
3475 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3476 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3477#ifdef VBOX_DEBUG_PHYS
3478 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3479#endif
3480}
3481
3482
3483/**
3484 * Write guest RAM, unsigned 64-bit.
3485 *
3486 * @param DstGCPhys The destination address (guest physical).
3487 * @param val Value
3488 */
3489void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3490{
3491 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3492 VBOX_CHECK_ADDR(DstGCPhys);
3493 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3494 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3495#ifdef VBOX_DEBUG_PHYS
3496 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3497#endif
3498}
3499
3500#undef LOG_GROUP
3501#define LOG_GROUP LOG_GROUP_REM_MMIO
3502
3503/** Read MMIO memory. */
3504static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3505{
3506 uint32_t u32 = 0;
3507 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3508 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3509 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3510 return u32;
3511}
3512
3513/** Read MMIO memory. */
3514static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3515{
3516 uint32_t u32 = 0;
3517 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3518 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3519 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3520 return u32;
3521}
3522
3523/** Read MMIO memory. */
3524static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3525{
3526 uint32_t u32 = 0;
3527 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3528 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3529 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3530 return u32;
3531}
3532
3533/** Write to MMIO memory. */
3534static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3535{
3536 int rc;
3537 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3538 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3539 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3540}
3541
3542/** Write to MMIO memory. */
3543static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3544{
3545 int rc;
3546 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3547 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3548 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3549}
3550
3551/** Write to MMIO memory. */
3552static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3553{
3554 int rc;
3555 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3556 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3557 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3558}
3559
3560
3561#undef LOG_GROUP
3562#define LOG_GROUP LOG_GROUP_REM_HANDLER
3563
3564/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3565
3566static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3567{
3568 uint8_t u8;
3569 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3570 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3571 return u8;
3572}
3573
3574static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3575{
3576 uint16_t u16;
3577 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3578 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3579 return u16;
3580}
3581
3582static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3583{
3584 uint32_t u32;
3585 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3586 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3587 return u32;
3588}
3589
3590static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3591{
3592 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3593 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3594}
3595
3596static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3597{
3598 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3599 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3600}
3601
3602static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3603{
3604 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3605 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3606}
3607
3608/* -+- disassembly -+- */
3609
3610#undef LOG_GROUP
3611#define LOG_GROUP LOG_GROUP_REM_DISAS
3612
3613
3614/**
3615 * Enables or disables singled stepped disassembly.
3616 *
3617 * @returns VBox status code.
3618 * @param pVM VM handle.
3619 * @param fEnable To enable set this flag, to disable clear it.
3620 */
3621static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3622{
3623 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3624 VM_ASSERT_EMT(pVM);
3625
3626 if (fEnable)
3627 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3628 else
3629 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3630 return VINF_SUCCESS;
3631}
3632
3633
3634/**
3635 * Enables or disables singled stepped disassembly.
3636 *
3637 * @returns VBox status code.
3638 * @param pVM VM handle.
3639 * @param fEnable To enable set this flag, to disable clear it.
3640 */
3641REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3642{
3643 PVMREQ pReq;
3644 int rc;
3645
3646 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3647 if (VM_IS_EMT(pVM))
3648 return remR3DisasEnableStepping(pVM, fEnable);
3649
3650 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3651 AssertRC(rc);
3652 if (RT_SUCCESS(rc))
3653 rc = pReq->iStatus;
3654 VMR3ReqFree(pReq);
3655 return rc;
3656}
3657
3658
3659#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument prints the current state; with one boolean argument
 * enables/disables single-stepped disassembly via REMR3DisasEnableStepping.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
3682#endif
3683
3684
/**
 * Disassembles one instruction and prints it to the log.
 *
 * @returns Success indicator.
 * @param   env         Pointer to the recompiler CPU structure.
 * @param   f32BitCode  Indicates that whether or not the code should
 *                      be disassembled as 16 or 32 bit. If -1 the CS
 *                      selector will be inspected.
 * @param   pszPrefix   Prefix string for the log output (also passed to the
 *                      register-dump info handler).
 */
bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
{
    PVM pVM = env->pVM;
    const bool fLog = LogIsEnabled();
    const bool fLog2 = LogIs2Enabled();
    int rc = VINF_SUCCESS;

    /*
     * Don't bother if there ain't any log output to do.
     */
    if (!fLog && !fLog2)
        return true;

    /*
     * Update the state so DBGF reads the correct register values.
     */
    remR3StateUpdate(pVM, env->pVCpu);

    /*
     * Log registers if requested.
     * NOTE(review): this dumps registers only when Log2 is NOT enabled,
     * which looks inverted - confirm against the intended Log/Log2 split.
     */
    if (!fLog2)
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);

    /*
     * Disassemble to log.
     */
    if (fLog)
        rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);

    return RT_SUCCESS(rc);
}
3727
3728
/**
 * Disassemble recompiled code.
 *
 * Prints either to phFile (DEBUG_TMP_LOGGING) or to the VBox log when Log2
 * is enabled. On AMD64 the loop bails out on the first decode error (see
 * the @todo below).
 *
 * @param   phFile      Logfile, used only in the DEBUG_TMP_LOGGING build.
 * @param   pvCode      Pointer to the code block.
 * @param   cb          Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* NB: this 'if' guards the whole block below in the non-tmp-logging build. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3776
3777
3778/**
3779 * Disassemble guest code.
3780 *
3781 * @param phFileIgnored Ignored, logfile usually.
3782 * @param uCode The guest address of the code to disassemble. (flat?)
3783 * @param cb Number of bytes to disassemble.
3784 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3785 */
3786void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3787{
3788#ifdef DEBUG_TMP_LOGGING
3789# define DISAS_PRINTF(x...) fprintf(phFile, x)
3790#else
3791# define DISAS_PRINTF(x...) RTLogPrintf(x)
3792 if (LogIs2Enabled())
3793#endif
3794 {
3795 PVM pVM = cpu_single_env->pVM;
3796 PVMCPU pVCpu = cpu_single_env->pVCpu;
3797 RTSEL cs;
3798 RTGCUINTPTR eip;
3799
3800 Assert(pVCpu);
3801
3802 /*
3803 * Update the state so DBGF reads the correct register values (flags).
3804 */
3805 remR3StateUpdate(pVM, pVCpu);
3806
3807 /*
3808 * Do the disassembling.
3809 */
3810 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3811 cs = cpu_single_env->segs[R_CS].selector;
3812 eip = uCode - cpu_single_env->segs[R_CS].base;
3813 for (;;)
3814 {
3815 char szBuf[256];
3816 uint32_t cbInstr;
3817 int rc = DBGFR3DisasInstrEx(pVM,
3818 pVCpu->idCpu,
3819 cs,
3820 eip,
3821 0,
3822 szBuf, sizeof(szBuf),
3823 &cbInstr);
3824 if (RT_SUCCESS(rc))
3825 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3826 else
3827 {
3828 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3829 cbInstr = 1;
3830 }
3831
3832 /* next */
3833 if (cb <= cbInstr)
3834 break;
3835 cb -= cbInstr;
3836 uCode += cbInstr;
3837 eip += cbInstr;
3838 }
3839 }
3840#undef DISAS_PRINTF
3841}
3842
3843
3844/**
3845 * Looks up a guest symbol.
3846 *
3847 * @returns Pointer to symbol name. This is a static buffer.
3848 * @param orig_addr The address in question.
3849 */
3850const char *lookup_symbol(target_ulong orig_addr)
3851{
3852 PVM pVM = cpu_single_env->pVM;
3853 RTGCINTPTR off = 0;
3854 RTDBGSYMBOL Sym;
3855 DBGFADDRESS Addr;
3856
3857 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3858 if (RT_SUCCESS(rc))
3859 {
3860 static char szSym[sizeof(Sym.szName) + 48];
3861 if (!off)
3862 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3863 else if (off > 0)
3864 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3865 else
3866 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3867 return szSym;
3868 }
3869 return "<N/A>";
3870}
3871
3872
3873#undef LOG_GROUP
3874#define LOG_GROUP LOG_GROUP_REM
3875
3876
3877/* -+- FF notifications -+- */
3878
3879
3880/**
3881 * Notification about a pending interrupt.
3882 *
3883 * @param pVM VM Handle.
3884 * @param pVCpu VMCPU Handle.
3885 * @param u8Interrupt Interrupt
3886 * @thread The emulation thread.
3887 */
3888REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3889{
3890 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3891 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3892}
3893
3894/**
3895 * Notification about a pending interrupt.
3896 *
3897 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3898 * @param pVM VM Handle.
3899 * @param pVCpu VMCPU Handle.
3900 * @thread The emulation thread.
3901 */
3902REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3903{
3904 return pVM->rem.s.u32PendingInterrupt;
3905}
3906
3907/**
3908 * Notification about the interrupt FF being set.
3909 *
3910 * @param pVM VM Handle.
3911 * @param pVCpu VMCPU Handle.
3912 * @thread The emulation thread.
3913 */
3914REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3915{
3916 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3917 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3918 if (pVM->rem.s.fInREM)
3919 {
3920 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3921 CPU_INTERRUPT_EXTERNAL_HARD);
3922 }
3923}
3924
3925
3926/**
3927 * Notification about the interrupt FF being set.
3928 *
3929 * @param pVM VM Handle.
3930 * @param pVCpu VMCPU Handle.
3931 * @thread Any.
3932 */
3933REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3934{
3935 LogFlow(("REMR3NotifyInterruptClear:\n"));
3936 if (pVM->rem.s.fInREM)
3937 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3938}
3939
3940
3941/**
3942 * Notification about pending timer(s).
3943 *
3944 * @param pVM VM Handle.
3945 * @param pVCpuDst The target cpu for this notification.
3946 * TM will not broadcast pending timer events, but use
3947 * a decidated EMT for them. So, only interrupt REM
3948 * execution if the given CPU is executing in REM.
3949 * @thread Any.
3950 */
3951REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3952{
3953#ifndef DEBUG_bird
3954 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3955#endif
3956 if (pVM->rem.s.fInREM)
3957 {
3958 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3959 {
3960 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3961 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3962 CPU_INTERRUPT_EXTERNAL_TIMER);
3963 }
3964 else
3965 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3966 }
3967 else
3968 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3969}
3970
3971
3972/**
3973 * Notification about pending DMA transfers.
3974 *
3975 * @param pVM VM Handle.
3976 * @thread Any.
3977 */
3978REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3979{
3980 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3981 if (pVM->rem.s.fInREM)
3982 {
3983 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3984 CPU_INTERRUPT_EXTERNAL_DMA);
3985 }
3986}
3987
3988
3989/**
3990 * Notification about pending timer(s).
3991 *
3992 * @param pVM VM Handle.
3993 * @thread Any.
3994 */
3995REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3996{
3997 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3998 if (pVM->rem.s.fInREM)
3999 {
4000 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4001 CPU_INTERRUPT_EXTERNAL_EXIT);
4002 }
4003}
4004
4005
4006/**
4007 * Notification about pending FF set by an external thread.
4008 *
4009 * @param pVM VM handle.
4010 * @thread Any.
4011 */
4012REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4013{
4014 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4015 if (pVM->rem.s.fInREM)
4016 {
4017 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4018 CPU_INTERRUPT_EXTERNAL_EXIT);
4019 }
4020}
4021
4022
4023#ifdef VBOX_WITH_STATISTICS
4024void remR3ProfileStart(int statcode)
4025{
4026 STAMPROFILEADV *pStat;
4027 switch(statcode)
4028 {
4029 case STATS_EMULATE_SINGLE_INSTR:
4030 pStat = &gStatExecuteSingleInstr;
4031 break;
4032 case STATS_QEMU_COMPILATION:
4033 pStat = &gStatCompilationQEmu;
4034 break;
4035 case STATS_QEMU_RUN_EMULATED_CODE:
4036 pStat = &gStatRunCodeQEmu;
4037 break;
4038 case STATS_QEMU_TOTAL:
4039 pStat = &gStatTotalTimeQEmu;
4040 break;
4041 case STATS_QEMU_RUN_TIMERS:
4042 pStat = &gStatTimers;
4043 break;
4044 case STATS_TLB_LOOKUP:
4045 pStat= &gStatTBLookup;
4046 break;
4047 case STATS_IRQ_HANDLING:
4048 pStat= &gStatIRQ;
4049 break;
4050 case STATS_RAW_CHECK:
4051 pStat = &gStatRawCheck;
4052 break;
4053
4054 default:
4055 AssertMsgFailed(("unknown stat %d\n", statcode));
4056 return;
4057 }
4058 STAM_PROFILE_ADV_START(pStat, a);
4059}
4060
4061
4062void remR3ProfileStop(int statcode)
4063{
4064 STAMPROFILEADV *pStat;
4065 switch(statcode)
4066 {
4067 case STATS_EMULATE_SINGLE_INSTR:
4068 pStat = &gStatExecuteSingleInstr;
4069 break;
4070 case STATS_QEMU_COMPILATION:
4071 pStat = &gStatCompilationQEmu;
4072 break;
4073 case STATS_QEMU_RUN_EMULATED_CODE:
4074 pStat = &gStatRunCodeQEmu;
4075 break;
4076 case STATS_QEMU_TOTAL:
4077 pStat = &gStatTotalTimeQEmu;
4078 break;
4079 case STATS_QEMU_RUN_TIMERS:
4080 pStat = &gStatTimers;
4081 break;
4082 case STATS_TLB_LOOKUP:
4083 pStat= &gStatTBLookup;
4084 break;
4085 case STATS_IRQ_HANDLING:
4086 pStat= &gStatIRQ;
4087 break;
4088 case STATS_RAW_CHECK:
4089 pStat = &gStatRawCheck;
4090 break;
4091 default:
4092 AssertMsgFailed(("unknown stat %d\n", statcode));
4093 return;
4094 }
4095 STAM_PROFILE_ADV_STOP(pStat, a);
4096}
4097#endif
4098
4099/**
4100 * Raise an RC, force rem exit.
4101 *
4102 * @param pVM VM handle.
4103 * @param rc The rc.
4104 */
4105void remR3RaiseRC(PVM pVM, int rc)
4106{
4107 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4108 Assert(pVM->rem.s.fInREM);
4109 VM_ASSERT_EMT(pVM);
4110 pVM->rem.s.rc = rc;
4111 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4112}
4113
4114
4115/* -+- timers -+- */
4116
/**
 * Reads the guest TSC for the recompiler, delegating to TM.
 *
 * @returns The current guest TSC value.
 * @param   env     Pointer to the recompiler CPU structure.
 */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4122
4123
4124/* -+- interrupts -+- */
4125
/**
 * Raises the legacy FPU error signal (ISA IRQ 13) via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4131
4132int cpu_get_pic_interrupt(CPUState *env)
4133{
4134 uint8_t u8Interrupt;
4135 int rc;
4136
4137 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4138 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4139 * with the (a)pic.
4140 */
4141 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4142 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4143 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4144 * remove this kludge. */
4145 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4146 {
4147 rc = VINF_SUCCESS;
4148 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4149 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4150 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4151 }
4152 else
4153 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4154
4155 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4156 if (RT_SUCCESS(rc))
4157 {
4158 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4159 env->interrupt_request |= CPU_INTERRUPT_HARD;
4160 return u8Interrupt;
4161 }
4162 return -1;
4163}
4164
4165
4166/* -+- local apic -+- */
4167
/**
 * Sets the APIC base MSR via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The new APIC base value.
 */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4173
4174uint64_t cpu_get_apic_base(CPUX86State *env)
4175{
4176 uint64_t u64;
4177 int rc = PDMApicGetBase(env->pVM, &u64);
4178 if (RT_SUCCESS(rc))
4179 {
4180 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4181 return u64;
4182 }
4183 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4184 return 0;
4185}
4186
/**
 * Sets the APIC task priority register from a cr8-style value.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   val     The cr8 value (low 4 bits significant).
 */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4192
4193uint8_t cpu_get_apic_tpr(CPUX86State *env)
4194{
4195 uint8_t u8;
4196 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4197 if (RT_SUCCESS(rc))
4198 {
4199 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4200 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4201 }
4202 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4203 return 0;
4204}
4205
4206
4207uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4208{
4209 uint64_t value;
4210 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4211 if (RT_SUCCESS(rc))
4212 {
4213 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4214 return value;
4215 }
4216 /** @todo: exception ? */
4217 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4218 return value;
4219}
4220
/**
 * Writes an APIC MSR via PDM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   reg     The MSR to write.
 * @param   value   The value to write.
 *
 * @note    Hardcodes CPU 0 — presumably the recompiler's UP view; confirm
 *          for SMP guests.
 */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4227
/**
 * Reads a guest MSR via CPUM.
 *
 * @returns The MSR value.
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to read.
 */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4233
/**
 * Writes a guest MSR via CPUM.
 *
 * @param   env     Pointer to the recompiler CPU structure.
 * @param   msr     The MSR to write.
 * @param   val     The value to write.
 */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4239
4240/* -+- I/O Ports -+- */
4241
4242#undef LOG_GROUP
4243#define LOG_GROUP LOG_GROUP_REM_IOPORT
4244
4245void cpu_outb(CPUState *env, int addr, int val)
4246{
4247 int rc;
4248
4249 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4250 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4251
4252 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4253 if (RT_LIKELY(rc == VINF_SUCCESS))
4254 return;
4255 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4256 {
4257 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4258 remR3RaiseRC(env->pVM, rc);
4259 return;
4260 }
4261 remAbort(rc, __FUNCTION__);
4262}
4263
4264void cpu_outw(CPUState *env, int addr, int val)
4265{
4266 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4267 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4268 if (RT_LIKELY(rc == VINF_SUCCESS))
4269 return;
4270 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4271 {
4272 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4273 remR3RaiseRC(env->pVM, rc);
4274 return;
4275 }
4276 remAbort(rc, __FUNCTION__);
4277}
4278
4279void cpu_outl(CPUState *env, int addr, int val)
4280{
4281 int rc;
4282 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4283 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4284 if (RT_LIKELY(rc == VINF_SUCCESS))
4285 return;
4286 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4287 {
4288 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4289 remR3RaiseRC(env->pVM, rc);
4290 return;
4291 }
4292 remAbort(rc, __FUNCTION__);
4293}
4294
4295int cpu_inb(CPUState *env, int addr)
4296{
4297 uint32_t u32 = 0;
4298 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4299 if (RT_LIKELY(rc == VINF_SUCCESS))
4300 {
4301 if (/*addr != 0x61 && */addr != 0x71)
4302 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4303 return (int)u32;
4304 }
4305 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4306 {
4307 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4308 remR3RaiseRC(env->pVM, rc);
4309 return (int)u32;
4310 }
4311 remAbort(rc, __FUNCTION__);
4312 return 0xff;
4313}
4314
4315int cpu_inw(CPUState *env, int addr)
4316{
4317 uint32_t u32 = 0;
4318 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4319 if (RT_LIKELY(rc == VINF_SUCCESS))
4320 {
4321 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4322 return (int)u32;
4323 }
4324 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4325 {
4326 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4327 remR3RaiseRC(env->pVM, rc);
4328 return (int)u32;
4329 }
4330 remAbort(rc, __FUNCTION__);
4331 return 0xffff;
4332}
4333
4334int cpu_inl(CPUState *env, int addr)
4335{
4336 uint32_t u32 = 0;
4337 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4338 if (RT_LIKELY(rc == VINF_SUCCESS))
4339 {
4340//if (addr==0x01f0 && u32 == 0x6b6d)
4341// loglevel = ~0;
4342 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4343 return (int)u32;
4344 }
4345 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4346 {
4347 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4348 remR3RaiseRC(env->pVM, rc);
4349 return (int)u32;
4350 }
4351 remAbort(rc, __FUNCTION__);
4352 return 0xffffffff;
4353}
4354
4355#undef LOG_GROUP
4356#define LOG_GROUP LOG_GROUP_REM
4357
4358
4359/* -+- helpers and misc other interfaces -+- */
4360
4361/**
4362 * Perform the CPUID instruction.
4363 *
4364 * ASMCpuId cannot be invoked from some source files where this is used because of global
4365 * register allocations.
4366 *
4367 * @param env Pointer to the recompiler CPU structure.
4368 * @param uOperator CPUID operation (eax).
4369 * @param pvEAX Where to store eax.
4370 * @param pvEBX Where to store ebx.
4371 * @param pvECX Where to store ecx.
4372 * @param pvEDX Where to store edx.
4373 */
4374void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4375{
4376 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4377}
4378
4379
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the error, asserts, syncs the REM state back if necessary and hands
 * control to EM's fatal error handling (which does not return).
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4409
4410/**
4411 * Interface for the qemu cpu to report unhandled situation
4412 * raising a fatal VM error.
4413 */
4414void cpu_abort(CPUState *env, const char *pszFormat, ...)
4415{
4416 va_list va;
4417 PVM pVM;
4418 PVMCPU pVCpu;
4419 char szMsg[256];
4420
4421 /*
4422 * Bitch about it.
4423 */
4424 RTLogFlags(NULL, "nodisabled nobuffered");
4425 RTLogFlush(NULL);
4426
4427 va_start(va, pszFormat);
4428#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4429 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4430 unsigned cArgs = 0;
4431 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4432 const char *psz = strchr(pszFormat, '%');
4433 while (psz && cArgs < 6)
4434 {
4435 auArgs[cArgs++] = va_arg(va, uintptr_t);
4436 psz = strchr(psz + 1, '%');
4437 }
4438 switch (cArgs)
4439 {
4440 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4441 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4442 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4443 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4444 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4445 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4446 default:
4447 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4448 }
4449#else
4450 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4451#endif
4452 va_end(va);
4453
4454 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4455 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4456
4457 /*
4458 * If we're in REM context we'll sync back the state before 'jumping' to
4459 * the EMs failure handling.
4460 */
4461 pVM = cpu_single_env->pVM;
4462 pVCpu = cpu_single_env->pVCpu;
4463 Assert(pVCpu);
4464
4465 if (pVM->rem.s.fInREM)
4466 REMR3StateBack(pVM, pVCpu);
4467 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4468 AssertMsgFailed(("EMR3FatalError returned!\n"));
4469}
4470
4471
4472/**
4473 * Aborts the VM.
4474 *
4475 * @param rc VBox error code.
4476 * @param pszTip Hint about why/when this happend.
4477 */
4478void remAbort(int rc, const char *pszTip)
4479{
4480 PVM pVM;
4481 PVMCPU pVCpu;
4482
4483 /*
4484 * Bitch about it.
4485 */
4486 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4487 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4488
4489 /*
4490 * Jump back to where we entered the recompiler.
4491 */
4492 pVM = cpu_single_env->pVM;
4493 pVCpu = cpu_single_env->pVCpu;
4494 Assert(pVCpu);
4495
4496 if (pVM->rem.s.fInREM)
4497 REMR3StateBack(pVM, pVCpu);
4498
4499 EMR3FatalError(pVCpu, rc);
4500 AssertMsgFailed(("EMR3FatalError returned!\n"));
4501}
4502
4503
4504/**
4505 * Dumps a linux system call.
4506 * @param pVCpu VMCPU handle.
4507 */
4508void remR3DumpLnxSyscall(PVMCPU pVCpu)
4509{
4510 static const char *apsz[] =
4511 {
4512 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4513 "sys_exit",
4514 "sys_fork",
4515 "sys_read",
4516 "sys_write",
4517 "sys_open", /* 5 */
4518 "sys_close",
4519 "sys_waitpid",
4520 "sys_creat",
4521 "sys_link",
4522 "sys_unlink", /* 10 */
4523 "sys_execve",
4524 "sys_chdir",
4525 "sys_time",
4526 "sys_mknod",
4527 "sys_chmod", /* 15 */
4528 "sys_lchown16",
4529 "sys_ni_syscall", /* old break syscall holder */
4530 "sys_stat",
4531 "sys_lseek",
4532 "sys_getpid", /* 20 */
4533 "sys_mount",
4534 "sys_oldumount",
4535 "sys_setuid16",
4536 "sys_getuid16",
4537 "sys_stime", /* 25 */
4538 "sys_ptrace",
4539 "sys_alarm",
4540 "sys_fstat",
4541 "sys_pause",
4542 "sys_utime", /* 30 */
4543 "sys_ni_syscall", /* old stty syscall holder */
4544 "sys_ni_syscall", /* old gtty syscall holder */
4545 "sys_access",
4546 "sys_nice",
4547 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4548 "sys_sync",
4549 "sys_kill",
4550 "sys_rename",
4551 "sys_mkdir",
4552 "sys_rmdir", /* 40 */
4553 "sys_dup",
4554 "sys_pipe",
4555 "sys_times",
4556 "sys_ni_syscall", /* old prof syscall holder */
4557 "sys_brk", /* 45 */
4558 "sys_setgid16",
4559 "sys_getgid16",
4560 "sys_signal",
4561 "sys_geteuid16",
4562 "sys_getegid16", /* 50 */
4563 "sys_acct",
4564 "sys_umount", /* recycled never used phys() */
4565 "sys_ni_syscall", /* old lock syscall holder */
4566 "sys_ioctl",
4567 "sys_fcntl", /* 55 */
4568 "sys_ni_syscall", /* old mpx syscall holder */
4569 "sys_setpgid",
4570 "sys_ni_syscall", /* old ulimit syscall holder */
4571 "sys_olduname",
4572 "sys_umask", /* 60 */
4573 "sys_chroot",
4574 "sys_ustat",
4575 "sys_dup2",
4576 "sys_getppid",
4577 "sys_getpgrp", /* 65 */
4578 "sys_setsid",
4579 "sys_sigaction",
4580 "sys_sgetmask",
4581 "sys_ssetmask",
4582 "sys_setreuid16", /* 70 */
4583 "sys_setregid16",
4584 "sys_sigsuspend",
4585 "sys_sigpending",
4586 "sys_sethostname",
4587 "sys_setrlimit", /* 75 */
4588 "sys_old_getrlimit",
4589 "sys_getrusage",
4590 "sys_gettimeofday",
4591 "sys_settimeofday",
4592 "sys_getgroups16", /* 80 */
4593 "sys_setgroups16",
4594 "old_select",
4595 "sys_symlink",
4596 "sys_lstat",
4597 "sys_readlink", /* 85 */
4598 "sys_uselib",
4599 "sys_swapon",
4600 "sys_reboot",
4601 "old_readdir",
4602 "old_mmap", /* 90 */
4603 "sys_munmap",
4604 "sys_truncate",
4605 "sys_ftruncate",
4606 "sys_fchmod",
4607 "sys_fchown16", /* 95 */
4608 "sys_getpriority",
4609 "sys_setpriority",
4610 "sys_ni_syscall", /* old profil syscall holder */
4611 "sys_statfs",
4612 "sys_fstatfs", /* 100 */
4613 "sys_ioperm",
4614 "sys_socketcall",
4615 "sys_syslog",
4616 "sys_setitimer",
4617 "sys_getitimer", /* 105 */
4618 "sys_newstat",
4619 "sys_newlstat",
4620 "sys_newfstat",
4621 "sys_uname",
4622 "sys_iopl", /* 110 */
4623 "sys_vhangup",
4624 "sys_ni_syscall", /* old "idle" system call */
4625 "sys_vm86old",
4626 "sys_wait4",
4627 "sys_swapoff", /* 115 */
4628 "sys_sysinfo",
4629 "sys_ipc",
4630 "sys_fsync",
4631 "sys_sigreturn",
4632 "sys_clone", /* 120 */
4633 "sys_setdomainname",
4634 "sys_newuname",
4635 "sys_modify_ldt",
4636 "sys_adjtimex",
4637 "sys_mprotect", /* 125 */
4638 "sys_sigprocmask",
4639 "sys_ni_syscall", /* old "create_module" */
4640 "sys_init_module",
4641 "sys_delete_module",
4642 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4643 "sys_quotactl",
4644 "sys_getpgid",
4645 "sys_fchdir",
4646 "sys_bdflush",
4647 "sys_sysfs", /* 135 */
4648 "sys_personality",
4649 "sys_ni_syscall", /* reserved for afs_syscall */
4650 "sys_setfsuid16",
4651 "sys_setfsgid16",
4652 "sys_llseek", /* 140 */
4653 "sys_getdents",
4654 "sys_select",
4655 "sys_flock",
4656 "sys_msync",
4657 "sys_readv", /* 145 */
4658 "sys_writev",
4659 "sys_getsid",
4660 "sys_fdatasync",
4661 "sys_sysctl",
4662 "sys_mlock", /* 150 */
4663 "sys_munlock",
4664 "sys_mlockall",
4665 "sys_munlockall",
4666 "sys_sched_setparam",
4667 "sys_sched_getparam", /* 155 */
4668 "sys_sched_setscheduler",
4669 "sys_sched_getscheduler",
4670 "sys_sched_yield",
4671 "sys_sched_get_priority_max",
4672 "sys_sched_get_priority_min", /* 160 */
4673 "sys_sched_rr_get_interval",
4674 "sys_nanosleep",
4675 "sys_mremap",
4676 "sys_setresuid16",
4677 "sys_getresuid16", /* 165 */
4678 "sys_vm86",
4679 "sys_ni_syscall", /* Old sys_query_module */
4680 "sys_poll",
4681 "sys_nfsservctl",
4682 "sys_setresgid16", /* 170 */
4683 "sys_getresgid16",
4684 "sys_prctl",
4685 "sys_rt_sigreturn",
4686 "sys_rt_sigaction",
4687 "sys_rt_sigprocmask", /* 175 */
4688 "sys_rt_sigpending",
4689 "sys_rt_sigtimedwait",
4690 "sys_rt_sigqueueinfo",
4691 "sys_rt_sigsuspend",
4692 "sys_pread64", /* 180 */
4693 "sys_pwrite64",
4694 "sys_chown16",
4695 "sys_getcwd",
4696 "sys_capget",
4697 "sys_capset", /* 185 */
4698 "sys_sigaltstack",
4699 "sys_sendfile",
4700 "sys_ni_syscall", /* reserved for streams1 */
4701 "sys_ni_syscall", /* reserved for streams2 */
4702 "sys_vfork", /* 190 */
4703 "sys_getrlimit",
4704 "sys_mmap2",
4705 "sys_truncate64",
4706 "sys_ftruncate64",
4707 "sys_stat64", /* 195 */
4708 "sys_lstat64",
4709 "sys_fstat64",
4710 "sys_lchown",
4711 "sys_getuid",
4712 "sys_getgid", /* 200 */
4713 "sys_geteuid",
4714 "sys_getegid",
4715 "sys_setreuid",
4716 "sys_setregid",
4717 "sys_getgroups", /* 205 */
4718 "sys_setgroups",
4719 "sys_fchown",
4720 "sys_setresuid",
4721 "sys_getresuid",
4722 "sys_setresgid", /* 210 */
4723 "sys_getresgid",
4724 "sys_chown",
4725 "sys_setuid",
4726 "sys_setgid",
4727 "sys_setfsuid", /* 215 */
4728 "sys_setfsgid",
4729 "sys_pivot_root",
4730 "sys_mincore",
4731 "sys_madvise",
4732 "sys_getdents64", /* 220 */
4733 "sys_fcntl64",
4734 "sys_ni_syscall", /* reserved for TUX */
4735 "sys_ni_syscall",
4736 "sys_gettid",
4737 "sys_readahead", /* 225 */
4738 "sys_setxattr",
4739 "sys_lsetxattr",
4740 "sys_fsetxattr",
4741 "sys_getxattr",
4742 "sys_lgetxattr", /* 230 */
4743 "sys_fgetxattr",
4744 "sys_listxattr",
4745 "sys_llistxattr",
4746 "sys_flistxattr",
4747 "sys_removexattr", /* 235 */
4748 "sys_lremovexattr",
4749 "sys_fremovexattr",
4750 "sys_tkill",
4751 "sys_sendfile64",
4752 "sys_futex", /* 240 */
4753 "sys_sched_setaffinity",
4754 "sys_sched_getaffinity",
4755 "sys_set_thread_area",
4756 "sys_get_thread_area",
4757 "sys_io_setup", /* 245 */
4758 "sys_io_destroy",
4759 "sys_io_getevents",
4760 "sys_io_submit",
4761 "sys_io_cancel",
4762 "sys_fadvise64", /* 250 */
4763 "sys_ni_syscall",
4764 "sys_exit_group",
4765 "sys_lookup_dcookie",
4766 "sys_epoll_create",
4767 "sys_epoll_ctl", /* 255 */
4768 "sys_epoll_wait",
4769 "sys_remap_file_pages",
4770 "sys_set_tid_address",
4771 "sys_timer_create",
4772 "sys_timer_settime", /* 260 */
4773 "sys_timer_gettime",
4774 "sys_timer_getoverrun",
4775 "sys_timer_delete",
4776 "sys_clock_settime",
4777 "sys_clock_gettime", /* 265 */
4778 "sys_clock_getres",
4779 "sys_clock_nanosleep",
4780 "sys_statfs64",
4781 "sys_fstatfs64",
4782 "sys_tgkill", /* 270 */
4783 "sys_utimes",
4784 "sys_fadvise64_64",
4785 "sys_ni_syscall" /* sys_vserver */
4786 };
4787
4788 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4789 switch (uEAX)
4790 {
4791 default:
4792 if (uEAX < RT_ELEMENTS(apsz))
4793 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4794 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4795 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4796 else
4797 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4798 break;
4799
4800 }
4801}
4802
4803
4804/**
4805 * Dumps an OpenBSD system call.
4806 * @param pVCpu VMCPU handle.
4807 */
4808void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4809{
4810 static const char *apsz[] =
4811 {
4812 "SYS_syscall", //0
4813 "SYS_exit", //1
4814 "SYS_fork", //2
4815 "SYS_read", //3
4816 "SYS_write", //4
4817 "SYS_open", //5
4818 "SYS_close", //6
4819 "SYS_wait4", //7
4820 "SYS_8",
4821 "SYS_link", //9
4822 "SYS_unlink", //10
4823 "SYS_11",
4824 "SYS_chdir", //12
4825 "SYS_fchdir", //13
4826 "SYS_mknod", //14
4827 "SYS_chmod", //15
4828 "SYS_chown", //16
4829 "SYS_break", //17
4830 "SYS_18",
4831 "SYS_19",
4832 "SYS_getpid", //20
4833 "SYS_mount", //21
4834 "SYS_unmount", //22
4835 "SYS_setuid", //23
4836 "SYS_getuid", //24
4837 "SYS_geteuid", //25
4838 "SYS_ptrace", //26
4839 "SYS_recvmsg", //27
4840 "SYS_sendmsg", //28
4841 "SYS_recvfrom", //29
4842 "SYS_accept", //30
4843 "SYS_getpeername", //31
4844 "SYS_getsockname", //32
4845 "SYS_access", //33
4846 "SYS_chflags", //34
4847 "SYS_fchflags", //35
4848 "SYS_sync", //36
4849 "SYS_kill", //37
4850 "SYS_38",
4851 "SYS_getppid", //39
4852 "SYS_40",
4853 "SYS_dup", //41
4854 "SYS_opipe", //42
4855 "SYS_getegid", //43
4856 "SYS_profil", //44
4857 "SYS_ktrace", //45
4858 "SYS_sigaction", //46
4859 "SYS_getgid", //47
4860 "SYS_sigprocmask", //48
4861 "SYS_getlogin", //49
4862 "SYS_setlogin", //50
4863 "SYS_acct", //51
4864 "SYS_sigpending", //52
4865 "SYS_osigaltstack", //53
4866 "SYS_ioctl", //54
4867 "SYS_reboot", //55
4868 "SYS_revoke", //56
4869 "SYS_symlink", //57
4870 "SYS_readlink", //58
4871 "SYS_execve", //59
4872 "SYS_umask", //60
4873 "SYS_chroot", //61
4874 "SYS_62",
4875 "SYS_63",
4876 "SYS_64",
4877 "SYS_65",
4878 "SYS_vfork", //66
4879 "SYS_67",
4880 "SYS_68",
4881 "SYS_sbrk", //69
4882 "SYS_sstk", //70
4883 "SYS_61",
4884 "SYS_vadvise", //72
4885 "SYS_munmap", //73
4886 "SYS_mprotect", //74
4887 "SYS_madvise", //75
4888 "SYS_76",
4889 "SYS_77",
4890 "SYS_mincore", //78
4891 "SYS_getgroups", //79
4892 "SYS_setgroups", //80
4893 "SYS_getpgrp", //81
4894 "SYS_setpgid", //82
4895 "SYS_setitimer", //83
4896 "SYS_84",
4897 "SYS_85",
4898 "SYS_getitimer", //86
4899 "SYS_87",
4900 "SYS_88",
4901 "SYS_89",
4902 "SYS_dup2", //90
4903 "SYS_91",
4904 "SYS_fcntl", //92
4905 "SYS_select", //93
4906 "SYS_94",
4907 "SYS_fsync", //95
4908 "SYS_setpriority", //96
4909 "SYS_socket", //97
4910 "SYS_connect", //98
4911 "SYS_99",
4912 "SYS_getpriority", //100
4913 "SYS_101",
4914 "SYS_102",
4915 "SYS_sigreturn", //103
4916 "SYS_bind", //104
4917 "SYS_setsockopt", //105
4918 "SYS_listen", //106
4919 "SYS_107",
4920 "SYS_108",
4921 "SYS_109",
4922 "SYS_110",
4923 "SYS_sigsuspend", //111
4924 "SYS_112",
4925 "SYS_113",
4926 "SYS_114",
4927 "SYS_115",
4928 "SYS_gettimeofday", //116
4929 "SYS_getrusage", //117
4930 "SYS_getsockopt", //118
4931 "SYS_119",
4932 "SYS_readv", //120
4933 "SYS_writev", //121
4934 "SYS_settimeofday", //122
4935 "SYS_fchown", //123
4936 "SYS_fchmod", //124
4937 "SYS_125",
4938 "SYS_setreuid", //126
4939 "SYS_setregid", //127
4940 "SYS_rename", //128
4941 "SYS_129",
4942 "SYS_130",
4943 "SYS_flock", //131
4944 "SYS_mkfifo", //132
4945 "SYS_sendto", //133
4946 "SYS_shutdown", //134
4947 "SYS_socketpair", //135
4948 "SYS_mkdir", //136
4949 "SYS_rmdir", //137
4950 "SYS_utimes", //138
4951 "SYS_139",
4952 "SYS_adjtime", //140
4953 "SYS_141",
4954 "SYS_142",
4955 "SYS_143",
4956 "SYS_144",
4957 "SYS_145",
4958 "SYS_146",
4959 "SYS_setsid", //147
4960 "SYS_quotactl", //148
4961 "SYS_149",
4962 "SYS_150",
4963 "SYS_151",
4964 "SYS_152",
4965 "SYS_153",
4966 "SYS_154",
4967 "SYS_nfssvc", //155
4968 "SYS_156",
4969 "SYS_157",
4970 "SYS_158",
4971 "SYS_159",
4972 "SYS_160",
4973 "SYS_getfh", //161
4974 "SYS_162",
4975 "SYS_163",
4976 "SYS_164",
4977 "SYS_sysarch", //165
4978 "SYS_166",
4979 "SYS_167",
4980 "SYS_168",
4981 "SYS_169",
4982 "SYS_170",
4983 "SYS_171",
4984 "SYS_172",
4985 "SYS_pread", //173
4986 "SYS_pwrite", //174
4987 "SYS_175",
4988 "SYS_176",
4989 "SYS_177",
4990 "SYS_178",
4991 "SYS_179",
4992 "SYS_180",
4993 "SYS_setgid", //181
4994 "SYS_setegid", //182
4995 "SYS_seteuid", //183
4996 "SYS_lfs_bmapv", //184
4997 "SYS_lfs_markv", //185
4998 "SYS_lfs_segclean", //186
4999 "SYS_lfs_segwait", //187
5000 "SYS_188",
5001 "SYS_189",
5002 "SYS_190",
5003 "SYS_pathconf", //191
5004 "SYS_fpathconf", //192
5005 "SYS_swapctl", //193
5006 "SYS_getrlimit", //194
5007 "SYS_setrlimit", //195
5008 "SYS_getdirentries", //196
5009 "SYS_mmap", //197
5010 "SYS___syscall", //198
5011 "SYS_lseek", //199
5012 "SYS_truncate", //200
5013 "SYS_ftruncate", //201
5014 "SYS___sysctl", //202
5015 "SYS_mlock", //203
5016 "SYS_munlock", //204
5017 "SYS_205",
5018 "SYS_futimes", //206
5019 "SYS_getpgid", //207
5020 "SYS_xfspioctl", //208
5021 "SYS_209",
5022 "SYS_210",
5023 "SYS_211",
5024 "SYS_212",
5025 "SYS_213",
5026 "SYS_214",
5027 "SYS_215",
5028 "SYS_216",
5029 "SYS_217",
5030 "SYS_218",
5031 "SYS_219",
5032 "SYS_220",
5033 "SYS_semget", //221
5034 "SYS_222",
5035 "SYS_223",
5036 "SYS_224",
5037 "SYS_msgget", //225
5038 "SYS_msgsnd", //226
5039 "SYS_msgrcv", //227
5040 "SYS_shmat", //228
5041 "SYS_229",
5042 "SYS_shmdt", //230
5043 "SYS_231",
5044 "SYS_clock_gettime", //232
5045 "SYS_clock_settime", //233
5046 "SYS_clock_getres", //234
5047 "SYS_235",
5048 "SYS_236",
5049 "SYS_237",
5050 "SYS_238",
5051 "SYS_239",
5052 "SYS_nanosleep", //240
5053 "SYS_241",
5054 "SYS_242",
5055 "SYS_243",
5056 "SYS_244",
5057 "SYS_245",
5058 "SYS_246",
5059 "SYS_247",
5060 "SYS_248",
5061 "SYS_249",
5062 "SYS_minherit", //250
5063 "SYS_rfork", //251
5064 "SYS_poll", //252
5065 "SYS_issetugid", //253
5066 "SYS_lchown", //254
5067 "SYS_getsid", //255
5068 "SYS_msync", //256
5069 "SYS_257",
5070 "SYS_258",
5071 "SYS_259",
5072 "SYS_getfsstat", //260
5073 "SYS_statfs", //261
5074 "SYS_fstatfs", //262
5075 "SYS_pipe", //263
5076 "SYS_fhopen", //264
5077 "SYS_265",
5078 "SYS_fhstatfs", //266
5079 "SYS_preadv", //267
5080 "SYS_pwritev", //268
5081 "SYS_kqueue", //269
5082 "SYS_kevent", //270
5083 "SYS_mlockall", //271
5084 "SYS_munlockall", //272
5085 "SYS_getpeereid", //273
5086 "SYS_274",
5087 "SYS_275",
5088 "SYS_276",
5089 "SYS_277",
5090 "SYS_278",
5091 "SYS_279",
5092 "SYS_280",
5093 "SYS_getresuid", //281
5094 "SYS_setresuid", //282
5095 "SYS_getresgid", //283
5096 "SYS_setresgid", //284
5097 "SYS_285",
5098 "SYS_mquery", //286
5099 "SYS_closefrom", //287
5100 "SYS_sigaltstack", //288
5101 "SYS_shmget", //289
5102 "SYS_semop", //290
5103 "SYS_stat", //291
5104 "SYS_fstat", //292
5105 "SYS_lstat", //293
5106 "SYS_fhstat", //294
5107 "SYS___semctl", //295
5108 "SYS_shmctl", //296
5109 "SYS_msgctl", //297
5110 "SYS_MAXSYSCALL", //298
5111 //299
5112 //300
5113 };
5114 uint32_t uEAX;
5115 if (!LogIsEnabled())
5116 return;
5117 uEAX = CPUMGetGuestEAX(pVCpu);
5118 switch (uEAX)
5119 {
5120 default:
5121 if (uEAX < RT_ELEMENTS(apsz))
5122 {
5123 uint32_t au32Args[8] = {0};
5124 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5125 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5126 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5127 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5128 }
5129 else
5130 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5131 break;
5132 }
5133}
5134
5135
5136#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5137/**
5138 * The Dll main entry point (stub).
5139 */
5140bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5141{
5142 return true;
5143}
5144
/**
 * Minimal CRT-replacement memcpy for IPRT_NO_CRT builds.
 *
 * Plain byte-by-byte forward copy; as with the standard memcpy, the source
 * and destination regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (not modified).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const-qualified: the original dropped the const from src. */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5152
5153#endif
5154
/**
 * Callback invoked by the QEMU core when the CPU's SMM state is updated.
 *
 * Deliberate no-op: the VBox recompiler does nothing special here.
 * NOTE(review): assumes QEMU calls this on SMM entry/exit — confirm
 * against the QEMU cpu code this recompiler is based on.
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette