VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 20421

Last change on this file since 20421 was 20421, checked in by vboxsync, 15 years ago

Rewrote rem notification handling.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 163.7 KB
Line 
1/* $Id: VBoxRecompiler.c 20421 2009-06-09 09:34:53Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the main recompiler phases (registered in REMR3Init). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE   gStatGCPhys2HCVirt;
static STAMPROFILE   gStatHCVirt2GCPhys;
static STAMCOUNTER   gStatCpuGetTSC;
/* Counters for the reasons raw-mode execution was refused. */
static STAMCOUNTER   gStatRefuseTFInhibit;
static STAMCOUNTER   gStatRefuseVM86;
static STAMCOUNTER   gStatRefusePaging;
static STAMCOUNTER   gStatRefusePAE;
static STAMCOUNTER   gStatRefuseIOPLNot0;
static STAMCOUNTER   gStatRefuseIF0;
static STAMCOUNTER   gStatRefuseCode16;
static STAMCOUNTER   gStatRefuseWP0;
static STAMCOUNTER   gStatRefuseRing1or2;
static STAMCOUNTER   gStatRefuseCanExecute;
/* Descriptor-table / TR change counters. */
static STAMCOUNTER   gStatREMGDTChange;
static STAMCOUNTER   gStatREMIDTChange;
static STAMCOUNTER   gStatREMLDTRChange;
static STAMCOUNTER   gStatREMTRChange;
/* Per-selector out-of-sync counters; index order is ES,CS,SS,DS,FS,GS (see STAM_REG calls). */
static STAMCOUNTER   gStatSelOutOfSync[6];
static STAMCOUNTER   gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER   gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks, indexed by access-size log2 (0=byte, 1=word, 2=dword).
 *  Registered with the recompiler via cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, indexed by access-size log2. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access-handler read callbacks (for PGM physical handlers), indexed by access-size log2. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access-handler write callbacks, indexed by access-size log2. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/** Handler for the '.remstep' debugger command; defined further down. */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,     pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,          "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors (registered once in REMR3Init). */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers MMIO/handler memory
 * types, the saved-state unit, debugger commands, statistics, and the
 * handler-notification free list.  Must run before any RAM registration
 * notifications arrive (asserted below).
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD)  /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    /* REM builds its RAM view from registration notifications, so none may have fired yet. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications until init has finished (re-enabled below). */
    pVM->rem.s.fIgnoreAll = true;

    /* Translation-block prologue; allocated executable so generated code can jump to/from it. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits into the recompiler env (leaves 1 and 0x80000001). */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    /* Guard keeps registration once-only across multiple VMs in one process. */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",         STAMUNIT_OCCURENCES,     "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES,     "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES,     "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES,     "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES,     "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES,     "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES,     "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES,     "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES,     "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES,     "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES,     "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES,     "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES,     "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",      STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount",  STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",     STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Build the handler-notification free list: chain all entries through
     * idxNext, leaving the pending list empty (-1) and the free list starting
     * at entry 0.
     */
    PREMHANDLERNOTIFICATION pCur;
    unsigned i;

    pVM->rem.s.idxPendingList = -1;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1; i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }

    /* Last entry terminates the free list. */
    pCur = &pVM->rem.s.aHandlerNotifications[RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1];
    pCur->idxNext = -1;
    pCur->idxSelf = RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1;

    /* rc is VINF_SUCCESS here; the last failure path returned above. */
    return rc;
}
431
432
433/**
434 * Finalizes the REM initialization.
435 *
436 * This is called after all components, devices and drivers has
437 * been initialized. Its main purpose it to finish the RAM related
438 * initialization.
439 *
440 * @returns VBox status code.
441 *
442 * @param pVM The VM handle.
443 */
444REMR3DECL(int) REMR3InitFinalize(PVM pVM)
445{
446 int rc;
447
448 /*
449 * Ram size & dirty bit map.
450 */
451 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
452 pVM->rem.s.fGCPhysLastRamFixed = true;
453#ifdef RT_STRICT
454 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
455#else
456 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
457#endif
458 return rc;
459}
460
461
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map holds one byte per guest page.  In guarded mode the map is
 * page-aligned with an inaccessible tail so overruns fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* +1 turns the last-byte address into a size; overflow check below. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty-map byte per page; assert cb was page aligned (no truncation). */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything beyond the aligned bitmap inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base so the map's END abuts the guard; the unused head pages
           stay accessible.  NOTE(review): the adjusted pointer is what gets freed
           by whoever tears this down - confirm the free path compensates. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
516
517
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters the statistics; mirrors the STAM_REG /
 * STAM_REL_REG calls in REMR3Init.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
588
589
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  Notifications triggered by cpu_reset() must be
     * ignored, hence the fIgnoreAll bracket around it.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
614
615
/**
 * Execute state save operation.
 *
 * Saved layout (must stay in sync with remR3Load): hflags, ~0 separator,
 * CPU_RAW_RING0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the terminator's status is checked; earlier Put errors surface here too. */
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
642
643
/**
 * Execute state load operation.
 *
 * Accepts the current format and the 1.6 format (which carries a redundant
 * CPU state blob and an invalidated-pages list that are read and discarded
 * or replayed respectively).
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Bound check before filling the fixed-size array below. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * NOTE(review): unlike REMR3Init, the 0x80000001 query below does not
     * refresh cpuid_ext3_features (ecx is discarded) - confirm this is
     * intentional and that the value set at init time remains valid.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* shadows the outer pVCpu intentionally */

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
770
771
772
773#undef LOG_GROUP
774#define LOG_GROUP LOG_GROUP_REM_RUN
775
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved here and restored at the bottom.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (cpu_breakpoint_remove returns 0 when a breakpoint was actually removed,
     * so fBp records whether one must be re-inserted afterwards.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/suspend makes the virtual clock advance over the stepped instruction. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
860
861
862/**
863 * Set a breakpoint using the REM facilities.
864 *
865 * @returns VBox status code.
866 * @param pVM The VM handle.
867 * @param Address The breakpoint address.
868 * @thread The emulation thread.
869 */
870REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
871{
872 VM_ASSERT_EMT(pVM);
873 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
874 {
875 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
876 return VINF_SUCCESS;
877 }
878 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
879 return VERR_REM_NO_MORE_BP_SLOTS;
880}
881
882
883/**
884 * Clears a breakpoint set by REMR3BreakpointSet().
885 *
886 * @returns VBox status code.
887 * @param pVM The VM handle.
888 * @param Address The breakpoint address.
889 * @thread The emulation thread.
890 */
891REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
892{
893 VM_ASSERT_EMT(pVM);
894 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
895 {
896 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
897 return VINF_SUCCESS;
898 }
899 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
900 return VERR_REM_BP_NOT_FOUND;
901}
902
903
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    /* Restore the flush request - it was only deferred for this call, not cancelled. */
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request flags so they can be restored after the
           single-instruction run below; only external/exit style requests are
           expected here. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        /* Translate the qemu exit reason into a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for the
                   current PC to tell the two apart. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1052
1053
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Translate the qemu exit reason into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Developer-only scaffolding, compiled out. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table for the
               current PC to tell the two apart. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1182
1183
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env             The CPU env struct.
 * @param   eip             The EIP to check this for (might differ from env->eip).
 * @param   fFlags          hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException     Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields HWACCMR3CanExecuteGuest looks at are filled in;
         * the rest of Ctx is left uninitialized on purpose.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        /* Convert qemu's descriptor flags layout to the VBox attribute format. */
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    /* V86 (virtual 8086) mode is not supported in raw mode. */
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires both paging and protected mode to be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    /* PAE guests are only acceptable when the CPU profile exposes PAE. */
    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring 3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Supervisor code path (CPL != 3). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always executed raw, overriding the IF check below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1418
1419
1420/**
1421 * Fetches a code byte.
1422 *
1423 * @returns Success indicator (bool) for ease of use.
1424 * @param env The CPU environment structure.
1425 * @param GCPtrInstr Where to fetch code.
1426 * @param pu8Byte Where to store the byte on success
1427 */
1428bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1429{
1430 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1431 if (RT_SUCCESS(rc))
1432 return true;
1433 return false;
1434}
1435
1436
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (presumably for the V86
       interrupt redirection bitmap) - flag it before updating cr4. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure, request a full CR3 resync instead of failing hard. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1485
1486
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into a host pointer, encoding the
 * access restrictions in the low bits of the result:
 *   - (void *)1            : translation failed (catch-all / unassigned page),
 *   - pointer with bit 1   : writes must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE),
 *   - plain pointer        : direct access is fine.
 *
 * NOTE(review): the fWritable parameter is ignored - the call below
 * hard-codes true and write restrictions are instead signalled via the
 * returned pointer bits. Confirm this is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1509
1510
1511/**
1512 * Called from tlb_protect_code in order to write monitor a code page.
1513 *
1514 * @param env Pointer to the CPU environment.
1515 * @param GCPtr Code page to monitor
1516 */
1517void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1518{
1519#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1520 Assert(env->pVM->rem.s.fInREM);
1521 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1522 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1523 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1524 && !(env->eflags & VM_MASK) /* no V86 mode */
1525 && !HWACCMIsEnabled(env->pVM))
1526 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1527#endif
1528}
1529
1530
1531/**
1532 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1533 *
1534 * @param env Pointer to the CPU environment.
1535 * @param GCPtr Code page to monitor
1536 */
1537void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1538{
1539 Assert(env->pVM->rem.s.fInREM);
1540#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1541 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1542 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1543 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1544 && !(env->eflags & VM_MASK) /* no V86 mode */
1545 && !HWACCMIsEnabled(env->pVM))
1546 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1547#endif
1548}
1549
1550
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (presumably for the V86
       interrupt redirection bitmap) - flag it before updating cr4. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1595
1596
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (presumably for the V86
       interrupt redirection bitmap) - flag it before updating cr4. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* EFER doesn't exist in the 32-bit-only target build. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* EM informational statuses are forwarded to the caller via
               the pending-RC mechanism instead of aborting. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1647
1648
/**
 * Called from compiled code to run dma.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    /* Leave the emulated-code profiling bracket while servicing DMA so the
       DMA time isn't accounted as guest execution. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1660
1661
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Swap the profiling bracket from emulated-code to timer work and back. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1677
1678
/**
 * Record trap occurance
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap counters (traps 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only CPU exceptions (0..0x1f) taken in protected, non-V86 mode are
       tracked for the repeated-trap guard below. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* The same exception repeating at the same EIP/CR2 more than 512
           times is treated as a wedged guest and aborted with a status code. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or a different location restarts the count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not a tracked CPU exception; reset the repeat counter. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1738
1739
/**
 * Clear current active trap
 *
 * Resets the pending-exception tracking used by remR3NotifyTrap's
 * repeated-trap guard.
 *
 * @param   pVM     VM Handle.
 */
void remR3TrapClear(PVM pVM)
{
    pVM->rem.s.cPendingExceptions = 0;
    pVM->rem.s.uPendingException  = 0;
    pVM->rem.s.uPendingExcptEIP   = 0;
    pVM->rem.s.uPendingExcptCR2   = 0;
}
1752
1753
/**
 * Record previous call instruction addresses
 *
 * Forwards the current EIP to CSAM so it can track call targets.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1763
1764
1765/**
1766 * Syncs the internal REM state with the VM.
1767 *
1768 * This must be called before REMR3Run() is invoked whenever when the REM
1769 * state is not up to date. Calling it several times in a row is not
1770 * permitted.
1771 *
1772 * @returns VBox status code.
1773 *
1774 * @param pVM VM Handle.
1775 * @param pVCpu VMCPU Handle.
1776 *
1777 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1778 * no do this since the majority of the callers don't want any unnecessary of events
1779 * pending that would immediatly interrupt execution.
1780 */
1781REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1782{
1783 register const CPUMCTX *pCtx;
1784 register unsigned fFlags;
1785 bool fHiddenSelRegsValid;
1786 unsigned i;
1787 TRPMEVENT enmType;
1788 uint8_t u8TrapNo;
1789 int rc;
1790
1791 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1792 Log2(("REMR3State:\n"));
1793
1794 pVM->rem.s.Env.pVCpu = pVCpu;
1795 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1796 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1797
1798 Assert(!pVM->rem.s.fInREM);
1799 pVM->rem.s.fInStateSync = true;
1800
1801 /*
1802 * If we have to flush TBs, do that immediately.
1803 */
1804 if (pVM->rem.s.fFlushTBs)
1805 {
1806 STAM_COUNTER_INC(&gStatFlushTBs);
1807 tb_flush(&pVM->rem.s.Env);
1808 pVM->rem.s.fFlushTBs = false;
1809 }
1810
1811 /*
1812 * Copy the registers which require no special handling.
1813 */
1814#ifdef TARGET_X86_64
1815 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1816 Assert(R_EAX == 0);
1817 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1818 Assert(R_ECX == 1);
1819 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1820 Assert(R_EDX == 2);
1821 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1822 Assert(R_EBX == 3);
1823 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1824 Assert(R_ESP == 4);
1825 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1826 Assert(R_EBP == 5);
1827 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1828 Assert(R_ESI == 6);
1829 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1830 Assert(R_EDI == 7);
1831 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1832 pVM->rem.s.Env.regs[8] = pCtx->r8;
1833 pVM->rem.s.Env.regs[9] = pCtx->r9;
1834 pVM->rem.s.Env.regs[10] = pCtx->r10;
1835 pVM->rem.s.Env.regs[11] = pCtx->r11;
1836 pVM->rem.s.Env.regs[12] = pCtx->r12;
1837 pVM->rem.s.Env.regs[13] = pCtx->r13;
1838 pVM->rem.s.Env.regs[14] = pCtx->r14;
1839 pVM->rem.s.Env.regs[15] = pCtx->r15;
1840
1841 pVM->rem.s.Env.eip = pCtx->rip;
1842
1843 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1844#else
1845 Assert(R_EAX == 0);
1846 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1847 Assert(R_ECX == 1);
1848 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1849 Assert(R_EDX == 2);
1850 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1851 Assert(R_EBX == 3);
1852 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1853 Assert(R_ESP == 4);
1854 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1855 Assert(R_EBP == 5);
1856 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1857 Assert(R_ESI == 6);
1858 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1859 Assert(R_EDI == 7);
1860 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1861 pVM->rem.s.Env.eip = pCtx->eip;
1862
1863 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1864#endif
1865
1866 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1867
1868 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1869 for (i=0;i<8;i++)
1870 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1871
1872 /*
1873 * Clear the halted hidden flag (the interrupt waking up the CPU can
1874 * have been dispatched in raw mode).
1875 */
1876 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1877
1878 /*
1879 * Replay invlpg?
1880 */
1881 if (pVM->rem.s.cInvalidatedPages)
1882 {
1883 RTUINT i;
1884
1885 pVM->rem.s.fIgnoreInvlPg = true;
1886 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1887 {
1888 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1889 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1890 }
1891 pVM->rem.s.fIgnoreInvlPg = false;
1892 pVM->rem.s.cInvalidatedPages = 0;
1893 }
1894
1895 /* Replay notification changes. */
1896 REMR3ReplayHandlerNotifications(pVM);
1897
1898 /* Update MSRs; before CRx registers! */
1899 pVM->rem.s.Env.efer = pCtx->msrEFER;
1900 pVM->rem.s.Env.star = pCtx->msrSTAR;
1901 pVM->rem.s.Env.pat = pCtx->msrPAT;
1902#ifdef TARGET_X86_64
1903 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1904 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1905 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1906 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1907
1908 /* Update the internal long mode activate flag according to the new EFER value. */
1909 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1910 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1911 else
1912 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1913#endif
1914
1915 /*
1916 * Registers which are rarely changed and require special handling / order when changed.
1917 */
1918 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1919 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1920 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1921 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1922 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1923 {
1924 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1925 {
1926 pVM->rem.s.fIgnoreCR3Load = true;
1927 tlb_flush(&pVM->rem.s.Env, true);
1928 pVM->rem.s.fIgnoreCR3Load = false;
1929 }
1930
1931 /* CR4 before CR0! */
1932 if (fFlags & CPUM_CHANGED_CR4)
1933 {
1934 pVM->rem.s.fIgnoreCR3Load = true;
1935 pVM->rem.s.fIgnoreCpuMode = true;
1936 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1937 pVM->rem.s.fIgnoreCpuMode = false;
1938 pVM->rem.s.fIgnoreCR3Load = false;
1939 }
1940
1941 if (fFlags & CPUM_CHANGED_CR0)
1942 {
1943 pVM->rem.s.fIgnoreCR3Load = true;
1944 pVM->rem.s.fIgnoreCpuMode = true;
1945 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1946 pVM->rem.s.fIgnoreCpuMode = false;
1947 pVM->rem.s.fIgnoreCR3Load = false;
1948 }
1949
1950 if (fFlags & CPUM_CHANGED_CR3)
1951 {
1952 pVM->rem.s.fIgnoreCR3Load = true;
1953 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1954 pVM->rem.s.fIgnoreCR3Load = false;
1955 }
1956
1957 if (fFlags & CPUM_CHANGED_GDTR)
1958 {
1959 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1960 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1961 }
1962
1963 if (fFlags & CPUM_CHANGED_IDTR)
1964 {
1965 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1966 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1967 }
1968
1969 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1970 {
1971 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1972 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1973 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1974 }
1975
1976 if (fFlags & CPUM_CHANGED_LDTR)
1977 {
1978 if (fHiddenSelRegsValid)
1979 {
1980 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1981 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1982 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1983 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1984 }
1985 else
1986 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1987 }
1988
1989 if (fFlags & CPUM_CHANGED_CPUID)
1990 {
1991 uint32_t u32Dummy;
1992
1993 /*
1994 * Get the CPUID features.
1995 */
1996 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1997 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1998 }
1999
2000 /* Sync FPU state after CR4, CPUID and EFER (!). */
2001 if (fFlags & CPUM_CHANGED_FPU_REM)
2002 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2003 }
2004
2005 /*
2006 * Sync TR unconditionally to make life simpler.
2007 */
2008 pVM->rem.s.Env.tr.selector = pCtx->tr;
2009 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2010 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2011 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2012 /* Note! do_interrupt will fault if the busy flag is still set... */
2013 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2014
2015 /*
2016 * Update selector registers.
2017 * This must be done *after* we've synced gdt, ldt and crX registers
2018 * since we're reading the GDT/LDT om sync_seg. This will happen with
2019 * saved state which takes a quick dip into rawmode for instance.
2020 */
2021 /*
2022 * Stack; Note first check this one as the CPL might have changed. The
2023 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2024 */
2025
2026 if (fHiddenSelRegsValid)
2027 {
2028 /* The hidden selector registers are valid in the CPU context. */
2029 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2030
2031 /* Set current CPL */
2032 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2033
2034 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2035 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2036 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2037 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2038 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2039 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2040 }
2041 else
2042 {
2043 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2044 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2045 {
2046 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2047
2048 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2049 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2050#ifdef VBOX_WITH_STATISTICS
2051 if (pVM->rem.s.Env.segs[R_SS].newselector)
2052 {
2053 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2054 }
2055#endif
2056 }
2057 else
2058 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2059
2060 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2061 {
2062 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2063 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2064#ifdef VBOX_WITH_STATISTICS
2065 if (pVM->rem.s.Env.segs[R_ES].newselector)
2066 {
2067 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2068 }
2069#endif
2070 }
2071 else
2072 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2073
2074 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2075 {
2076 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2077 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2078#ifdef VBOX_WITH_STATISTICS
2079 if (pVM->rem.s.Env.segs[R_CS].newselector)
2080 {
2081 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2082 }
2083#endif
2084 }
2085 else
2086 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2087
2088 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2089 {
2090 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2091 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2092#ifdef VBOX_WITH_STATISTICS
2093 if (pVM->rem.s.Env.segs[R_DS].newselector)
2094 {
2095 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2096 }
2097#endif
2098 }
2099 else
2100 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2101
2102 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2103 * be the same but not the base/limit. */
2104 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2105 {
2106 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2107 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2108#ifdef VBOX_WITH_STATISTICS
2109 if (pVM->rem.s.Env.segs[R_FS].newselector)
2110 {
2111 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2112 }
2113#endif
2114 }
2115 else
2116 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2117
2118 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2119 {
2120 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2121 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2122#ifdef VBOX_WITH_STATISTICS
2123 if (pVM->rem.s.Env.segs[R_GS].newselector)
2124 {
2125 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2126 }
2127#endif
2128 }
2129 else
2130 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2131 }
2132
2133 /*
2134 * Check for traps.
2135 */
2136 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2137 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2138 if (RT_SUCCESS(rc))
2139 {
2140#ifdef DEBUG
2141 if (u8TrapNo == 0x80)
2142 {
2143 remR3DumpLnxSyscall(pVCpu);
2144 remR3DumpOBsdSyscall(pVCpu);
2145 }
2146#endif
2147
2148 pVM->rem.s.Env.exception_index = u8TrapNo;
2149 if (enmType != TRPM_SOFTWARE_INT)
2150 {
2151 pVM->rem.s.Env.exception_is_int = 0;
2152 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2153 }
2154 else
2155 {
2156 /*
2157 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2158 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2159 * for int03 and into.
2160 */
2161 pVM->rem.s.Env.exception_is_int = 1;
2162 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2163 /* int 3 may be generated by one-byte 0xcc */
2164 if (u8TrapNo == 3)
2165 {
2166 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2167 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2168 }
2169 /* int 4 may be generated by one-byte 0xce */
2170 else if (u8TrapNo == 4)
2171 {
2172 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2173 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2174 }
2175 }
2176
2177 /* get error code and cr2 if needed. */
2178 switch (u8TrapNo)
2179 {
2180 case 0x0e:
2181 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2182 /* fallthru */
2183 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2184 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2185 break;
2186
2187 case 0x11: case 0x08:
2188 default:
2189 pVM->rem.s.Env.error_code = 0;
2190 break;
2191 }
2192
2193 /*
2194 * We can now reset the active trap since the recompiler is gonna have a go at it.
2195 */
2196 rc = TRPMResetTrap(pVCpu);
2197 AssertRC(rc);
2198 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2199 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2200 }
2201
2202 /*
2203 * Clear old interrupt request flags; Check for pending hardware interrupts.
2204 * (See @remark for why we don't check for other FFs.)
2205 */
2206 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2207 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2208 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2209 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2210
2211 /*
2212 * We're now in REM mode.
2213 */
2214 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2215 pVM->rem.s.fInREM = true;
2216 pVM->rem.s.fInStateSync = false;
2217 pVM->rem.s.cCanExecuteRaw = 0;
2218 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2219 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2220 return VINF_SUCCESS;
2221}
2222
2223
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);  /* Only legal after REMR3State() put us in REM mode. */

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors QEMU left marked as out of sync (lazy reload pending). */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME change affects the virtual interrupt redirection bitmap in the TSS. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: raise the SELM/TRPM sync force-flags only on an actual base change. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* (flags >> 8) & 0xF0FF extracts the attribute word from QEMU's cached 2nd
       descriptor dword, masking out the limit 19:16 bits (the 0x0F00 part). */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit that REMR3State() stripped before entering REM. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base     = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit    = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u      = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base     = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit    = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u      = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base     = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit    = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u      = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base     = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit    = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u      = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base     = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit    = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u      = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base     = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit    = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u      = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs       = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip      = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp      = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER           = pVM->rem.s.Env.efer;
    pCtx->msrSTAR           = pVM->rem.s.Env.star;
    pCtx->msrPAT            = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR          = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR          = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK         = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE   = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * Forward any exception still pending in the recompiler to TRPM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* #PF additionally carries the fault address; #TS/#NP/#SS/#GP/#AC/#DF carry an error code. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2474
2475
2476/**
2477 * This is called by the disassembler when it wants to update the cpu state
2478 * before for instance doing a register dump.
2479 */
2480static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2481{
2482 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2483 unsigned i;
2484
2485 Assert(pVM->rem.s.fInREM);
2486
2487 /*
2488 * Copy back the registers.
2489 * This is done in the order they are declared in the CPUMCTX structure.
2490 */
2491
2492 /** @todo FOP */
2493 /** @todo FPUIP */
2494 /** @todo CS */
2495 /** @todo FPUDP */
2496 /** @todo DS */
2497 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2498 pCtx->fpu.MXCSR = 0;
2499 pCtx->fpu.MXCSR_MASK = 0;
2500
2501 /** @todo check if FPU/XMM was actually used in the recompiler */
2502 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2503//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2504
2505#ifdef TARGET_X86_64
2506 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2507 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2508 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2509 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2510 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2511 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2512 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2513 pCtx->r8 = pVM->rem.s.Env.regs[8];
2514 pCtx->r9 = pVM->rem.s.Env.regs[9];
2515 pCtx->r10 = pVM->rem.s.Env.regs[10];
2516 pCtx->r11 = pVM->rem.s.Env.regs[11];
2517 pCtx->r12 = pVM->rem.s.Env.regs[12];
2518 pCtx->r13 = pVM->rem.s.Env.regs[13];
2519 pCtx->r14 = pVM->rem.s.Env.regs[14];
2520 pCtx->r15 = pVM->rem.s.Env.regs[15];
2521
2522 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2523#else
2524 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2525 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2526 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2527 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2528 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2529 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2530 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2531
2532 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2533#endif
2534
2535 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2536
2537 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2538 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2539 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2540 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2541 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2542
2543#ifdef TARGET_X86_64
2544 pCtx->rip = pVM->rem.s.Env.eip;
2545 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2546#else
2547 pCtx->eip = pVM->rem.s.Env.eip;
2548 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2549#endif
2550
2551 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2552 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2553 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2554 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2555 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2556 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2557
2558 for (i = 0; i < 8; i++)
2559 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2560
2561 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2562 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2563 {
2564 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2565 STAM_COUNTER_INC(&gStatREMGDTChange);
2566 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2567 }
2568
2569 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2570 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2571 {
2572 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2573 STAM_COUNTER_INC(&gStatREMIDTChange);
2574 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2575 }
2576
2577 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2578 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2579 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2580 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2581 {
2582 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2583 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2584 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2585 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2586 STAM_COUNTER_INC(&gStatREMLDTRChange);
2587 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2588 }
2589
2590 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2591 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2592 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2593 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2594 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2595 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2596 : 0) )
2597 {
2598 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2599 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2600 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2601 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2602 pCtx->tr = pVM->rem.s.Env.tr.selector;
2603 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2604 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2605 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2606 if (pCtx->trHid.Attr.u)
2607 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2608 STAM_COUNTER_INC(&gStatREMTRChange);
2609 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2610 }
2611
2612 /** @todo These values could still be out of sync! */
2613 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2614 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2615 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2616 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2617
2618 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2619 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2620 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2621
2622 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2623 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2624 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2625
2626 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2627 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2628 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2629
2630 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2631 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2632 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2635 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2636 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2637
2638 /* Sysenter MSR */
2639 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2640 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2641 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2642
2643 /* System MSRs. */
2644 pCtx->msrEFER = pVM->rem.s.Env.efer;
2645 pCtx->msrSTAR = pVM->rem.s.Env.star;
2646 pCtx->msrPAT = pVM->rem.s.Env.pat;
2647#ifdef TARGET_X86_64
2648 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2649 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2650 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2651 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2652#endif
2653
2654}
2655
2656
2657/**
2658 * Update the VMM state information if we're currently in REM.
2659 *
2660 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2661 * we're currently executing in REM and the VMM state is invalid. This method will of
2662 * course check that we're executing in REM before syncing any data over to the VMM.
2663 *
2664 * @param pVM The VM handle.
2665 * @param pVCpu The VMCPU handle.
2666 */
2667REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2668{
2669 if (pVM->rem.s.fInREM)
2670 remR3StateUpdate(pVM, pVCpu);
2671}
2672
2673
2674#undef LOG_GROUP
2675#define LOG_GROUP LOG_GROUP_REM
2676
2677
2678/**
2679 * Notify the recompiler about Address Gate 20 state change.
2680 *
2681 * This notification is required since A20 gate changes are
2682 * initialized from a device driver and the VM might just as
2683 * well be in REM mode as in RAW mode.
2684 *
2685 * @param pVM VM handle.
2686 * @param pVCpu VMCPU handle.
2687 * @param fEnable True if the gate should be enabled.
2688 * False if the gate should be disabled.
2689 */
2690REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2691{
2692 bool fSaved;
2693
2694 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2695 VM_ASSERT_EMT(pVM);
2696
2697 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2698 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2699
2700 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2701
2702 pVM->rem.s.fIgnoreAll = fSaved;
2703}
2704
2705
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY_BIT))
    {
        /* Lockless purging of pending notifications: detach the whole pending
           list with one atomic exchange. -1 marks an empty list.
           NOTE(review): the uint32_t vs. -1 comparisons below rely on the
           implicit conversion of -1 to UINT32_MAX — confirm idxPendingList
           and idxNext are consistently 32-bit. */
        uint32_t idxReqs = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, -1);
        if (idxReqs == -1)
            return;

        Assert(idxReqs < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
        PREMHANDLERNOTIFICATION pReqs = &pVM->rem.s.aHandlerNotifications[idxReqs];

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the detached list is LIFO.)
         */
        PREMHANDLERNOTIFICATION pReq = pReqs;
        pReqs = NULL;
        while (pReq)
        {
            PREMHANDLERNOTIFICATION pCur = pReq;

            /* Advance to the next record before rewriting this one's link. */
            if (pReq->idxNext != -1)
            {
                Assert(pReq->idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
                pReq = &pVM->rem.s.aHandlerNotifications[pReq->idxNext];
            }
            else
                pReq = NULL;

            /* Prepend the current record to the reversed list. */
            pCur->idxNext = (pReqs) ? pReqs->idxSelf : -1;
            pReqs = pCur;
        }

        /* Dispatch each notification to its remR3NotifyHandler* worker,
           then recycle the record onto the lockless free list. */
        while (pReqs)
        {
            PREMHANDLERNOTIFICATION pRec = pReqs;

            switch (pRec->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pRec->u.PhysicalRegister.enmType,
                                                       pRec->u.PhysicalRegister.GCPhys,
                                                       pRec->u.PhysicalRegister.cb,
                                                       pRec->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pRec->u.PhysicalDeregister.enmType,
                                                         pRec->u.PhysicalDeregister.GCPhys,
                                                         pRec->u.PhysicalDeregister.cb,
                                                         pRec->u.PhysicalDeregister.fHasHCHandler,
                                                         pRec->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pRec->u.PhysicalModify.enmType,
                                                     pRec->u.PhysicalModify.GCPhysOld,
                                                     pRec->u.PhysicalModify.GCPhysNew,
                                                     pRec->u.PhysicalModify.cb,
                                                     pRec->u.PhysicalModify.fHasHCHandler,
                                                     pRec->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
                    break;
            }
            /* Advance before pRec->idxNext is clobbered by the free-list push below. */
            if (pReqs->idxNext != -1)
            {
                AssertMsg(pReqs->idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("pReqs->idxNext=%d\n", pReqs->idxNext));
                pReqs = &pVM->rem.s.aHandlerNotifications[pReqs->idxNext];
            }
            else
                pReqs = NULL;

            /* Put the record back into the free list (lockless CAS push). */
            uint32_t idxNext;

            do
            {
                idxNext = pVM->rem.s.idxFreeList;
                pRec->idxNext = idxNext;
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pRec->idxSelf, idxNext));
        }
    }
}
2807
2808
/**
 * Notify REM about changed code page.
 *
 * Resolves the guest-physical page backing pvCodePage and flushes every
 * translation block that covers that page, forcing re-translation of the
 * (self-)modified code.  No-op unless VBOX_REM_PROTECT_PAGES_FROM_SMC is
 * defined.
 *
 * @returns VBox status code.  Always VINF_SUCCESS; a failed guest page
 *          lookup is silently ignored.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pvCodePage  Code page address (guest virtual).
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        /* The CRx values are copied so the flush code sees the current paging mode. */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2847
2848
2849/**
2850 * Notification about a successful MMR3PhysRegister() call.
2851 *
2852 * @param pVM VM handle.
2853 * @param GCPhys The physical address the RAM.
2854 * @param cb Size of the memory.
2855 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2856 */
2857REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2858{
2859 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2860 VM_ASSERT_EMT(pVM);
2861
2862 /*
2863 * Validate input - we trust the caller.
2864 */
2865 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2866 Assert(cb);
2867 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2868 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2869#ifdef VBOX_WITH_REM_LOCKING
2870 Assert(!PGMIsLockOwner(pVM));
2871 EMRemLock(pVM);
2872#endif
2873 /*
2874 * Base ram? Update GCPhysLastRam.
2875 */
2876 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2877 {
2878 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2879 {
2880 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2881 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2882 }
2883 }
2884
2885 /*
2886 * Register the ram.
2887 */
2888 Assert(!pVM->rem.s.fIgnoreAll);
2889 pVM->rem.s.fIgnoreAll = true;
2890
2891 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2892 Assert(pVM->rem.s.fIgnoreAll);
2893 pVM->rem.s.fIgnoreAll = false;
2894#ifdef VBOX_WITH_REM_LOCKING
2895 EMRemUnlock(pVM);
2896#endif
2897}
2898
2899
2900/**
2901 * Notification about a successful MMR3PhysRomRegister() call.
2902 *
2903 * @param pVM VM handle.
2904 * @param GCPhys The physical address of the ROM.
2905 * @param cb The size of the ROM.
2906 * @param pvCopy Pointer to the ROM copy.
2907 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2908 * This function will be called when ever the protection of the
2909 * shadow ROM changes (at reset and end of POST).
2910 */
2911REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2912{
2913 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2914 VM_ASSERT_EMT(pVM);
2915
2916 /*
2917 * Validate input - we trust the caller.
2918 */
2919 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2920 Assert(cb);
2921 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2922
2923#ifdef VBOX_WITH_REM_LOCKING
2924 EMRemLock(pVM);
2925#endif
2926 /*
2927 * Register the rom.
2928 */
2929 Assert(!pVM->rem.s.fIgnoreAll);
2930 pVM->rem.s.fIgnoreAll = true;
2931
2932 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2933
2934 Assert(pVM->rem.s.fIgnoreAll);
2935 pVM->rem.s.fIgnoreAll = false;
2936#ifdef VBOX_WITH_REM_LOCKING
2937 EMRemUnlock(pVM);
2938#endif
2939}
2940
2941
2942/**
2943 * Notification about a successful memory deregistration or reservation.
2944 *
2945 * @param pVM VM Handle.
2946 * @param GCPhys Start physical address.
2947 * @param cb The size of the range.
2948 */
2949REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2950{
2951 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2952 VM_ASSERT_EMT(pVM);
2953
2954 /*
2955 * Validate input - we trust the caller.
2956 */
2957 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2958 Assert(cb);
2959 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2960
2961#ifdef VBOX_WITH_REM_LOCKING
2962 EMRemLock(pVM);
2963#endif
2964 /*
2965 * Unassigning the memory.
2966 */
2967 Assert(!pVM->rem.s.fIgnoreAll);
2968 pVM->rem.s.fIgnoreAll = true;
2969
2970 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2971
2972 Assert(pVM->rem.s.fIgnoreAll);
2973 pVM->rem.s.fIgnoreAll = false;
2974#ifdef VBOX_WITH_REM_LOCKING
2975 EMRemUnlock(pVM);
2976#endif
2977}
2978
2979
2980/**
2981 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2982 *
2983 * @param pVM VM Handle.
2984 * @param enmType Handler type.
2985 * @param GCPhys Handler range address.
2986 * @param cb Size of the handler range.
2987 * @param fHasHCHandler Set if the handler has a HC callback function.
2988 *
2989 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2990 * Handler memory type to memory which has no HC handler.
2991 */
2992static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2993{
2994 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2995 enmType, GCPhys, cb, fHasHCHandler));
2996
2997 VM_ASSERT_EMT(pVM);
2998 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2999 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3000
3001#ifdef VBOX_WITH_REM_LOCKING
3002 EMRemLock(pVM);
3003#endif
3004
3005 Assert(!pVM->rem.s.fIgnoreAll);
3006 pVM->rem.s.fIgnoreAll = true;
3007
3008 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3009 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3010 else if (fHasHCHandler)
3011 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3012
3013 Assert(pVM->rem.s.fIgnoreAll);
3014 pVM->rem.s.fIgnoreAll = false;
3015#ifdef VBOX_WITH_REM_LOCKING
3016 EMRemUnlock(pVM);
3017#endif
3018}
3019
3020/**
3021 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3022 *
3023 * @param pVM VM Handle.
3024 * @param enmType Handler type.
3025 * @param GCPhys Handler range address.
3026 * @param cb Size of the handler range.
3027 * @param fHasHCHandler Set if the handler has a HC callback function.
3028 *
3029 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3030 * Handler memory type to memory which has no HC handler.
3031 */
3032REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3033{
3034 REMR3ReplayHandlerNotifications(pVM);
3035
3036 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3037}
3038
3039/**
3040 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3041 *
3042 * @param pVM VM Handle.
3043 * @param enmType Handler type.
3044 * @param GCPhys Handler range address.
3045 * @param cb Size of the handler range.
3046 * @param fHasHCHandler Set if the handler has a HC callback function.
3047 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3048 */
3049static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3050{
3051 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3052 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3053 VM_ASSERT_EMT(pVM);
3054
3055#ifdef VBOX_WITH_REM_LOCKING
3056 EMRemLock(pVM);
3057#endif
3058
3059 Assert(!pVM->rem.s.fIgnoreAll);
3060 pVM->rem.s.fIgnoreAll = true;
3061
3062/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3063 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3064 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3065 else if (fHasHCHandler)
3066 {
3067 if (!fRestoreAsRAM)
3068 {
3069 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3070 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3071 }
3072 else
3073 {
3074 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3075 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3076 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3077 }
3078 }
3079
3080 Assert(pVM->rem.s.fIgnoreAll);
3081 pVM->rem.s.fIgnoreAll = false;
3082#ifdef VBOX_WITH_REM_LOCKING
3083 EMRemUnlock(pVM);
3084#endif
3085}
3086
3087/**
3088 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3089 *
3090 * @param pVM VM Handle.
3091 * @param enmType Handler type.
3092 * @param GCPhys Handler range address.
3093 * @param cb Size of the handler range.
3094 * @param fHasHCHandler Set if the handler has a HC callback function.
3095 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3096 */
3097REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3098{
3099 REMR3ReplayHandlerNotifications(pVM);
3100 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3101}
3102
3103
3104/**
3105 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3106 *
3107 * @param pVM VM Handle.
3108 * @param enmType Handler type.
3109 * @param GCPhysOld Old handler range address.
3110 * @param GCPhysNew New handler range address.
3111 * @param cb Size of the handler range.
3112 * @param fHasHCHandler Set if the handler has a HC callback function.
3113 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3114 */
3115static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3116{
3117 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3118 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3119 VM_ASSERT_EMT(pVM);
3120 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3121
3122#ifdef VBOX_WITH_REM_LOCKING
3123 EMRemLock(pVM);
3124#endif
3125
3126 if (fHasHCHandler)
3127 {
3128 Assert(!pVM->rem.s.fIgnoreAll);
3129 pVM->rem.s.fIgnoreAll = true;
3130
3131 /*
3132 * Reset the old page.
3133 */
3134 if (!fRestoreAsRAM)
3135 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3136 else
3137 {
3138 /* This is not perfect, but it'll do for PD monitoring... */
3139 Assert(cb == PAGE_SIZE);
3140 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3141 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3142 }
3143
3144 /*
3145 * Update the new page.
3146 */
3147 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3148 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3149 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3150
3151 Assert(pVM->rem.s.fIgnoreAll);
3152 pVM->rem.s.fIgnoreAll = false;
3153 }
3154#ifdef VBOX_WITH_REM_LOCKING
3155 EMRemUnlock(pVM);
3156#endif
3157}
3158
3159/**
3160 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3161 *
3162 * @param pVM VM Handle.
3163 * @param enmType Handler type.
3164 * @param GCPhysOld Old handler range address.
3165 * @param GCPhysNew New handler range address.
3166 * @param cb Size of the handler range.
3167 * @param fHasHCHandler Set if the handler has a HC callback function.
3168 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3169 */
3170REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3171{
3172 REMR3ReplayHandlerNotifications(pVM);
3173
3174 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3175}
3176
3177/**
3178 * Checks if we're handling access to this page or not.
3179 *
3180 * @returns true if we're trapping access.
3181 * @returns false if we aren't.
3182 * @param pVM The VM handle.
3183 * @param GCPhys The physical address.
3184 *
3185 * @remark This function will only work correctly in VBOX_STRICT builds!
3186 */
3187REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3188{
3189#ifdef VBOX_STRICT
3190 unsigned long off;
3191 REMR3ReplayHandlerNotifications(pVM);
3192
3193 off = get_phys_page_offset(GCPhys);
3194 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3195 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3196 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3197#else
3198 return false;
3199#endif
3200}
3201
3202
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry: memory type in the sub-page bits,
 *                      addend in the page-aligned part.
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: executing from this memory type is fatal.
       Dump handler/mmio/phys info to the release log before aborting. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed(); /* cpu_abort() does not return, so no return statement follows. */
}
3241
3242/**
3243 * Read guest RAM and ROM.
3244 *
3245 * @param SrcGCPhys The source address (guest physical).
3246 * @param pvDst The destination address.
3247 * @param cb Number of bytes
3248 */
3249void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3250{
3251 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3252 VBOX_CHECK_ADDR(SrcGCPhys);
3253 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3254#ifdef VBOX_DEBUG_PHYS
3255 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3256#endif
3257 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3258}
3259
3260
3261/**
3262 * Read guest RAM and ROM, unsigned 8-bit.
3263 *
3264 * @param SrcGCPhys The source address (guest physical).
3265 */
3266RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3267{
3268 uint8_t val;
3269 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3270 VBOX_CHECK_ADDR(SrcGCPhys);
3271 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3272 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3273#ifdef VBOX_DEBUG_PHYS
3274 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3275#endif
3276 return val;
3277}
3278
3279
3280/**
3281 * Read guest RAM and ROM, signed 8-bit.
3282 *
3283 * @param SrcGCPhys The source address (guest physical).
3284 */
3285RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3286{
3287 int8_t val;
3288 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3289 VBOX_CHECK_ADDR(SrcGCPhys);
3290 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3291 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3292#ifdef VBOX_DEBUG_PHYS
3293 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3294#endif
3295 return val;
3296}
3297
3298
3299/**
3300 * Read guest RAM and ROM, unsigned 16-bit.
3301 *
3302 * @param SrcGCPhys The source address (guest physical).
3303 */
3304RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3305{
3306 uint16_t val;
3307 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3308 VBOX_CHECK_ADDR(SrcGCPhys);
3309 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3310 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3311#ifdef VBOX_DEBUG_PHYS
3312 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3313#endif
3314 return val;
3315}
3316
3317
3318/**
3319 * Read guest RAM and ROM, signed 16-bit.
3320 *
3321 * @param SrcGCPhys The source address (guest physical).
3322 */
3323RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3324{
3325 int16_t val;
3326 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3327 VBOX_CHECK_ADDR(SrcGCPhys);
3328 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3329 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3330#ifdef VBOX_DEBUG_PHYS
3331 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3332#endif
3333 return val;
3334}
3335
3336
3337/**
3338 * Read guest RAM and ROM, unsigned 32-bit.
3339 *
3340 * @param SrcGCPhys The source address (guest physical).
3341 */
3342RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3343{
3344 uint32_t val;
3345 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3346 VBOX_CHECK_ADDR(SrcGCPhys);
3347 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3348 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3349#ifdef VBOX_DEBUG_PHYS
3350 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3351#endif
3352 return val;
3353}
3354
3355
3356/**
3357 * Read guest RAM and ROM, signed 32-bit.
3358 *
3359 * @param SrcGCPhys The source address (guest physical).
3360 */
3361RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3362{
3363 int32_t val;
3364 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3365 VBOX_CHECK_ADDR(SrcGCPhys);
3366 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3367 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3368#ifdef VBOX_DEBUG_PHYS
3369 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3370#endif
3371 return val;
3372}
3373
3374
3375/**
3376 * Read guest RAM and ROM, unsigned 64-bit.
3377 *
3378 * @param SrcGCPhys The source address (guest physical).
3379 */
3380uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3381{
3382 uint64_t val;
3383 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3384 VBOX_CHECK_ADDR(SrcGCPhys);
3385 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3386 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3387#ifdef VBOX_DEBUG_PHYS
3388 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3389#endif
3390 return val;
3391}
3392
3393
3394/**
3395 * Read guest RAM and ROM, signed 64-bit.
3396 *
3397 * @param SrcGCPhys The source address (guest physical).
3398 */
3399int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3400{
3401 int64_t val;
3402 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3403 VBOX_CHECK_ADDR(SrcGCPhys);
3404 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3405 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3406#ifdef VBOX_DEBUG_PHYS
3407 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3408#endif
3409 return val;
3410}
3411
3412
3413/**
3414 * Write guest RAM.
3415 *
3416 * @param DstGCPhys The destination address (guest physical).
3417 * @param pvSrc The source address.
3418 * @param cb Number of bytes to write
3419 */
3420void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3421{
3422 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3423 VBOX_CHECK_ADDR(DstGCPhys);
3424 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3425 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3426#ifdef VBOX_DEBUG_PHYS
3427 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3428#endif
3429}
3430
3431
3432/**
3433 * Write guest RAM, unsigned 8-bit.
3434 *
3435 * @param DstGCPhys The destination address (guest physical).
3436 * @param val Value
3437 */
3438void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3439{
3440 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3441 VBOX_CHECK_ADDR(DstGCPhys);
3442 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3443 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3444#ifdef VBOX_DEBUG_PHYS
3445 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3446#endif
3447}
3448
3449
/**
 * Write guest RAM, unsigned 16-bit.  (Doc fixed: previously claimed 8-bit.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3466
3467
3468/**
3469 * Write guest RAM, unsigned 32-bit.
3470 *
3471 * @param DstGCPhys The destination address (guest physical).
3472 * @param val Value
3473 */
3474void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3475{
3476 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3477 VBOX_CHECK_ADDR(DstGCPhys);
3478 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3479 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3480#ifdef VBOX_DEBUG_PHYS
3481 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3482#endif
3483}
3484
3485
3486/**
3487 * Write guest RAM, unsigned 64-bit.
3488 *
3489 * @param DstGCPhys The destination address (guest physical).
3490 * @param val Value
3491 */
3492void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3493{
3494 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3495 VBOX_CHECK_ADDR(DstGCPhys);
3496 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3497 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3498#ifdef VBOX_DEBUG_PHYS
3499 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3500#endif
3501}
3502
3503#undef LOG_GROUP
3504#define LOG_GROUP LOG_GROUP_REM_MMIO
3505
3506/** Read MMIO memory. */
3507static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3508{
3509 uint32_t u32 = 0;
3510 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3511 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3512 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3513 return u32;
3514}
3515
3516/** Read MMIO memory. */
3517static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3518{
3519 uint32_t u32 = 0;
3520 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3521 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3522 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3523 return u32;
3524}
3525
3526/** Read MMIO memory. */
3527static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3528{
3529 uint32_t u32 = 0;
3530 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3531 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3532 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3533 return u32;
3534}
3535
3536/** Write to MMIO memory. */
3537static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3538{
3539 int rc;
3540 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3541 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3542 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3543}
3544
3545/** Write to MMIO memory. */
3546static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3547{
3548 int rc;
3549 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3550 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3551 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3552}
3553
3554/** Write to MMIO memory. */
3555static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3556{
3557 int rc;
3558 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3559 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3560 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3561}
3562
3563
3564#undef LOG_GROUP
3565#define LOG_GROUP LOG_GROUP_REM_HANDLER
3566
3567/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3568
3569static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3570{
3571 uint8_t u8;
3572 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3573 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3574 return u8;
3575}
3576
3577static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3578{
3579 uint16_t u16;
3580 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3581 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3582 return u16;
3583}
3584
3585static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3586{
3587 uint32_t u32;
3588 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3589 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3590 return u32;
3591}
3592
3593static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3594{
3595 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3596 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3597}
3598
3599static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3600{
3601 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3602 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3603}
3604
3605static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3606{
3607 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3608 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3609}
3610
3611/* -+- disassembly -+- */
3612
3613#undef LOG_GROUP
3614#define LOG_GROUP LOG_GROUP_REM_DISAS
3615
3616
3617/**
3618 * Enables or disables singled stepped disassembly.
3619 *
3620 * @returns VBox status code.
3621 * @param pVM VM handle.
3622 * @param fEnable To enable set this flag, to disable clear it.
3623 */
3624static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3625{
3626 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3627 VM_ASSERT_EMT(pVM);
3628
3629 if (fEnable)
3630 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3631 else
3632 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3633 return VINF_SUCCESS;
3634}
3635
3636
3637/**
3638 * Enables or disables singled stepped disassembly.
3639 *
3640 * @returns VBox status code.
3641 * @param pVM VM handle.
3642 * @param fEnable To enable set this flag, to disable clear it.
3643 */
3644REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3645{
3646 PVMREQ pReq;
3647 int rc;
3648
3649 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3650 if (VM_IS_EMT(pVM))
3651 return remR3DisasEnableStepping(pVM, fEnable);
3652
3653 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3654 AssertRC(rc);
3655 if (RT_SUCCESS(rc))
3656 rc = pReq->iStatus;
3657 VMR3ReqFree(pReq);
3658 return rc;
3659}
3660
3661
3662#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3663/**
3664 * External Debugger Command: .remstep [on|off|1|0]
3665 */
3666static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3667{
3668 bool fEnable;
3669 int rc;
3670
3671 /* print status */
3672 if (cArgs == 0)
3673 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3674 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3675
3676 /* convert the argument and change the mode. */
3677 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3678 if (RT_FAILURE(rc))
3679 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3680 rc = REMR3DisasEnableStepping(pVM, fEnable);
3681 if (RT_FAILURE(rc))
3682 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3683 return rc;
3684}
3685#endif
3686
3687
3688/**
3689 * Disassembles one instruction and prints it to the log.
3690 *
3691 * @returns Success indicator.
3692 * @param env Pointer to the recompiler CPU structure.
3693 * @param f32BitCode Indicates that whether or not the code should
3694 * be disassembled as 16 or 32 bit. If -1 the CS
3695 * selector will be inspected.
3696 * @param pszPrefix
3697 */
3698bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3699{
3700 PVM pVM = env->pVM;
3701 const bool fLog = LogIsEnabled();
3702 const bool fLog2 = LogIs2Enabled();
3703 int rc = VINF_SUCCESS;
3704
3705 /*
3706 * Don't bother if there ain't any log output to do.
3707 */
3708 if (!fLog && !fLog2)
3709 return true;
3710
3711 /*
3712 * Update the state so DBGF reads the correct register values.
3713 */
3714 remR3StateUpdate(pVM, env->pVCpu);
3715
3716 /*
3717 * Log registers if requested.
3718 */
3719 if (!fLog2)
3720 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3721
3722 /*
3723 * Disassemble to log.
3724 */
3725 if (fLog)
3726 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3727
3728 return RT_SUCCESS(rc);
3729}
3730
3731
/**
 * Disassemble recompiled code.
 *
 * @param   phFile  Logfile; only used when DEBUG_TMP_LOGGING is defined,
 *                  otherwise output goes through RTLogPrintf.
 * @param   pvCode  Pointer to the code block.
 * @param   cb      Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* Note: in this configuration the '{' block after #endif is the body of
       this 'if' - the whole disassembly is skipped without level 2 logging. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                /* On a decode failure advance a single byte and retry. */
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3779
3780
/**
 * Disassemble guest code.
 *
 * @param   phFile  Logfile; only used when DEBUG_TMP_LOGGING is defined,
 *                  otherwise output goes through RTLogPrintf.
 * @param   uCode   The guest address of the code to disassemble. (flat?)
 * @param   cb      Number of bytes to disassemble.
 * @param   fFlags  Flags, probably something which tells if this is 16, 32 or 64 bit code.
 */
void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* Note: in this configuration the '{' block after #endif is the body of
       this 'if' - the whole disassembly is skipped without level 2 logging. */
    if (LogIs2Enabled())
#endif
    {
        PVM pVM = cpu_single_env->pVM;
        PVMCPU pVCpu = cpu_single_env->pVCpu;
        RTSEL cs;
        RTGCUINTPTR eip;

        Assert(pVCpu);

        /*
         * Update the state so DBGF reads the correct register values (flags).
         */
        remR3StateUpdate(pVM, pVCpu);

        /*
         * Do the disassembling.
         */
        DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
        cs = cpu_single_env->segs[R_CS].selector;
        eip = uCode - cpu_single_env->segs[R_CS].base;
        for (;;)
        {
            char szBuf[256];
            uint32_t cbInstr;
            int rc = DBGFR3DisasInstrEx(pVM,
                                        pVCpu->idCpu,
                                        cs,
                                        eip,
                                        0,
                                        szBuf, sizeof(szBuf),
                                        &cbInstr);
            if (RT_SUCCESS(rc))
                DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
            else
            {
                /* NOTE(review): szBuf may be uninitialized here if
                   DBGFR3DisasInstrEx failed without writing it - verify. */
                DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
                cbInstr = 1;
            }

            /* next */
            if (cb <= cbInstr)
                break;
            cb -= cbInstr;
            uCode += cbInstr;
            eip += cbInstr;
        }
    }
#undef DISAS_PRINTF
}
3845
3846
3847/**
3848 * Looks up a guest symbol.
3849 *
3850 * @returns Pointer to symbol name. This is a static buffer.
3851 * @param orig_addr The address in question.
3852 */
3853const char *lookup_symbol(target_ulong orig_addr)
3854{
3855 RTGCINTPTR off = 0;
3856 DBGFSYMBOL Sym;
3857 PVM pVM = cpu_single_env->pVM;
3858 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3859 if (RT_SUCCESS(rc))
3860 {
3861 static char szSym[sizeof(Sym.szName) + 48];
3862 if (!off)
3863 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3864 else if (off > 0)
3865 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3866 else
3867 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3868 return szSym;
3869 }
3870 return "<N/A>";
3871}
3872
3873
3874#undef LOG_GROUP
3875#define LOG_GROUP LOG_GROUP_REM
3876
3877
3878/* -+- FF notifications -+- */
3879
3880
3881/**
3882 * Notification about a pending interrupt.
3883 *
3884 * @param pVM VM Handle.
3885 * @param pVCpu VMCPU Handle.
3886 * @param u8Interrupt Interrupt
3887 * @thread The emulation thread.
3888 */
3889REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3890{
3891 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3892 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3893}
3894
3895/**
3896 * Notification about a pending interrupt.
3897 *
3898 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3899 * @param pVM VM Handle.
3900 * @param pVCpu VMCPU Handle.
3901 * @thread The emulation thread.
3902 */
3903REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3904{
3905 return pVM->rem.s.u32PendingInterrupt;
3906}
3907
3908/**
3909 * Notification about the interrupt FF being set.
3910 *
3911 * @param pVM VM Handle.
3912 * @param pVCpu VMCPU Handle.
3913 * @thread The emulation thread.
3914 */
3915REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3916{
3917 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3918 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3919 if (pVM->rem.s.fInREM)
3920 {
3921 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3922 CPU_INTERRUPT_EXTERNAL_HARD);
3923 }
3924}
3925
3926
3927/**
3928 * Notification about the interrupt FF being set.
3929 *
3930 * @param pVM VM Handle.
3931 * @param pVCpu VMCPU Handle.
3932 * @thread Any.
3933 */
3934REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3935{
3936 LogFlow(("REMR3NotifyInterruptClear:\n"));
3937 if (pVM->rem.s.fInREM)
3938 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3939}
3940
3941
3942/**
3943 * Notification about pending timer(s).
3944 *
3945 * @param pVM VM Handle.
3946 * @param pVCpuDst The target cpu for this notification.
3947 * TM will not broadcast pending timer events, but use
3948 * a decidated EMT for them. So, only interrupt REM
3949 * execution if the given CPU is executing in REM.
3950 * @thread Any.
3951 */
3952REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3953{
3954#ifndef DEBUG_bird
3955 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3956#endif
3957 if (pVM->rem.s.fInREM)
3958 {
3959 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3960 {
3961 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3962 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3963 CPU_INTERRUPT_EXTERNAL_TIMER);
3964 }
3965 else
3966 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3967 }
3968 else
3969 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3970}
3971
3972
3973/**
3974 * Notification about pending DMA transfers.
3975 *
3976 * @param pVM VM Handle.
3977 * @thread Any.
3978 */
3979REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3980{
3981 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3982 if (pVM->rem.s.fInREM)
3983 {
3984 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3985 CPU_INTERRUPT_EXTERNAL_DMA);
3986 }
3987}
3988
3989
3990/**
3991 * Notification about pending timer(s).
3992 *
3993 * @param pVM VM Handle.
3994 * @thread Any.
3995 */
3996REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3997{
3998 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3999 if (pVM->rem.s.fInREM)
4000 {
4001 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4002 CPU_INTERRUPT_EXTERNAL_EXIT);
4003 }
4004}
4005
4006
4007/**
4008 * Notification about pending FF set by an external thread.
4009 *
4010 * @param pVM VM handle.
4011 * @thread Any.
4012 */
4013REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4014{
4015 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4016 if (pVM->rem.s.fInREM)
4017 {
4018 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4019 CPU_INTERRUPT_EXTERNAL_EXIT);
4020 }
4021}
4022
4023
4024#ifdef VBOX_WITH_STATISTICS
4025void remR3ProfileStart(int statcode)
4026{
4027 STAMPROFILEADV *pStat;
4028 switch(statcode)
4029 {
4030 case STATS_EMULATE_SINGLE_INSTR:
4031 pStat = &gStatExecuteSingleInstr;
4032 break;
4033 case STATS_QEMU_COMPILATION:
4034 pStat = &gStatCompilationQEmu;
4035 break;
4036 case STATS_QEMU_RUN_EMULATED_CODE:
4037 pStat = &gStatRunCodeQEmu;
4038 break;
4039 case STATS_QEMU_TOTAL:
4040 pStat = &gStatTotalTimeQEmu;
4041 break;
4042 case STATS_QEMU_RUN_TIMERS:
4043 pStat = &gStatTimers;
4044 break;
4045 case STATS_TLB_LOOKUP:
4046 pStat= &gStatTBLookup;
4047 break;
4048 case STATS_IRQ_HANDLING:
4049 pStat= &gStatIRQ;
4050 break;
4051 case STATS_RAW_CHECK:
4052 pStat = &gStatRawCheck;
4053 break;
4054
4055 default:
4056 AssertMsgFailed(("unknown stat %d\n", statcode));
4057 return;
4058 }
4059 STAM_PROFILE_ADV_START(pStat, a);
4060}
4061
4062
4063void remR3ProfileStop(int statcode)
4064{
4065 STAMPROFILEADV *pStat;
4066 switch(statcode)
4067 {
4068 case STATS_EMULATE_SINGLE_INSTR:
4069 pStat = &gStatExecuteSingleInstr;
4070 break;
4071 case STATS_QEMU_COMPILATION:
4072 pStat = &gStatCompilationQEmu;
4073 break;
4074 case STATS_QEMU_RUN_EMULATED_CODE:
4075 pStat = &gStatRunCodeQEmu;
4076 break;
4077 case STATS_QEMU_TOTAL:
4078 pStat = &gStatTotalTimeQEmu;
4079 break;
4080 case STATS_QEMU_RUN_TIMERS:
4081 pStat = &gStatTimers;
4082 break;
4083 case STATS_TLB_LOOKUP:
4084 pStat= &gStatTBLookup;
4085 break;
4086 case STATS_IRQ_HANDLING:
4087 pStat= &gStatIRQ;
4088 break;
4089 case STATS_RAW_CHECK:
4090 pStat = &gStatRawCheck;
4091 break;
4092 default:
4093 AssertMsgFailed(("unknown stat %d\n", statcode));
4094 return;
4095 }
4096 STAM_PROFILE_ADV_STOP(pStat, a);
4097}
4098#endif
4099
4100/**
4101 * Raise an RC, force rem exit.
4102 *
4103 * @param pVM VM handle.
4104 * @param rc The rc.
4105 */
4106void remR3RaiseRC(PVM pVM, int rc)
4107{
4108 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4109 Assert(pVM->rem.s.fInREM);
4110 VM_ASSERT_EMT(pVM);
4111 pVM->rem.s.rc = rc;
4112 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4113}
4114
4115
4116/* -+- timers -+- */
4117
4118uint64_t cpu_get_tsc(CPUX86State *env)
4119{
4120 STAM_COUNTER_INC(&gStatCpuGetTSC);
4121 return TMCpuTickGet(env->pVCpu);
4122}
4123
4124
4125/* -+- interrupts -+- */
4126
4127void cpu_set_ferr(CPUX86State *env)
4128{
4129 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4130 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4131}
4132
/**
 * Fetches the next pending external interrupt vector for the recompiler.
 *
 * Prefers an interrupt previously stashed via REMR3NotifyPendingInterrupt
 * over querying the (A)PIC via PDM; see the kludge note below.
 *
 * @returns The interrupt vector (0..255), or -1 if none is pending.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the stashed interrupt and clear the slot. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts waiting? Keep the hard-interrupt request flag set. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4165
4166
4167/* -+- local apic -+- */
4168
4169void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4170{
4171 int rc = PDMApicSetBase(env->pVM, val);
4172 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4173}
4174
4175uint64_t cpu_get_apic_base(CPUX86State *env)
4176{
4177 uint64_t u64;
4178 int rc = PDMApicGetBase(env->pVM, &u64);
4179 if (RT_SUCCESS(rc))
4180 {
4181 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4182 return u64;
4183 }
4184 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4185 return 0;
4186}
4187
4188void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4189{
4190 int rc = PDMApicSetTPR(env->pVCpu, val);
4191 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4192}
4193
4194uint8_t cpu_get_apic_tpr(CPUX86State *env)
4195{
4196 uint8_t u8;
4197 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4198 if (RT_SUCCESS(rc))
4199 {
4200 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4201 return u8;
4202 }
4203 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4204 return 0;
4205}
4206
4207
4208uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4209{
4210 uint64_t value;
4211 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4212 if (RT_SUCCESS(rc))
4213 {
4214 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4215 return value;
4216 }
4217 /** @todo: exception ? */
4218 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4219 return value;
4220}
4221
4222void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4223{
4224 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4225 /** @todo: exception if error ? */
4226 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4227}
4228
4229uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4230{
4231 Assert(env->pVCpu);
4232 return CPUMGetGuestMsr(env->pVCpu, msr);
4233}
4234
4235void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4236{
4237 Assert(env->pVCpu);
4238 CPUMSetGuestMsr(env->pVCpu, msr, val);
4239}
4240
4241/* -+- I/O Ports -+- */
4242
4243#undef LOG_GROUP
4244#define LOG_GROUP LOG_GROUP_REM_IOPORT
4245
4246void cpu_outb(CPUState *env, int addr, int val)
4247{
4248 int rc;
4249
4250 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4251 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4252
4253 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4254 if (RT_LIKELY(rc == VINF_SUCCESS))
4255 return;
4256 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4257 {
4258 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4259 remR3RaiseRC(env->pVM, rc);
4260 return;
4261 }
4262 remAbort(rc, __FUNCTION__);
4263}
4264
4265void cpu_outw(CPUState *env, int addr, int val)
4266{
4267 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4268 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4269 if (RT_LIKELY(rc == VINF_SUCCESS))
4270 return;
4271 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4272 {
4273 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4274 remR3RaiseRC(env->pVM, rc);
4275 return;
4276 }
4277 remAbort(rc, __FUNCTION__);
4278}
4279
4280void cpu_outl(CPUState *env, int addr, int val)
4281{
4282 int rc;
4283 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4284 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4285 if (RT_LIKELY(rc == VINF_SUCCESS))
4286 return;
4287 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4288 {
4289 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4290 remR3RaiseRC(env->pVM, rc);
4291 return;
4292 }
4293 remAbort(rc, __FUNCTION__);
4294}
4295
4296int cpu_inb(CPUState *env, int addr)
4297{
4298 uint32_t u32 = 0;
4299 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4300 if (RT_LIKELY(rc == VINF_SUCCESS))
4301 {
4302 if (/*addr != 0x61 && */addr != 0x71)
4303 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4304 return (int)u32;
4305 }
4306 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4307 {
4308 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4309 remR3RaiseRC(env->pVM, rc);
4310 return (int)u32;
4311 }
4312 remAbort(rc, __FUNCTION__);
4313 return 0xff;
4314}
4315
4316int cpu_inw(CPUState *env, int addr)
4317{
4318 uint32_t u32 = 0;
4319 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4320 if (RT_LIKELY(rc == VINF_SUCCESS))
4321 {
4322 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4323 return (int)u32;
4324 }
4325 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4326 {
4327 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4328 remR3RaiseRC(env->pVM, rc);
4329 return (int)u32;
4330 }
4331 remAbort(rc, __FUNCTION__);
4332 return 0xffff;
4333}
4334
4335int cpu_inl(CPUState *env, int addr)
4336{
4337 uint32_t u32 = 0;
4338 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4339 if (RT_LIKELY(rc == VINF_SUCCESS))
4340 {
4341//if (addr==0x01f0 && u32 == 0x6b6d)
4342// loglevel = ~0;
4343 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4344 return (int)u32;
4345 }
4346 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4347 {
4348 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4349 remR3RaiseRC(env->pVM, rc);
4350 return (int)u32;
4351 }
4352 remAbort(rc, __FUNCTION__);
4353 return 0xffffffff;
4354}
4355
4356#undef LOG_GROUP
4357#define LOG_GROUP LOG_GROUP_REM
4358
4359
4360/* -+- helpers and misc other interfaces -+- */
4361
4362/**
4363 * Perform the CPUID instruction.
4364 *
4365 * ASMCpuId cannot be invoked from some source files where this is used because of global
4366 * register allocations.
4367 *
4368 * @param env Pointer to the recompiler CPU structure.
4369 * @param uOperator CPUID operation (eax).
4370 * @param pvEAX Where to store eax.
4371 * @param pvEBX Where to store ebx.
4372 * @param pvECX Where to store ecx.
4373 * @param pvEDX Where to store edx.
4374 */
4375void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4376{
4377 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4378}
4379
4380
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * Logs the message, then syncs the recompiler state back (if executing in
 * REM) and hands control to EM's fatal-error handling, which does not return.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4410
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats and logs the message, syncs the recompiler state back to VBox (if
 * executing in REM), then hands off to EMR3FatalError, which is expected
 * not to return.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    /* Make sure the message actually reaches the log before we die. */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to six pointer-sized args off the va_list by counting '%'
       conversions, then re-issue them through RTStrPrintf; calling
       RTStrPrintfV directly across the MSC/GCC varargs ABI gap won't work. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4471
4472
4473/**
4474 * Aborts the VM.
4475 *
4476 * @param rc VBox error code.
4477 * @param pszTip Hint about why/when this happend.
4478 */
4479void remAbort(int rc, const char *pszTip)
4480{
4481 PVM pVM;
4482 PVMCPU pVCpu;
4483
4484 /*
4485 * Bitch about it.
4486 */
4487 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4488 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4489
4490 /*
4491 * Jump back to where we entered the recompiler.
4492 */
4493 pVM = cpu_single_env->pVM;
4494 pVCpu = cpu_single_env->pVCpu;
4495 Assert(pVCpu);
4496
4497 if (pVM->rem.s.fInREM)
4498 REMR3StateBack(pVM, pVCpu);
4499
4500 EMR3FatalError(pVCpu, rc);
4501 AssertMsgFailed(("EMR3FatalError returned!\n"));
4502}
4503
4504
/**
 * Dumps a linux system call.
 *
 * Logs the syscall name (looked up by the eax vector in the i386 syscall
 * table below) together with the argument registers. Vectors beyond the
 * table are logged as unknown.
 *
 * @param   pVCpu       VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* i386 Linux syscall names, indexed by syscall number (eax). */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",     /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",   /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",    /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",   /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",   /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",    /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",    /* 30 */
        "sys_ni_syscall",   /* old stty syscall holder */
        "sys_ni_syscall",   /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",   /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",    /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",   /* old prof syscall holder */
        "sys_brk",      /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",    /* 50 */
        "sys_acct",
        "sys_umount",   /* recycled never used phys() */
        "sys_ni_syscall",   /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",    /* 55 */
        "sys_ni_syscall",   /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",   /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",    /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",  /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",   /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",    /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",  /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink", /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",     /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16", /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",   /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",  /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",    /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",     /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",   /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",  /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",    /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",   /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",   /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",    /* 135 */
        "sys_personality",
        "sys_ni_syscall",   /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",   /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",    /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",    /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",  /* 165 */
        "sys_vm86",
        "sys_ni_syscall",   /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",  /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",  /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",   /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",   /* reserved for streams1 */
        "sys_ni_syscall",   /* reserved for streams2 */
        "sys_vfork",    /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",   /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",   /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",    /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",    /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",   /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",   /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",    /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",    /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",  /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",    /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",    /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",    /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",   /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            /* Known vector: log its name and the argument registers;
               otherwise just log the raw vector. */
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4803
4804
4805/**
4806 * Dumps an OpenBSD system call.
4807 * @param pVCpu VMCPU handle.
4808 */
4809void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4810{
4811 static const char *apsz[] =
4812 {
4813 "SYS_syscall", //0
4814 "SYS_exit", //1
4815 "SYS_fork", //2
4816 "SYS_read", //3
4817 "SYS_write", //4
4818 "SYS_open", //5
4819 "SYS_close", //6
4820 "SYS_wait4", //7
4821 "SYS_8",
4822 "SYS_link", //9
4823 "SYS_unlink", //10
4824 "SYS_11",
4825 "SYS_chdir", //12
4826 "SYS_fchdir", //13
4827 "SYS_mknod", //14
4828 "SYS_chmod", //15
4829 "SYS_chown", //16
4830 "SYS_break", //17
4831 "SYS_18",
4832 "SYS_19",
4833 "SYS_getpid", //20
4834 "SYS_mount", //21
4835 "SYS_unmount", //22
4836 "SYS_setuid", //23
4837 "SYS_getuid", //24
4838 "SYS_geteuid", //25
4839 "SYS_ptrace", //26
4840 "SYS_recvmsg", //27
4841 "SYS_sendmsg", //28
4842 "SYS_recvfrom", //29
4843 "SYS_accept", //30
4844 "SYS_getpeername", //31
4845 "SYS_getsockname", //32
4846 "SYS_access", //33
4847 "SYS_chflags", //34
4848 "SYS_fchflags", //35
4849 "SYS_sync", //36
4850 "SYS_kill", //37
4851 "SYS_38",
4852 "SYS_getppid", //39
4853 "SYS_40",
4854 "SYS_dup", //41
4855 "SYS_opipe", //42
4856 "SYS_getegid", //43
4857 "SYS_profil", //44
4858 "SYS_ktrace", //45
4859 "SYS_sigaction", //46
4860 "SYS_getgid", //47
4861 "SYS_sigprocmask", //48
4862 "SYS_getlogin", //49
4863 "SYS_setlogin", //50
4864 "SYS_acct", //51
4865 "SYS_sigpending", //52
4866 "SYS_osigaltstack", //53
4867 "SYS_ioctl", //54
4868 "SYS_reboot", //55
4869 "SYS_revoke", //56
4870 "SYS_symlink", //57
4871 "SYS_readlink", //58
4872 "SYS_execve", //59
4873 "SYS_umask", //60
4874 "SYS_chroot", //61
4875 "SYS_62",
4876 "SYS_63",
4877 "SYS_64",
4878 "SYS_65",
4879 "SYS_vfork", //66
4880 "SYS_67",
4881 "SYS_68",
4882 "SYS_sbrk", //69
4883 "SYS_sstk", //70
4884 "SYS_61",
4885 "SYS_vadvise", //72
4886 "SYS_munmap", //73
4887 "SYS_mprotect", //74
4888 "SYS_madvise", //75
4889 "SYS_76",
4890 "SYS_77",
4891 "SYS_mincore", //78
4892 "SYS_getgroups", //79
4893 "SYS_setgroups", //80
4894 "SYS_getpgrp", //81
4895 "SYS_setpgid", //82
4896 "SYS_setitimer", //83
4897 "SYS_84",
4898 "SYS_85",
4899 "SYS_getitimer", //86
4900 "SYS_87",
4901 "SYS_88",
4902 "SYS_89",
4903 "SYS_dup2", //90
4904 "SYS_91",
4905 "SYS_fcntl", //92
4906 "SYS_select", //93
4907 "SYS_94",
4908 "SYS_fsync", //95
4909 "SYS_setpriority", //96
4910 "SYS_socket", //97
4911 "SYS_connect", //98
4912 "SYS_99",
4913 "SYS_getpriority", //100
4914 "SYS_101",
4915 "SYS_102",
4916 "SYS_sigreturn", //103
4917 "SYS_bind", //104
4918 "SYS_setsockopt", //105
4919 "SYS_listen", //106
4920 "SYS_107",
4921 "SYS_108",
4922 "SYS_109",
4923 "SYS_110",
4924 "SYS_sigsuspend", //111
4925 "SYS_112",
4926 "SYS_113",
4927 "SYS_114",
4928 "SYS_115",
4929 "SYS_gettimeofday", //116
4930 "SYS_getrusage", //117
4931 "SYS_getsockopt", //118
4932 "SYS_119",
4933 "SYS_readv", //120
4934 "SYS_writev", //121
4935 "SYS_settimeofday", //122
4936 "SYS_fchown", //123
4937 "SYS_fchmod", //124
4938 "SYS_125",
4939 "SYS_setreuid", //126
4940 "SYS_setregid", //127
4941 "SYS_rename", //128
4942 "SYS_129",
4943 "SYS_130",
4944 "SYS_flock", //131
4945 "SYS_mkfifo", //132
4946 "SYS_sendto", //133
4947 "SYS_shutdown", //134
4948 "SYS_socketpair", //135
4949 "SYS_mkdir", //136
4950 "SYS_rmdir", //137
4951 "SYS_utimes", //138
4952 "SYS_139",
4953 "SYS_adjtime", //140
4954 "SYS_141",
4955 "SYS_142",
4956 "SYS_143",
4957 "SYS_144",
4958 "SYS_145",
4959 "SYS_146",
4960 "SYS_setsid", //147
4961 "SYS_quotactl", //148
4962 "SYS_149",
4963 "SYS_150",
4964 "SYS_151",
4965 "SYS_152",
4966 "SYS_153",
4967 "SYS_154",
4968 "SYS_nfssvc", //155
4969 "SYS_156",
4970 "SYS_157",
4971 "SYS_158",
4972 "SYS_159",
4973 "SYS_160",
4974 "SYS_getfh", //161
4975 "SYS_162",
4976 "SYS_163",
4977 "SYS_164",
4978 "SYS_sysarch", //165
4979 "SYS_166",
4980 "SYS_167",
4981 "SYS_168",
4982 "SYS_169",
4983 "SYS_170",
4984 "SYS_171",
4985 "SYS_172",
4986 "SYS_pread", //173
4987 "SYS_pwrite", //174
4988 "SYS_175",
4989 "SYS_176",
4990 "SYS_177",
4991 "SYS_178",
4992 "SYS_179",
4993 "SYS_180",
4994 "SYS_setgid", //181
4995 "SYS_setegid", //182
4996 "SYS_seteuid", //183
4997 "SYS_lfs_bmapv", //184
4998 "SYS_lfs_markv", //185
4999 "SYS_lfs_segclean", //186
5000 "SYS_lfs_segwait", //187
5001 "SYS_188",
5002 "SYS_189",
5003 "SYS_190",
5004 "SYS_pathconf", //191
5005 "SYS_fpathconf", //192
5006 "SYS_swapctl", //193
5007 "SYS_getrlimit", //194
5008 "SYS_setrlimit", //195
5009 "SYS_getdirentries", //196
5010 "SYS_mmap", //197
5011 "SYS___syscall", //198
5012 "SYS_lseek", //199
5013 "SYS_truncate", //200
5014 "SYS_ftruncate", //201
5015 "SYS___sysctl", //202
5016 "SYS_mlock", //203
5017 "SYS_munlock", //204
5018 "SYS_205",
5019 "SYS_futimes", //206
5020 "SYS_getpgid", //207
5021 "SYS_xfspioctl", //208
5022 "SYS_209",
5023 "SYS_210",
5024 "SYS_211",
5025 "SYS_212",
5026 "SYS_213",
5027 "SYS_214",
5028 "SYS_215",
5029 "SYS_216",
5030 "SYS_217",
5031 "SYS_218",
5032 "SYS_219",
5033 "SYS_220",
5034 "SYS_semget", //221
5035 "SYS_222",
5036 "SYS_223",
5037 "SYS_224",
5038 "SYS_msgget", //225
5039 "SYS_msgsnd", //226
5040 "SYS_msgrcv", //227
5041 "SYS_shmat", //228
5042 "SYS_229",
5043 "SYS_shmdt", //230
5044 "SYS_231",
5045 "SYS_clock_gettime", //232
5046 "SYS_clock_settime", //233
5047 "SYS_clock_getres", //234
5048 "SYS_235",
5049 "SYS_236",
5050 "SYS_237",
5051 "SYS_238",
5052 "SYS_239",
5053 "SYS_nanosleep", //240
5054 "SYS_241",
5055 "SYS_242",
5056 "SYS_243",
5057 "SYS_244",
5058 "SYS_245",
5059 "SYS_246",
5060 "SYS_247",
5061 "SYS_248",
5062 "SYS_249",
5063 "SYS_minherit", //250
5064 "SYS_rfork", //251
5065 "SYS_poll", //252
5066 "SYS_issetugid", //253
5067 "SYS_lchown", //254
5068 "SYS_getsid", //255
5069 "SYS_msync", //256
5070 "SYS_257",
5071 "SYS_258",
5072 "SYS_259",
5073 "SYS_getfsstat", //260
5074 "SYS_statfs", //261
5075 "SYS_fstatfs", //262
5076 "SYS_pipe", //263
5077 "SYS_fhopen", //264
5078 "SYS_265",
5079 "SYS_fhstatfs", //266
5080 "SYS_preadv", //267
5081 "SYS_pwritev", //268
5082 "SYS_kqueue", //269
5083 "SYS_kevent", //270
5084 "SYS_mlockall", //271
5085 "SYS_munlockall", //272
5086 "SYS_getpeereid", //273
5087 "SYS_274",
5088 "SYS_275",
5089 "SYS_276",
5090 "SYS_277",
5091 "SYS_278",
5092 "SYS_279",
5093 "SYS_280",
5094 "SYS_getresuid", //281
5095 "SYS_setresuid", //282
5096 "SYS_getresgid", //283
5097 "SYS_setresgid", //284
5098 "SYS_285",
5099 "SYS_mquery", //286
5100 "SYS_closefrom", //287
5101 "SYS_sigaltstack", //288
5102 "SYS_shmget", //289
5103 "SYS_semop", //290
5104 "SYS_stat", //291
5105 "SYS_fstat", //292
5106 "SYS_lstat", //293
5107 "SYS_fhstat", //294
5108 "SYS___semctl", //295
5109 "SYS_shmctl", //296
5110 "SYS_msgctl", //297
5111 "SYS_MAXSYSCALL", //298
5112 //299
5113 //300
5114 };
5115 uint32_t uEAX;
5116 if (!LogIsEnabled())
5117 return;
5118 uEAX = CPUMGetGuestEAX(pVCpu);
5119 switch (uEAX)
5120 {
5121 default:
5122 if (uEAX < RT_ELEMENTS(apsz))
5123 {
5124 uint32_t au32Args[8] = {0};
5125 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5126 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5127 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5128 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5129 }
5130 else
5131 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5132 break;
5133 }
5134}
5135
5136
5137#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5138/**
5139 * The Dll main entry point (stub).
5140 */
5141bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5142{
5143 return true;
5144}
5145
/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Plain byte-by-byte forward copy; the regions must not overlap
 * (same contract as the standard memcpy).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (read only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const-correct: was dropping the const qualifier off src */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5153
5154#endif
5155
5156void cpu_smm_update(CPUState *env)
5157{
5158}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette